1. Create Consul
# kompose convert -f /root/gitSwarm/dbcm-base-managers/compose/consul.yml
This generates two files:
consul-deployment.yaml consul-service.yaml
# kubectl create -f consul-deployment.yaml -f consul-service.yaml
Get the Consul cluster virtual IP:
# kubectl get svc consul
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
consul ClusterIP 10.0.54.163 <none> 8500/TCP,8300/TCP,8301/TCP,8302/TCP,8400/TCP 16h
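To confirm the Consul pods are running, the label kompose attaches to generated resources can be used (assuming the usual io.kompose.service label):
# kubectl get po -l io.kompose.service=consul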
2. Create MySQL
After putting the metadata into Consul (no change):
# kompose convert -f /root/gitSwarm/dbcm-base-managers/compose/mysql.yml
This generates two files:
mysql-deployment.yaml mysql-service.yaml
Modify mysql-deployment.yaml:
------------
spec.replicas = (instance number)
image = (latest MySQL image)
env:
  CLUSTER_ID
  CONSUL = (Consul cluster virtual IP)
-------------
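As a minimal sketch, the edited parts of mysql-deployment.yaml might look like this (the image tag, CLUSTER_ID value, and replica count are placeholders, not the real values):
------------
spec:
  replicas: 2                            # instance number
  template:
    spec:
      containers:
      - name: mysql
        image: registry.bst-1.cns.bstjpc.com:5000/dbelt/mysql-ms-mysql-v5.6.36-v0.5.0:latest  # placeholder tag; use the latest MySQL image
        env:
        - name: CLUSTER_ID
          value: "cluster-1"             # placeholder cluster id
        - name: CONSUL
          value: "10.0.54.163"           # Consul cluster virtual IP from step 1
------------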
# kubectl create -f mysql-deployment.yaml -f mysql-service.yaml
# kubectl get po -l io.kompose.service=mysql
NAME READY STATUS RESTARTS AGE
mysql-5b5d895b79-gxrbl 1/1 Running 0 2h
mysql-5b5d895b79-s9j5k 1/1 Running 0 2h
Note:
The Consul service name does not change.
The service ID becomes mysql-all-{pod name} and mysql-primary-{pod name},
e.g. "mysql-all-mysql-5b5d895b79-gxrbl" and "mysql-primary-mysql-5b5d895b79-gxrb2".
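The registrations can be checked against Consul's standard HTTP catalog API, using the cluster IP from step 1:
# curl http://10.0.54.163:8500/v1/catalog/services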
3. Scale cluster
# kubectl scale -f mysql-deployment.yaml --replicas=3
Auto scale:
# kubectl autoscale -f mysql-deployment.yaml --min=2 --max=5 --cpu-percent=70
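The resulting HorizontalPodAutoscaler can be inspected with:
# kubectl get hpa
Note that CPU-based autoscaling only works if cluster metrics (e.g. Heapster or metrics-server) are available.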
4. Delete cluster
# kubectl delete -f mysql-deployment.yaml
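This removes only the deployment; the service created in step 2 must be deleted separately:
# kubectl delete -f mysql-service.yaml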
5. Get node log
# kubectl logs mysql-5b5d895b79-gxrbl    (pod name)
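To stream the log or limit its length, the standard flags apply:
# kubectl logs -f --tail=100 mysql-5b5d895b79-gxrbl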
6. Rolling update
# kubectl set image deployment/mysql mysql=registry.bst-1.cns.bstjpc.com:5000/dbelt/mysql-ms-mysql-v5.6.36-v0.5.0:20180206_update
Check update status:
# kubectl rollout status deployment/mysql
Roll back:
# kubectl rollout undo deployment/mysql
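To roll back to a specific revision instead of just the previous one:
# kubectl rollout history deployment/mysql
# kubectl rollout undo deployment/mysql --to-revision=2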
7. Multi-AZ
# kubectl label nodes mhc zone=beijing
Modify mysql-deployment.yaml:
add (the value must match the node label set above):
spec.template.spec.nodeSelector.zone = beijing
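In a deployment the selector sits under the pod template; as YAML this looks like:
------------
spec:
  template:
    spec:
      nodeSelector:
        zone: beijing
------------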
8. Run a command
kubectl exec [flags] <pod name> <command>
# kubectl exec -ti mysql-55d5cbc5c4-fjgfj bash
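For a one-off command instead of an interactive shell, separate the command with -- (this sketch assumes the mysql client is present in the image):
# kubectl exec mysql-55d5cbc5c4-fjgfj -- mysql --version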
9. Resize
# kubectl set resources deployment nginx-deployment -c=nginx --limits=cpu=200m,memory=512Mi
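Applied to this document's deployment (limit values are illustrative; the container name mysql assumes kompose's convention of naming the container after the compose service):
# kubectl set resources deployment mysql -c=mysql --limits=cpu=500m,memory=1Gi
# kubectl describe deployment mysql    (verify the new limits)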
Still lacking:
1. stop/restart the cluster
2. get cluster-wide logs
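As a stopgap for both gaps: scaling to zero stops the cluster without deleting its objects, and a label selector pulls logs from all pods at once (assuming the io.kompose.service label used above; note that pod-local data does not survive rescheduling unless persistent volumes are used):
# kubectl scale deployment mysql --replicas=0    (stop)
# kubectl scale deployment mysql --replicas=2    (restart)
# kubectl logs -l io.kompose.service=mysql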
Other:
1. Make the deployment name unique. Pods can then be told apart by role labels:
[root@mhc k8s]# kubectl label pod consul-5f557878f4-974hw role=master
pod "consul-5f557878f4-974hw" labeled
[root@mhc k8s]# kubectl label pod consul-5f557878f4-hg6sh role=slave
pod "consul-5f557878f4-hg6sh" labeled
[root@mhc k8s]# kubectl get po -l role=master
NAME READY STATUS RESTARTS AGE
consul-5f557878f4-974hw 1/1 Running 0 17h
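To see every pod's labels at once:
# kubectl get po --show-labels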