Implementing High Availability for k8s

1.1: Install and configure etcd

1. Stop the etcd service on the master node; install and configure etcd on node2:
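Both steps sketched below (assuming the same CentOS yum repositories used for the other components in this build):
[root@k8s-master01 ~]# systemctl stop etcd
[root@k8s-master02 ~]# yum install etcd -y
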
[root@k8s-master02 ~]# grep -v '^#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.13:2379"

systemctl start etcd
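To survive reboots and confirm the new etcd is actually serving, a quick check (etcdctl v2 syntax, matching the insecure client URL configured above):
systemctl enable etcd
etcdctl -C http://10.0.0.13:2379 cluster-health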

1.2: Install and configure api-server, controller-manager, and scheduler on master01 (127.0.0.1:8080)

1. Point the apiserver at the etcd on node2:
[root@k8s-master01 ~]# vim /etc/kubernetes/apiserver +17
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.13:2379"

[root@k8s-master01 ~]# vim /etc/kubernetes/config +22
KUBE_MASTER="--master=http://127.0.0.1:8080"
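After editing these files, restart master01's control-plane services so they pick up the new etcd address (same unit names as used on master02 below):
[root@k8s-master01 ~]# systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service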

1.3: Install and configure api-server, controller-manager, and scheduler on master02 (127.0.0.1:8080)

[root@k8s-master02 ~]# yum install kubernetes-master
[root@k8s-master02 ~]# scp -rp 10.0.0.11:/etc/kubernetes/apiserver  /etc/kubernetes/apiserver

[root@k8s-master02 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://127.0.0.1:8080"

[root@k8s-master02 ~]# systemctl restart kube-apiserver.service kube-controller-manager.service kube-scheduler.service 
[root@k8s-master02 ~]# systemctl enable kube-apiserver.service kube-controller-manager.service kube-scheduler.service 

[root@k8s-master02 ~]# kubectl get componentstatus
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
controller-manager   Healthy   ok          

Test whether the two master nodes share data.
Create a pod from master01:
[root@k8s-master01 deploy]# kubectl create -f k8s_deploy.yml 
deployment "nginx-deployment" created
[root@k8s-master01 deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           5s

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   7m

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2807576163   3         3         3         5s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          5s
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          5s
po/nginx-deployment-2807576163-rmnp1   1/1       Running   0          5s
[root@k8s-master deploy]# kubectl get node 
NAME        STATUS    AGE
10.0.0.12   Ready     8m
10.0.0.13   Ready     7m
=============================================================================
The same resources are visible from master02:
[root@k8s-master02 ~]# kubectl get all 
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           49s

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   8m

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2807576163   3         3         3         49s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          49s
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          49s
po/nginx-deployment-2807576163-rmnp1   1/1       Running   0          49s

1.4: Install and configure Keepalived on master01 and master02

1. Install keepalived:
[root@k8s-master01 ~]# yum install keepalived.x86_64 -y
[root@k8s-master02 ~]# yum install keepalived.x86_64 -y

2. Configure keepalived:
[root@k8s-master01 ~]# cat /etc/keepalived/keepalived.conf 
global_defs {     
    router_id master
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 50
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
}

[root@k8s-master02 ~]# cat /etc/keepalived/keepalived.conf 
global_defs {
    router_id backup
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 50
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
}

3. Test the VIP, as sketched below.
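A minimal check, assuming the eth0 interface configured above: start keepalived on both masters, then confirm that 10.0.0.3 appears in the ip addr output on master01 (the higher-priority node):
[root@k8s-master01 ~]# systemctl start keepalived.service && systemctl enable keepalived.service
[root@k8s-master02 ~]# systemctl start keepalived.service && systemctl enable keepalived.service
[root@k8s-master01 ~]# ip addr show eth0 | grep 10.0.0.3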

1.5: Point kubelet and kube-proxy on every node at the api-server VIP

1. On the nodes, point kubelet at the VIP:
[root@k8s-node-1 ~]# vim /etc/kubernetes/kubelet
KUBELET_API_SERVER="--api-servers=http://10.0.0.3:8080"

[root@k8s-node-2 ~]# vim /etc/kubernetes/kubelet
KUBELET_API_SERVER="--api-servers=http://10.0.0.3:8080"

2. On the nodes, point kube-proxy at the VIP:
[root@k8s-node-1 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.3:8080"

[root@k8s-node-2 ~]# vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.3:8080"

systemctl restart kubelet.service kube-proxy.service
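
As a sanity check (assuming the VIP is currently held by master01), each node should now be able to reach the apiserver through the VIP:
[root@k8s-node-1 ~]# kubectl -s 10.0.0.3:8080 get node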

1.6: Test high availability

1. Stop the keepalived service and the apiserver on master01 (or master02) for a live failover test:
[root@k8s-master ~]# systemctl stop keepalived.service
[root@k8s-master ~]# systemctl stop kube-apiserver.service


2. master02 can still retrieve all of the cluster data:
[root@k8s-master02 ~]# kubectl get all -o wide
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           22m

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE       SELECTOR
svc/kubernetes   10.254.0.1   <none>        443/TCP   30m       <none>

NAME                             DESIRED   CURRENT   READY     AGE       CONTAINER(S)   IMAGE(S)                    SELECTOR
rs/nginx-deployment-2807576163   3         3         3         22m       nginx          10.0.0.11:5000/nginx:1.13   app=nginx,pod-template-hash=2807576163

NAME                                   READY     STATUS    RESTARTS   AGE       IP            NODE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          22m       172.18.55.2   10.0.0.13
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          18s       172.18.55.4   10.0.0.13
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          22m       172.18.55.3   10.0.0.13

3. From a node, test connecting remotely through the VIP (which should now have floated to master02):
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           23m

NAME             CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
svc/kubernetes   10.254.0.1   <none>        443/TCP   31m

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2807576163   3         3         3         23m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          23m
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          1m
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          23m

4. From a node, remotely expose the pod's port as a further test:
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 expose deploy/nginx-deployment --port=80 --target-port=80 --type=NodePort
service "nginx-deployment" exposed
[root@k8s-node-2 ~]# kubectl -s 10.0.0.3:8080 get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           27m

NAME                   CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
svc/kubernetes         10.254.0.1     <none>        443/TCP        35m
svc/nginx-deployment   10.254.48.48   <nodes>       80:48846/TCP   14s

NAME                             DESIRED   CURRENT   READY     AGE
rs/nginx-deployment-2807576163   3         3         3         27m

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2807576163-ft2bx   1/1       Running   0          27m
po/nginx-deployment-2807576163-kdltg   1/1       Running   0          5m
po/nginx-deployment-2807576163-r6ww3   1/1       Running   0          27m
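
To confirm the exposed service actually answers, one final check (the NodePort 48846 comes from the output above; either node IP should work):
[root@k8s-node-2 ~]# curl -I http://10.0.0.12:48846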

This brings the k8s HA cluster build close to completion. There is still plenty of room for improvement, and readers are encouraged to refine it. Because etcd here is a single node, it could be upgraded to an etcd cluster, and keepalived should be paired with a script that probes the service state: when a master's service dies, the VIP immediately floats to the backup node, and when the master recovers, the VIP floats back, keeping the business running 7*24.
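
As a sketch of that health-check idea (the curl path, timings, and weight are illustrative assumptions, not part of the original setup), keepalived's vrrp_script can demote a master whose apiserver stops answering:

vrrp_script check_apiserver {
    # fails when the local apiserver stops answering on the insecure port
    script "/usr/bin/curl -sf http://127.0.0.1:8080/version -o /dev/null"
    interval 3
    weight -60
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 50
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
    track_script {
        check_apiserver
    }
}

While the check fails, master01's effective priority drops to 150-60=90, below master02's 100, so the VIP floats to the backup; once the apiserver recovers, the priority returns to 150 and, with VRRP's default preemption, the VIP floats back to the master.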