6. Kubernetes Cluster Setup

Building a Kubernetes cluster with kubeasz

Node plan

Hostname         IP             Spec
k8s-master-01    172.31.3.110   2C4G
k8s-master-02    172.31.3.111   2C4G
k8s-master-03    172.31.3.112   2C4G
k8s-node-01      172.31.3.120   2C4G
k8s-node-02      172.31.3.121   2C4G
k8s-node-03      172.31.3.122   2C4G
k8s-harbor-01    172.31.3.140   2C4G
k8s-deploy-01    172.31.3.141   2C2G
Haproxy01        172.31.3.144   2C4G
Haproxy02        172.31.3.145   2C4G
k8s-etcd-01      172.31.3.130   2C4G
k8s-etcd-02      172.31.3.131   2C4G
k8s-etcd-03      172.31.3.132   2C4G

Basic environment preparation

Download the kubeasz project and its components

root@k8s-deploy-01:/opt/soft# apt install ansible
root@k8s-deploy-01:/opt/soft# apt install git

root@k8s-deploy-01:/opt/soft#  export release=3.3.1
root@k8s-deploy-01:/opt/soft# wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
root@k8s-deploy-01:/opt/soft# chmod +x ./ezdown
# adjust these version numbers as needed
root@k8s-deploy-01:/opt/soft# vim ezdown
# default settings, can be overridden by cmd line options, see usage
DOCKER_VER=20.10.17
KUBEASZ_VER=3.3.1
K8S_BIN_VER=v1.24.2
EXT_BIN_VER=1.2.0
SYS_PKG_VER=0.4.3
HARBOR_VER=v2.1.3
REGISTRY_MIRROR=CN
root@k8s-deploy-01:/opt/soft#  ./ezdown -D
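
ezdown -D downloads the kubeasz project, the Kubernetes binaries, and the default component images onto the deploy host; by kubeasz convention everything lands under /etc/kubeasz. A quick spot-check sketch (paths follow kubeasz defaults; adjust if you customized them):

ls /etc/kubeasz/bin/kube-apiserver    # kubernetes binaries
ls /etc/kubeasz/down/                 # cached packages and image archives
docker images | grep easzlab          # component images loaded into the local docker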

Sync the Harbor certificates

root@k8s-deploy-01:~# mkdir /etc/docker/certs.d/xmtx.harbor.com/ -p
root@k8s-harbor-01:/opt/soft/certs# scp xmtx.harbor.com.cert 172.31.3.141:/etc/docker/certs.d/xmtx.harbor.com/
root@k8s-harbor-01:/opt/soft/certs# scp xmtx.harbor.com.key  172.31.3.141:/etc/docker/certs.d/xmtx.harbor.com/
root@k8s-harbor-01:/opt/soft/certs# scp ca.crt  172.31.3.141:/etc/docker/certs.d/xmtx.harbor.com/
# test
root@k8s-deploy-01:/etc/docker/certs.d/xmtx.harbor.com# docker login xmtx.harbor.com
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
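
Before relying on the synced files, it can be worth confirming that the certificate actually chains to the copied CA; openssl should print "OK" (an optional check, not part of the original steps):

openssl verify -CAfile /etc/docker/certs.d/xmtx.harbor.com/ca.crt \
    /etc/docker/certs.d/xmtx.harbor.com/xmtx.harbor.com.cert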

Configure passwordless SSH login to the other servers

root@k8s-deploy-01:~# ssh-keygen
# install sshpass, used to push the public key to each k8s server
root@k8s-deploy-01:~# apt install sshpass
root@k8s-deploy-01:/opt# vim key.sh
#!/bin/bash
# target host list
IP="
172.31.3.110
172.31.3.111
172.31.3.112
172.31.3.120
172.31.3.121
172.31.3.122
172.31.3.130
172.31.3.131
172.31.3.132
"
for node in ${IP};do
    sshpass -p root ssh-copy-id ${node} -o StrictHostKeyChecking=no
    echo "${node} key copied"
    ssh ${node} ln -sv /usr/bin/python3 /usr/bin/python
    echo "${node} symlink to /usr/bin/python3 created"
done
root@k8s-deploy-01:/opt# chmod +x key.sh 
root@k8s-deploy-01:/opt# ./key.sh
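
Before handing the hosts over to ansible, a loop like the following can confirm that passwordless login (and the python symlink) really works everywhere; BatchMode=yes makes ssh fail instead of falling back to a password prompt (a verification sketch, not part of the original script):

for node in 172.31.3.110 172.31.3.111 172.31.3.112 172.31.3.120 172.31.3.121 \
            172.31.3.122 172.31.3.130 172.31.3.131 172.31.3.132; do
    ssh -o BatchMode=yes ${node} 'hostname && python --version' || echo "${node} FAILED"
done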

Generate and customize the hosts file

root@k8s-deploy-01:/etc/kubeasz# ./ezctl new k8s-cluster-01
2022-07-27 15:21:11 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-cluster-01
2022-07-27 15:21:11 DEBUG set versions
2022-07-27 15:21:11 DEBUG cluster k8s-cluster-01: files successfully created.
2022-07-27 15:21:11 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-cluster-01/hosts'
2022-07-27 15:21:11 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-cluster-01/config.yml'

Edit the hosts file

Specify the etcd nodes, master nodes, worker nodes, VIP, runtime, network plugin type, Service IP and Pod IP ranges, and other settings.

root@k8s-deploy-01:/etc/kubeasz# vim /etc/kubeasz/clusters/k8s-cluster-01/hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]   # etcd nodes
172.31.3.130
172.31.3.131
172.31.3.132

# master node(s)
[kube_master]
172.31.3.110
172.31.3.111

# work node(s)
[kube_node]
172.31.3.120
172.31.3.121

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="100.200.0.0/16"

# NodePort Range  
NODE_PORT_RANGE="30000-32767"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="xmtx.local"

# Binaries Directory
bin_dir="/usr/local/bin"

Edit the config.yml file

Configure the local Harbor registry, the pod subnet mask length per node, the maximum number of pods per node, and which add-ons get installed.

# push the pause image to the local Harbor first
root@k8s-deploy-01:/etc/kubeasz# docker tag easzlab/pause:3.7  xmtx.harbor.com/baseimages/pause:3.7
root@k8s-deploy-01:/etc/kubeasz# docker push xmtx.harbor.com/baseimages/pause:3.7
The push refers to repository [xmtx.harbor.com/baseimages/pause]
1cb555415fd3: Pushed 
3.7: digest: sha256:445a99db22e9add9bfb15ddb1980861a329e5dff5c88d7eec9cbf08b6b2f4eb1 size: 526
# configure name resolution: echo "172.31.3.140 xmtx.harbor.com" >> /etc/hosts

# edit config.yml
root@k8s-deploy-01:/etc/kubeasz# vim /etc/kubeasz/clusters/k8s-cluster-01/config.yml
SANDBOX_IMAGE: "xmtx.harbor.com/baseimages/pause:3.7"   # point to the local Harbor
# pod subnet mask length on each node (determines how many pod IPs a node can allocate)
# if flannel runs with --kube-subnet-mgr, it reads this setting to assign each node's pod subnet
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 21
# maximum pods per node
MAX_PODS: 500
# automatic CoreDNS install
dns_install: "no"  # we install CoreDNS ourselves later
ENABLE_LOCAL_DNS_CACHE: false

# automatic metrics-server install
metricsserver_install: "no"  # installed manually later

# automatic dashboard install
dashboard_install: "no"   # installed manually later
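
A quick sanity check on the sizing above: with CLUSTER_CIDR=100.200.0.0/16 and NODE_CIDR_LEN=21, every node gets a /21 pod subnet, which comfortably covers MAX_PODS=500 but caps the cluster at 32 nodes (a worked example, not part of the playbooks):

echo $(( 2 ** (32 - 21) - 2 ))   # ~2046 usable pod IPs per node, well above MAX_PODS=500
echo $(( 2 ** (21 - 16) ))       # 32 per-node /21 subnets fit in the /16 cluster CIDR

If the cluster may ever exceed 32 nodes, increase NODE_CIDR_LEN: a longer prefix means smaller per-node subnets, but more of them.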

root@k8s-deploy-01:/etc/kubeasz# vim playbooks/01.prepare.yml
- hosts:
  - kube_master
  - kube_node
  - etcd
    #- ex_lb
    #- chrony

Deploy the cluster

Environment initialization

root@k8s-deploy-01:~# cd /etc/kubeasz/
root@k8s-deploy-01:/etc/kubeasz# ./ezctl --help
Usage: ezctl COMMAND [args]
-------------------------------------------------------------------------------------
Cluster setups:
    list		             to list all of the managed clusters
    checkout    <cluster>            to switch default kubeconfig of the cluster
    new         <cluster>            to start a new k8s deploy with name 'cluster'
    setup       <cluster>  <step>    to setup a cluster, also supporting a step-by-step way
    start       <cluster>            to start all of the k8s services stopped by 'ezctl stop'
    stop        <cluster>            to stop all of the k8s services temporarily
    upgrade     <cluster>            to upgrade the k8s cluster
    destroy     <cluster>            to destroy the k8s cluster
    backup      <cluster>            to backup the cluster state (etcd snapshot)
    restore     <cluster>            to restore the cluster state from backups
    start-aio		             to quickly setup an all-in-one cluster with 'default' settings

Cluster ops:
    add-etcd    <cluster>  <ip>      to add a etcd-node to the etcd cluster
    add-master  <cluster>  <ip>      to add a master node to the k8s cluster
    add-node    <cluster>  <ip>      to add a work node to the k8s cluster
    del-etcd    <cluster>  <ip>      to delete a etcd-node from the etcd cluster
    del-master  <cluster>  <ip>      to delete a master node from the k8s cluster
    del-node    <cluster>  <ip>      to delete a work node from the k8s cluster

Extra operation:
    kcfg-adm    <cluster>  <args>    to manage client kubeconfig of the k8s cluster

Use "ezctl help <command>" for more information about a given command.
# setup arguments
root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup --help
Usage: ezctl setup <cluster> <step>
available steps:
    01  prepare            to prepare CA/certs & kubeconfig & other system settings 
    02  etcd               to setup the etcd cluster
    03  container-runtime  to setup the container runtime(docker or containerd)
    04  kube-master        to setup the master nodes
    05  kube-node          to setup the worker nodes
    06  network            to setup the network plugin
    07  cluster-addon      to setup other useful plugins
    90  all                to run 01~07 all at once
    10  ex-lb              to install external loadbalance for accessing k8s from outside
    11  harbor             to install a new harbor server or to integrate with an existed one

examples: ./ezctl setup test-k8s 01  (or ./ezctl setup test-k8s prepare)
	  ./ezctl setup test-k8s 02  (or ./ezctl setup test-k8s etcd)
          ./ezctl setup test-k8s all
          ./ezctl setup test-k8s 04 -t restart_master
# initialize
root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 01

172.31.3.110               : ok=27   changed=24   unreachable=0    failed=0    skipped=113  rescued=0    ignored=0   
172.31.3.111               : ok=27   changed=24   unreachable=0    failed=0    skipped=113  rescued=0    ignored=0   
172.31.3.120               : ok=26   changed=23   unreachable=0    failed=0    skipped=114  rescued=0    ignored=0   
172.31.3.121               : ok=26   changed=22   unreachable=0    failed=0    skipped=114  rescued=0    ignored=0   
172.31.3.130               : ok=24   changed=21   unreachable=0    failed=0    skipped=116  rescued=0    ignored=0   
172.31.3.131               : ok=24   changed=21   unreachable=0    failed=0    skipped=116  rescued=0    ignored=0   
172.31.3.132               : ok=24   changed=21   unreachable=0    failed=0    skipped=116  rescued=0    ignored=0   
localhost                  : ok=33   changed=31   unreachable=0    failed=0    skipped=11   rescued=0    ignored=0

Deploy the etcd cluster

root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 02

PLAY RECAP ********************************************************************************************************
172.31.3.130               : ok=10   changed=9    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
172.31.3.131               : ok=10   changed=8    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
172.31.3.132               : ok=10   changed=8    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0 

# verify the etcd service on each etcd server
export NODE_IPS="172.31.3.130 172.31.3.131 172.31.3.132"
root@k8s-etcd-01:/usr/local/bin# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/local/bin/etcdctl \
>  --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem \
>  --cert=/etc/kubernetes/ssl/etcd.pem \
>  --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
https://172.31.3.130:2379 is healthy: successfully committed proposal: took = 5.678559ms
https://172.31.3.131:2379 is healthy: successfully committed proposal: took = 9.260109ms
https://172.31.3.132:2379 is healthy: successfully committed proposal: took = 7.170799ms
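
Beyond the health probe, etcdctl can also report each member's DB size, leader status, and raft term; the same TLS flags apply. An optional extra check, reusing NODE_IPS from above:

# prints a table per endpoint: version, DB size, leader flag, raft term/index
for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/local/bin/etcdctl \
 --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem \
 --cert=/etc/kubernetes/ssl/etcd.pem \
 --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status -w table; done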

Deploy the container runtime

# add the local Harbor registry around line 156 of the template, right below the {% endif %}
root@k8s-deploy-01:/etc/kubeasz# vim roles/containerd/templates/config.toml.j2
 {% endif %}
         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."xmtx.harbor.com"]
           endpoint = ["https://xmtx.harbor.com"]
         [plugins."io.containerd.grpc.v1.cri".registry.configs."xmtx.harbor.com".tls]
           insecure_skip_verify = true
         [plugins."io.containerd.grpc.v1.cri".registry.configs."xmtx.harbor.com".auth]
           username = "admin"
           password = "123456"
# deploy the runtime
root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 03
PLAY RECAP *******************************************************************************************************
172.31.3.110               : ok=11   changed=10   unreachable=0    failed=0    skipped=18   rescued=0    ignored=0   
172.31.3.111               : ok=11   changed=10   unreachable=0    failed=0    skipped=15   rescued=0    ignored=0   
172.31.3.120               : ok=11   changed=10   unreachable=0    failed=0    skipped=15   rescued=0    ignored=0   
172.31.3.121               : ok=11   changed=10   unreachable=0    failed=0    skipped=15   rescued=0    ignored=0

# test containerd
root@k8s-node-01:~# crictl pull xmtx.harbor.com/baseimages/pause:3.7
Image is up to date for sha256:221177c6082a88ea4f6240ab2450d540955ac6f4d5454f0e15751b653ebda165
# verify the version
root@k8s-node-01:~# containerd -v
containerd github.com/containerd/containerd v1.6.4 212e8b6fa2f44b9c21b2798135fc6fb7c53efc16

Deploy the master nodes

root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 04
PLAY RECAP ********************************************************************************************************
172.31.3.110               : ok=55   changed=49   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
172.31.3.111               : ok=53   changed=47   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0

# verify
root@k8s-deploy-01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.3.110   Ready,SchedulingDisabled   master   74s   v1.24.2
172.31.3.111   Ready,SchedulingDisabled   master   74s   v1.24.2

Deploy the worker nodes

root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 05
PLAY RECAP *********************************************************************************************************
172.31.3.120               : ok=35   changed=33   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
172.31.3.121               : ok=35   changed=33   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0
# verify
root@k8s-deploy-01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE     VERSION
172.31.3.110   Ready,SchedulingDisabled   master   5m51s   v1.24.2
172.31.3.111   Ready,SchedulingDisabled   master   5m51s   v1.24.2
172.31.3.120   Ready                      node     67s     v1.24.2
172.31.3.121   Ready                      node     67s     v1.24.2

Deploy the network plugin (Calico)

# re-tag the images and push them to the local Harbor
root@k8s-deploy-01:/etc/kubeasz# docker tag calico/node:v3.19.4 xmtx.harbor.com/baseimages/calico-node:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker tag calico/pod2daemon-flexvol:v3.19.4 xmtx.harbor.com/baseimages/calico-pod2daemon-flexvol:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker tag calico/cni:v3.19.4 xmtx.harbor.com/baseimages/calico-cni:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker tag calico/kube-controllers:v3.19.4 xmtx.harbor.com/baseimages/calico-kube-controllers:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker push xmtx.harbor.com/baseimages/calico-node:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker push xmtx.harbor.com/baseimages/calico-pod2daemon-flexvol:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker push xmtx.harbor.com/baseimages/calico-cni:v3.19.4
root@k8s-deploy-01:/etc/kubeasz# docker push xmtx.harbor.com/baseimages/calico-kube-controllers:v3.19.4
# edit the manifest template to use the local images
root@k8s-deploy-01:/etc/kubeasz# vim roles/calico/templates/calico-v3.19.yaml.j2
        - name: install-cni
          image: xmtx.harbor.com/baseimages/calico-cni:v3.19.4
          
        - name: calico-node
          image: xmtx.harbor.com/baseimages/calico-node:v3.19.4

        - name: flexvol-driver
          image: xmtx.harbor.com/baseimages/calico-pod2daemon-flexvol:v3.19.4
# deploy
root@k8s-deploy-01:/etc/kubeasz# ./ezctl setup k8s-cluster-01 06
PLAY RECAP *********************************************************************************************************
172.31.3.110               : ok=13   changed=11   unreachable=0    failed=0    skipped=36   rescued=0    ignored=0   
172.31.3.111               : ok=9    changed=7    unreachable=0    failed=0    skipped=22   rescued=0    ignored=0   
172.31.3.120               : ok=9    changed=7    unreachable=0    failed=0    skipped=22   rescued=0    ignored=0   
172.31.3.121               : ok=9    changed=8    unreachable=0    failed=0    skipped=22   rescued=0    ignored=0

# verify
root@k8s-deploy-01:/etc/kubeasz# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-556c8dd7c8-7p2d7   1/1     Running   0          2m53s
kube-system   calico-node-4dw6n                          1/1     Running   0          2m53s
kube-system   calico-node-kkpq2                          1/1     Running   0          2m53s
kube-system   calico-node-srj9z                          1/1     Running   0          2m53s
kube-system   calico-node-xggr6                          1/1     Running   0          2m53s

root@k8s-master-01:~# calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 172.31.3.111 | node-to-node mesh | up    | 08:53:01 | Established |
| 172.31.3.120 | node-to-node mesh | up    | 08:53:00 | Established |
| 172.31.3.121 | node-to-node mesh | up    | 08:52:59 | Established |
+--------------+-------------------+-------+----------+-------------+
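
It is also worth confirming that the Calico IP pool matches the CLUSTER_CIDR set in the hosts file (an optional check; the wide output includes the CIDR and IPIP mode columns):

calicoctl get ippool -o wide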

Verify the network

# create two test pods
root@k8s-deploy-01:/etc/kubeasz# kubectl create ns myserver
namespace/myserver created
root@k8s-deploy-01:/etc/kubeasz# kubectl run net-test1 --image=xmtx.harbor.com/baseimages/centos:7.9.2009 -n myserver  sleep 10000000
pod/net-test1 created
root@k8s-deploy-01:/etc/kubeasz# kubectl run net-test2 --image=xmtx.harbor.com/baseimages/centos:7.9.2009 -n myserver  sleep 10000000
pod/net-test2 created
root@k8s-deploy-01:/etc/kubeasz# kubectl get pod -n myserver -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP                NODE           NOMINATED NODE   READINESS GATES
net-test1   1/1     Running   0          39s   100.200.154.193   172.31.3.120   <none>           <none>
net-test2   1/1     Running   0          45s   100.200.44.195    172.31.3.121   <none>           <none>
# exec into a pod
root@k8s-deploy-01:/etc/kubeasz# kubectl exec -it net-test1 bash -n myserver 
# ping a worker node
[root@net-test1 /]# ping 172.31.3.121
PING 172.31.3.121 (172.31.3.121) 56(84) bytes of data.
64 bytes from 172.31.3.121: icmp_seq=1 ttl=63 time=0.597 ms
# ping net-test2
[root@net-test1 /]# ping 100.200.44.195
PING 100.200.44.195 (100.200.44.195) 56(84) bytes of data.
64 bytes from 100.200.44.195: icmp_seq=1 ttl=62 time=0.581 ms

Cluster node scaling

Adding a master and adding a node

Current cluster state:

root@k8s-deploy-01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.3.110   Ready,SchedulingDisabled   master   55m   v1.24.2
172.31.3.111   Ready,SchedulingDisabled   master   55m   v1.24.2
172.31.3.120   Ready                      node     50m   v1.24.2
172.31.3.121   Ready                      node     50m   v1.24.2

Add a master node

root@k8s-deploy-01:/etc/kubeasz# ./ezctl add-master k8s-cluster-01 172.31.3.112
root@k8s-deploy-01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.3.110   Ready,SchedulingDisabled   master   63m   v1.24.2
172.31.3.111   Ready,SchedulingDisabled   master   63m   v1.24.2
172.31.3.112   Ready,SchedulingDisabled   master   54s   v1.24.2
172.31.3.120   Ready                      node     58m   v1.24.2
172.31.3.121   Ready                      node     58m   v1.24.2

Add a worker node

root@k8s-deploy-01:/etc/kubeasz# ./ezctl add-node k8s-cluster-01 172.31.3.122
root@k8s-deploy-01:/etc/kubeasz# kubectl get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.3.110   Ready,SchedulingDisabled   master   74m   v1.24.2
172.31.3.111   Ready,SchedulingDisabled   master   74m   v1.24.2
172.31.3.112   Ready,SchedulingDisabled   master   11m   v1.24.2
172.31.3.120   Ready                      node     69m   v1.24.2
172.31.3.121   Ready                      node     69m   v1.24.2
172.31.3.122   Ready                      node     44s   v1.24.2  # the newly added node

Verify Calico

root@k8s-master-01:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 172.31.3.111 | node-to-node mesh | up    | 09:33:16 | Established |
| 172.31.3.112 | node-to-node mesh | up    | 09:32:45 | Established |
| 172.31.3.120 | node-to-node mesh | up    | 09:33:06 | Established |
| 172.31.3.121 | node-to-node mesh | up    | 09:32:55 | Established |
| 172.31.3.122 | node-to-node mesh | up    | 09:43:42 | Established |
+--------------+-------------------+-------+----------+-------------+

Upgrading the cluster

Download the binary packages

root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# wget https://dl.k8s.io/v1.24.3/kubernetes-server-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# wget https://dl.k8s.io/v1.24.3/kubernetes-client-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# wget https://dl.k8s.io/v1.24.3/kubernetes-node-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# wget https://dl.k8s.io/v1.24.3/kubernetes.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# tar xf kubernetes.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# tar xf kubernetes-server-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# tar xf kubernetes-node-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# tar xf kubernetes-client-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3# ll
total 489448
drwxr-xr-x  3 root root      4096 Jul 27 17:48 ./
drwxr-xr-x  3 root root      4096 Jul 27 17:46 ../
drwxr-xr-x 10 root root      4096 Jul 13 22:41 kubernetes/
-rw-r--r--  1 root root  30452505 Jul 14 03:55 kubernetes-client-linux-amd64.tar.gz
-rw-r--r--  1 root root 123370188 Jul 14 03:55 kubernetes-node-linux-amd64.tar.gz
-rw-r--r--  1 root root 346822609 Jul 14 03:55 kubernetes-server-linux-amd64.tar.gz
-rw-r--r--  1 root root    527537 Jul 14 03:55 kubernetes.tar.gz
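
Before shipping these binaries to production nodes it is prudent to verify their checksums. A sketch, under the assumption that the release publishes a .sha256 file next to each tarball; if that URL pattern does not exist for your mirror, compare against the hashes listed in the v1.24.3 CHANGELOG instead:

for f in kubernetes-server-linux-amd64.tar.gz kubernetes-node-linux-amd64.tar.gz \
         kubernetes-client-linux-amd64.tar.gz; do
    echo "$(curl -sL https://dl.k8s.io/v1.24.3/${f}.sha256)  ${f}" | sha256sum -c -
done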

Upgrade the master nodes

Comment out the master being upgraded on the worker nodes

# first, comment out the master to be upgraded in each node's kube-lb.conf; repeat on every node
root@k8s-node-01:~# vim /etc/kube-lb/conf/kube-lb.conf
stream {
    upstream backend {
        server 172.31.3.112:6443    max_fails=2 fail_timeout=3s;
       #server 172.31.3.110:6443    max_fails=2 fail_timeout=3s;
        server 172.31.3.111:6443    max_fails=2 fail_timeout=3s;
    }
# reload the configuration
root@k8s-node-01:~# systemctl reload kube-lb.service
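
Every node runs its own kube-lb, so the same edit has to land on all of them. A sketch that automates it from the deploy host (the sed pattern is an assumption; confirm it matches your kube-lb.conf before using it):

for node in 172.31.3.120 172.31.3.121 172.31.3.122; do
    ssh ${node} "sed -i 's|^\( *\)server 172.31.3.110:6443|\1#server 172.31.3.110:6443|' \
        /etc/kube-lb/conf/kube-lb.conf && systemctl reload kube-lb.service"
done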

Stop the services on the master node

root@k8s-master-01:~# systemctl stop kube-proxy kube-apiserver kube-controller-manager kube-scheduler kubelet
root@k8s-master-02:~# kubectl get node
NAME           STATUS                        ROLES    AGE     VERSION
172.31.3.110   NotReady,SchedulingDisabled   master   3h29m   v1.24.2
172.31.3.111   Ready,SchedulingDisabled      master   3h29m   v1.24.2
172.31.3.112   Ready,SchedulingDisabled      master   146m    v1.24.2

Replace the binaries

root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3/kubernetes/server/bin# ./kube-apiserver --version
Kubernetes v1.24.3
root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3/kubernetes/server/bin# scp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl 172.31.3.110:/usr/local/bin/
kube-apiserver                                                                                                                                              100%  120MB  95.0MB/s   00:01    
kube-controller-manager                                                                                                                                     100%  110MB 115.6MB/s   00:00    
kube-scheduler                                                                                                                                              100%   45MB 126.2MB/s   00:00    
kube-proxy                                                                                                                                                  100%   40MB 112.1MB/s   00:00    
kubelet                                                                                                                                                     100%  111MB  74.0MB/s   00:01    
kubectl                                                                                                                                                     100%   44MB  85.6MB/s   00:00

root@k8s-master-01:/usr/local/bin# ./kube-apiserver --version
Kubernetes v1.24.3

Verify

root@k8s-master-01:/usr/local/bin# systemctl start kube-proxy kube-apiserver kube-controller-manager kube-scheduler kubelet
root@k8s-master-01:/usr/local/bin# kubectl get node
NAME           STATUS                     ROLES    AGE     VERSION
172.31.3.110   Ready,SchedulingDisabled   master   3h39m   v1.24.3  # the upgraded master
172.31.3.111   Ready,SchedulingDisabled   master   3h39m   v1.24.2
172.31.3.112   Ready,SchedulingDisabled   master   156m    v1.24.2
172.31.3.120   Ready                      node     3h34m   v1.24.2
172.31.3.121   Ready                      node     3h34m   v1.24.2
172.31.3.122   Ready                      node     145m    v1.24.2
root@k8s-master-01:/usr/local/bin# kube-scheduler --version
Kubernetes v1.24.3
root@k8s-master-01:/usr/local/bin# kube-proxy  --version
Kubernetes v1.24.3
root@k8s-master-01:/usr/local/bin# kube-controller-manager --version
Kubernetes v1.24.3
root@k8s-master-01:/usr/local/bin# kube-apiserver --version
Kubernetes v1.24.3
root@k8s-master-01:/usr/local/bin# kubelet --version
Kubernetes v1.24.3

其他两个master节点进行同样操作
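
The whole per-master cycle can also be scripted from the deploy host. A rough sketch, run from kubernetes/server/bin, assuming the kube-lb comment/uncomment steps described above are performed around it:

for master in 172.31.3.111 172.31.3.112; do
    # step 1: comment this master out of kube-lb.conf on every node and reload kube-lb (see above)
    ssh ${master} "systemctl stop kube-proxy kube-apiserver kube-controller-manager kube-scheduler kubelet"
    scp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl ${master}:/usr/local/bin/
    ssh ${master} "systemctl start kube-proxy kube-apiserver kube-controller-manager kube-scheduler kubelet"
    # step 2: restore the kube-lb.conf entry on every node and reload kube-lb again
done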

Final result:

root@k8s-master-01:/usr/local/bin# kubectl get node
NAME           STATUS                     ROLES    AGE     VERSION
172.31.3.110   Ready,SchedulingDisabled   master   3h44m   v1.24.3
172.31.3.111   Ready,SchedulingDisabled   master   3h44m   v1.24.3
172.31.3.112   Ready,SchedulingDisabled   master   161m    v1.24.3
172.31.3.120   Ready                      node     3h39m   v1.24.2
172.31.3.121   Ready                      node     3h39m   v1.24.2
172.31.3.122   Ready                      node     150m    v1.24.2

Upgrade the worker nodes

To upgrade a worker node, it must first be drained from a master: kubectl drain <node-ip> --ignore-daemonsets --force

Drain the node

root@k8s-master-01:~# kubectl drain 172.31.3.120 --ignore-daemonsets --force
node/172.31.3.120 cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-pdqwn; deleting Pods that declare no controller: myserver/net-test1
evicting pod myserver/net-test1
evicting pod kube-system/calico-kube-controllers-556c8dd7c8-7p2d7
pod/calico-kube-controllers-556c8dd7c8-7p2d7 evicted
pod/net-test1 evicted
node/172.31.3.120 drained
root@k8s-master-01:~# kubectl get node
NAME           STATUS                     ROLES    AGE    VERSION
172.31.3.110   Ready,SchedulingDisabled   master   4h6m   v1.24.3
172.31.3.111   Ready,SchedulingDisabled   master   4h6m   v1.24.3
172.31.3.112   Ready,SchedulingDisabled   master   3h3m   v1.24.3
172.31.3.120   Ready,SchedulingDisabled   node     4h1m   v1.24.2  # the drained node, now unschedulable
172.31.3.121   Ready                      node     4h1m   v1.24.2
172.31.3.122   Ready                      node     172m   v1.24.2

Stop the services

root@k8s-node-01:~# systemctl stop kube-proxy kubelet

Replace the binaries

root@k8s-deploy-01:/opt/soft/kubernetes-1.24.3/kubernetes/server/bin# scp kubelet  kube-proxy kubectl 172.31.3.120:/usr/local/bin

Verify

# start the services
root@k8s-node-01:/usr/local/bin# systemctl  start kubelet kube-proxy

# uncordon the node
root@k8s-master-01:~# kubectl uncordon 172.31.3.120
node/172.31.3.120 uncordoned
root@k8s-master-01:~# kubectl get nodes 
NAME           STATUS                     ROLES    AGE     VERSION
172.31.3.110   Ready,SchedulingDisabled   master   4h12m   v1.24.3
172.31.3.111   Ready,SchedulingDisabled   master   4h12m   v1.24.3
172.31.3.112   Ready,SchedulingDisabled   master   3h9m    v1.24.3
172.31.3.120   Ready                      node     4h7m    v1.24.3
172.31.3.121   Ready                      node     4h7m    v1.24.2
172.31.3.122   Ready                      node     178m    v1.24.2

Repeat the same steps on the remaining two nodes.
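
The remaining nodes can be handled with the same drain / stop / copy / start / uncordon cycle. A sketch from the deploy host (kubectl uses the deploy host's admin kubeconfig; scp runs from kubernetes/server/bin):

for node in 172.31.3.121 172.31.3.122; do
    kubectl drain ${node} --ignore-daemonsets --force
    ssh ${node} "systemctl stop kubelet kube-proxy"
    scp kubelet kube-proxy kubectl ${node}:/usr/local/bin/
    ssh ${node} "systemctl start kubelet kube-proxy"
    kubectl uncordon ${node}
done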

Final result

root@k8s-master-01:~# kubectl get nodes 
NAME           STATUS                     ROLES    AGE     VERSION
172.31.3.110   Ready,SchedulingDisabled   master   4h15m   v1.24.3
172.31.3.111   Ready,SchedulingDisabled   master   4h15m   v1.24.3
172.31.3.112   Ready,SchedulingDisabled   master   3h13m   v1.24.3
172.31.3.120   Ready                      node     4h10m   v1.24.3
172.31.3.121   Ready                      node     4h10m   v1.24.3
172.31.3.122   Ready                      node     3h2m    v1.24.3

Upgrade the container runtime

Upgrading containerd: under normal circumstances, drain the pods first, then stop the services (or reboot the server), replace the binaries, and finally start the services again.

Current state

root@k8s-master-01:~# kubectl get node -o wide
NAME           STATUS                     ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
172.31.3.110   Ready,SchedulingDisabled   master   4h54m   v1.24.3   172.31.3.110   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.111   Ready,SchedulingDisabled   master   4h54m   v1.24.3   172.31.3.111   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.112   Ready,SchedulingDisabled   master   3h52m   v1.24.3   172.31.3.112   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.120   Ready                      node     4h50m   v1.24.3   172.31.3.120   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.121   Ready                      node     4h50m   v1.24.3   172.31.3.121   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.122   Ready                      node     3h41m   v1.24.3   172.31.3.122   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4

Download the binary packages

#runc
root@k8s-deploy-01:/opt/soft/containerd# wget https://github.com/opencontainers/runc/releases/download/v1.1.3/runc.amd64
root@k8s-deploy-01:/opt/soft/containerd# mv runc.amd64 runc
root@k8s-deploy-01:/opt/soft/containerd# chmod a+x runc
#containerd
root@k8s-deploy-01:/opt/soft/containerd# wget https://github.com/containerd/containerd/releases/download/v1.6.6/containerd-1.6.6-linux-amd64.tar.gz
root@k8s-deploy-01:/opt/soft/containerd# tar zxvf containerd-1.6.6-linux-amd64.tar.gz 
bin/
bin/containerd-shim
bin/containerd
bin/containerd-shim-runc-v1
bin/containerd-stress
bin/containerd-shim-runc-v2
bin/ctr

Upgrade

# drain the node
root@k8s-master-01:~# kubectl drain 172.31.3.120 --ignore-daemonsets --force
# stop the services (on the node being upgraded, not the master)
root@k8s-node-01:~# systemctl stop kubelet kube-proxy containerd
# replace the binaries
root@k8s-deploy-01:/opt/soft/containerd/bin# scp ./* 172.31.3.120:/usr/local/bin/
# start the services
root@k8s-node-01:~# systemctl start kubelet kube-proxy containerd
# uncordon the node
root@k8s-master-01:~# kubectl uncordon 172.31.3.120
# verify the versions
root@k8s-master-01:~# kubectl get node -o wide
NAME           STATUS                        ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
172.31.3.110   Ready,SchedulingDisabled      master   5h3m    v1.24.3   172.31.3.110   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.111   NotReady,SchedulingDisabled   master   5h3m    v1.24.3   172.31.3.111   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.112   Ready,SchedulingDisabled      master   4h1m    v1.24.3   172.31.3.112   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.120   Ready                         node     4h59m   v1.24.3   172.31.3.120   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6 # the upgraded containerd
172.31.3.121   Ready                         node     4h59m   v1.24.3   172.31.3.121   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
172.31.3.122   Ready                         node     3h50m   v1.24.3   172.31.3.122   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.4
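
The remaining hosts follow the same cycle. A sketch from the deploy host, run from /opt/soft/containerd/bin (copy the new runc alongside if you are replacing it as well):

for host in 172.31.3.110 172.31.3.111 172.31.3.112 172.31.3.121 172.31.3.122; do
    kubectl drain ${host} --ignore-daemonsets --force
    ssh ${host} "systemctl stop kubelet kube-proxy containerd"
    scp ./* ${host}:/usr/local/bin/
    ssh ${host} "systemctl start kubelet kube-proxy containerd"
    kubectl uncordon ${host}
done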

Final check

root@k8s-master-01:~# kubectl get node -o wide
NAME           STATUS                     ROLES    AGE     VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME
172.31.3.110   Ready,SchedulingDisabled   master   5h10m   v1.24.3   172.31.3.110   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
172.31.3.111   Ready,SchedulingDisabled   master   5h10m   v1.24.3   172.31.3.111   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
172.31.3.112   Ready,SchedulingDisabled   master   4h8m    v1.24.3   172.31.3.112   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
172.31.3.120   Ready                      node     5h6m    v1.24.3   172.31.3.120   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
172.31.3.121   Ready                      node     5h6m    v1.24.3   172.31.3.121   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
172.31.3.122   Ready                      node     3h57m   v1.24.3   172.31.3.122   <none>        Ubuntu 20.04.4 LTS   5.4.0-122-generic   containerd://1.6.6
