架构第五次作业-20230807

一、基于二进制部署kubernetes v1.27.x高可用环境

【实验环境】

# master 节点
k8s-master1-120  192.168.119.120
k8s-master2-121  192.168.119.121 扩展用

# node 节点
k8s-node1-122  192.168.119.122
k8s-node2-123  192.168.119.123  扩展用

# HAproxy + keepalived + 部署节点
HAproxy-k8s-apply-124  192.168.119.124
VIP  192.168.119.200/201/202

# etcd 节点
k8s-etcd1-125  192.168.119.125
k8s-etcd2-126  192.168.119.126

# harbor 仓库
192.168.119.113

【实验过程】

实验之前确保各节点时区一致、时间同步。

# 确保时区一致
timedatectl set-timezone Asia/Shanghai
# 每隔5分钟同步一次时间
*/5 * * * * /usr/sbin/ntpdate time1.aliyun.com &> /dev/null && hwclock -w &> /dev/null
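# 若节点未安装 ntpdate,可先安装并手动同步一次(以 Ubuntu 为例,仅作示例)
apt -y install ntpdate && ntpdate time1.aliyun.com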

1.1 部署负载均衡

root@HAproxy-k8s-apply-124:~# apt -y install haproxy
root@HAproxy-k8s-apply-124:~# apt -y install keepalived
root@HAproxy-k8s-apply-124:~# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    garp_master_delay 10
    smtp_alert
    virtual_router_id 200        # 注意router-id不可以冲突
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.119.200 label eth0:1        # 定义 VIP
        192.168.119.201 label eth0:2        # 定义 VIP
        192.168.119.202 label eth0:3        # 定义 VIP
    }
}
root@HAproxy-k8s-apply-124:~# systemctl restart keepalived.service
root@HAproxy-k8s-apply-124:~# systemctl enable keepalived
root@HAproxy-k8s-apply-124:~# hostname -I
192.168.119.124 192.168.119.200 192.168.119.201 192.168.119.202

# 因为目前准备启动一个master节点,故先只写一个
root@HAproxy-k8s-apply-124:~# vim /etc/haproxy/haproxy.cfg
···
listen k8s-api-6443
  bind 192.168.119.200:6443
  mode tcp
  server server1 192.168.119.120:6443 check inter 3s fall 3 rise 3
root@HAproxy-k8s-apply-124:~# systemctl restart haproxy.service 
root@HAproxy-k8s-apply-124:~# systemctl enable haproxy
root@HAproxy-k8s-apply-124:~# ss -ntl | grep 6443
LISTEN 0      4096   192.168.119.200:6443       0.0.0.0:*
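# 后续若将 192.168.119.121 也部署为 master,可在同一 listen 段中再追加一条 server(示例配置,当前尚未执行):
listen k8s-api-6443
  bind 192.168.119.200:6443
  mode tcp
  server server1 192.168.119.120:6443 check inter 3s fall 3 rise 3
  server server2 192.168.119.121:6443 check inter 3s fall 3 rise 3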

1.2 部署https的harbor节点

# 部署过程不再详细展示
root@ubuntu20-server3-113:~# cat /usr/local/src/harbor/harbor.yml
···
hostname: harbor.wuhaolam.top
# http related config
http:
  port: 80
# https related config
https:
  port: 443
  certificate: /data/certfile/harbor.wuhaolam.top.pem
  private_key: /data/privatekey/harbor.wuhaolam.top.key
···

image

1.3 kubeasz 部署高可用 kubernetes

官方文档:https://github.com/easzlab/kubeasz
image

  • kube-lb 是 node 节点访问 master 时使用的集群内部负载均衡(部署在各节点本地)
  • HAproxy 是客户端访问 node 节点走的负载均衡服务
  • 两者也可以共用外部的负载均衡服务,但为了避免外部负载均衡压力过大,更推荐分开部署

1.3.1 在部署节点上配置免密登录至master、node、etcd节点

# 准备免密登录配置
root@HAproxy-k8s-apply-124:~# vim key_login.sh
#!/bin/bash
  
IPLIST='
192.168.119.120
192.168.119.121
192.168.119.122
192.168.119.123
192.168.119.125
192.168.119.126
'
PASSWORD='wuhaolam'

if [ ! -e /root/.ssh/id_rsa ];then
  echo -e "\E[1;32m开始生成密钥对...\E[0m"
  ssh-keygen -P "" -f /root/.ssh/id_rsa &> /dev/null
else
  echo -e "\E[1;34m密钥对已经存在\E[0m"
fi

if ! dpkg -L sshpass &> /dev/null;then
  apt -y install sshpass &> /dev/null || { echo -e '\E[1;31m"error: sshpass package install failed!"\E[0m'; exit 1; }
fi

for IP in $IPLIST; do
  sshpass -p $PASSWORD ssh-copy-id -o StrictHostKeyChecking=no $IP &> /dev/null
  echo $IP key_authentication already done.
done

echo -e "\E[1;32msuccessful\E[0m"

root@HAproxy-k8s-apply-124:~# bash key_login.sh

# 验证免密登录到指定设备
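# 示例:用一个循环快速验证各节点均可免密执行命令
root@HAproxy-k8s-apply-124:~# for IP in 192.168.119.120 192.168.119.121 192.168.119.122 192.168.119.123 192.168.119.125 192.168.119.126; do ssh -o BatchMode=yes $IP hostname; done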

image

1.3.2 在部署节点上下载kubeasz项目及组件

# kubeasz 基于ansible部署k8s集群

root@HAproxy-k8s-apply-124:~# apt -y install git ansible
# 也可自定义选择其它版本
root@HAproxy-k8s-apply-124:~# wget https://github.com/easzlab/kubeasz/releases/download/3.6.1/ezdown
# 若无法在线下载,可在其它网络环境手动下载后再上传到部署节点
root@HAproxy-k8s-apply-124:~# ls
ezdown

# 手动安装docker,也可使用ezdown脚本自动安装
## 准备docker二进制文件
root@HAproxy-k8s-apply-124:~# wget https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-24.0.2.tgz --no-check-certificate
root@HAproxy-k8s-apply-124:~# tar xvf docker-24.0.2.tgz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# cp /usr/local/src/docker/* /usr/local/bin/

## 准备service 文件
root@HAproxy-k8s-apply-124:~# cat /lib/systemd/system/docker.service 
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/local/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity

# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes

# kill only the docker process, not all processes in the cgroup
KillMode=process

[Install]
WantedBy=multi-user.target

[root@centos7-mini2 data]# cat containerd.service 
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/usr/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target

## 准备socket文件
root@HAproxy-k8s-apply-124:~# cat /lib/systemd/system/docker.socket
[Unit]
Description=Docker Socket for the API
PartOf=docker.service
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target

## 准备docker的json文件
root@HAproxy-k8s-apply-124:~# cat /etc/docker/daemon.json
{
  "data-root": "/var/lib/docker",
  "storage-driver": "overlay2",
  "registry-mirrors": ["https://frncu3gx.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "live-restore": false,
  "log-opts": {
      "max-file": "5",
      "max-size": "100m"
  }
}

## 准备一些优化文件
root@HAproxy-k8s-apply-124:~# cat /etc/security/limits.conf
*             soft    core            unlimited
*             hard    core            unlimited
*	      soft    nproc           1000000
*             hard    nproc           1000000
*             soft    nofile          1000000
*             hard    nofile          1000000
*             soft    memlock         32000
*             hard    memlock         32000
*             soft    msgqueue        8192000
*             hard    msgqueue        8192000
root@HAproxy-k8s-apply-124:~# cat /etc/sysctl.conf
net.ipv4.ip_forward=1
vm.max_map_count=262144
kernel.pid_max=4194303
fs.file-max=1000000
net.ipv4.tcp_max_tw_buckets=6000
net.netfilter.nf_conntrack_max=2097152

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
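# 说明:net.bridge.* 参数依赖 br_netfilter 内核模块,nf_conntrack_max 依赖 nf_conntrack 模块,修改后需加载模块并使配置生效
root@HAproxy-k8s-apply-124:~# modprobe br_netfilter
root@HAproxy-k8s-apply-124:~# modprobe nf_conntrack
root@HAproxy-k8s-apply-124:~# sysctl -p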

## 准备docker-compose
### 下载地址
https://github.com/docker/compose/releases/download/1.28.6/docker-compose-Linux-x86_64
root@HAproxy-k8s-apply-124:~# ls
docker-compose-Linux-x86_64_1.28.6
root@HAproxy-k8s-apply-124:~# cp docker-compose-Linux-x86_64_1.28.6 /usr/bin/docker-compose
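## 拷贝后添加执行权限并确认版本
root@HAproxy-k8s-apply-124:~# chmod +x /usr/bin/docker-compose
root@HAproxy-k8s-apply-124:~# docker-compose version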

## 创建docker组
root@HAproxy-k8s-apply-124:~# groupadd docker

## 启动docker
systemctl daemon-reload
systemctl enable --now docker.service
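## 简单验证docker运行状态及cgroup驱动(应为 systemd)
docker info | grep -E 'Server Version|Cgroup Driver'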

# 开始下载kubeasz项目及其组件
root@HAproxy-k8s-apply-124:~# chmod +x ezdown 
root@HAproxy-k8s-apply-124:~# ./ezdown -D
root@HAproxy-k8s-apply-124:~# ll /etc/kubeasz/
total 140
drwxrwxr-x 12 root root  4096 Aug  9 22:33 ./
drwxr-xr-x 95 root root  4096 Aug  9 22:33 ../
drwxrwxr-x  4 root root  4096 May 28 12:38 .github/
-rw-rw-r--  1 root root   301 May 28 12:32 .gitignore
-rw-rw-r--  1 root root  5997 May 28 12:32 README.md
-rw-rw-r--  1 root root 20304 May 28 12:32 ansible.cfg
drwxr-xr-x  4 root root  4096 Aug  9 22:33 bin/
drwxrwxr-x  8 root root  4096 May 28 12:38 docs/
drwxr-xr-x  3 root root  4096 Aug  9 22:41 down/
drwxrwxr-x  2 root root  4096 May 28 12:38 example/
-rwxrwxr-x  1 root root 26507 May 28 12:32 ezctl*
-rwxrwxr-x  1 root root 32185 May 28 12:32 ezdown*
drwxrwxr-x 10 root root  4096 May 28 12:38 manifests/
drwxrwxr-x  2 root root  4096 May 28 12:38 pics/
drwxrwxr-x  2 root root  4096 May 28 12:38 playbooks/
drwxrwxr-x 22 root root  4096 May 28 12:38 roles/
drwxrwxr-x  2 root root  4096 May 28 12:38 tools/

1.3.3 生成并自定义hosts文件

root@HAproxy-k8s-apply-124:~# cd /etc/kubeasz/
# 生成一个k8s-cluster1的集群
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl new k8s-cluster1
2023-08-09 23:25:38 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-cluster1
2023-08-09 23:25:38 DEBUG set versions
2023-08-09 23:25:38 DEBUG cluster k8s-cluster1: files successfully created.
2023-08-09 23:25:38 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-cluster1/hosts'
2023-08-09 23:25:38 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-cluster1/config.yml'

1.3.3.1 编辑ansible hosts文件

指定etcd节点、master节点、VIP、运行时、网络组件类型、service IP与pod IP范围等配置信息

root@HAproxy-k8s-apply-124:/etc/kubeasz# vim clusters/k8s-cluster1/hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.119.125
192.168.119.126

# master node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_master]
192.168.119.120 k8s_nodename='192.168.119.120'

# work node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_node]
192.168.119.122 k8s_nodename='192.168.119.122'


# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false

# [optional] loadbalance for accessing k8s from outside
[ex_lb]
#192.168.1.6 LB_ROLE=backup EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443
#192.168.1.7 LB_ROLE=master EX_APISERVER_VIP=192.168.1.250 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster
[chrony]
#192.168.1.1

[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.200.0.0/16"

# NodePort Range
NODE_PORT_RANGE="30000-32767"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/local/bin"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"

# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/k8s-cluster1"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"

# Default 'k8s_nodename' is empty
k8s_nodename=''

# Default python interpreter
ansible_python_interpreter=/usr/bin/python3

1.3.3.2 编辑 cluster config.yml 文件

# 只修改部分
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim clusters/k8s-cluster1/config.yml
······
······
############################
# role:kube-master
############################
# k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名)
MASTER_CERT_HOSTS:
  - "192.168.119.200"
  - "api.wuhaolam.top"
  
# node节点最大pod 数
MAX_PODS: 210

······
······

# coredns 自动安装
dns_install: "no"
corednsVer: "1.9.3"
ENABLE_LOCAL_DNS_CACHE: false
dnsNodeCacheVer: "1.22.20"
# 设置 local dns cache 地址
LOCAL_DNS_CACHE: "169.254.20.10"

# metric server 自动安装
metricsserver_install: "no"
metricsVer: "v0.6.3"

# dashboard 自动安装
dashboard_install: "no"
dashboardVer: "v2.7.0"
dashboardMetricsScraperVer: "v1.0.8"

1.3.4 部署 k8s 集群

1.3.4.1 环境初始化

# 查看命令使用
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl --help
Usage: ezctl COMMAND [args]
-------------------------------------------------------------------------------------
Cluster setups:
    list		             to list all of the managed clusters
    checkout    <cluster>            to switch default kubeconfig of the cluster
    new         <cluster>            to start a new k8s deploy with name 'cluster'
    setup       <cluster>  <step>    to setup a cluster, also supporting a step-by-step way
    start       <cluster>            to start all of the k8s services stopped by 'ezctl stop'
    stop        <cluster>            to stop all of the k8s services temporarily
    upgrade     <cluster>            to upgrade the k8s cluster
    destroy     <cluster>            to destroy the k8s cluster
    backup      <cluster>            to backup the cluster state (etcd snapshot)
    restore     <cluster>            to restore the cluster state from backups
    start-aio		             to quickly setup an all-in-one cluster with default settings

Cluster ops:
    add-etcd    <cluster>  <ip>      to add a etcd-node to the etcd cluster
    add-master  <cluster>  <ip>      to add a master node to the k8s cluster
    add-node    <cluster>  <ip>      to add a work node to the k8s cluster
    del-etcd    <cluster>  <ip>      to delete a etcd-node from the etcd cluster
    del-master  <cluster>  <ip>      to delete a master node from the k8s cluster
    del-node    <cluster>  <ip>      to delete a work node from the k8s cluster

Extra operation:
    kca-renew   <cluster>            to force renew CA certs and all the other certs (with caution)
    kcfg-adm    <cluster>  <args>    to manage client kubeconfig of the k8s cluster

Use "ezctl help <command>" for more information about a given command.

root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup --help
Usage: ezctl setup <cluster> <step>
available steps:
    01  prepare            to prepare CA/certs & kubeconfig & other system settings 
    02  etcd               to setup the etcd cluster
    03  container-runtime  to setup the container runtime(docker or containerd)
    04  kube-master        to setup the master nodes
    05  kube-node          to setup the worker nodes
    06  network            to setup the network plugin
    07  cluster-addon      to setup other useful plugins
    90  all                to run 01~07 all at once
    10  ex-lb              to install external loadbalance for accessing k8s from outside
    11  harbor             to install a new harbor server or to integrate with an existed one

examples: ./ezctl setup test-k8s 01  (or ./ezctl setup test-k8s prepare)
	  ./ezctl setup test-k8s 02  (or ./ezctl setup test-k8s etcd)
          ./ezctl setup test-k8s all
          ./ezctl setup test-k8s 04 -t restart_master

# 对集群进行初始化
## 删除 ex_lb 和 chrony host
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim playbooks/01.prepare.yml
# [optional] to synchronize system time of nodes with 'chrony' 
- hosts:
  - kube_master
  - kube_node
  - etcd
  roles:                                                                                                                                                     
  - { role: os-harden, when: "OS_HARDEN|bool" }
  - { role: chrony, when: "groups['chrony']|length > 0" }

# to create CA, kubeconfig, kube-proxy.kubeconfig etc.
- hosts: localhost
  roles:
  - deploy

# prepare tasks for all nodes
- hosts:
  - kube_master
  - kube_node
  - etcd
  roles:
  - prepare
  
# 开始初始化
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup k8s-cluster1 01

image

1.3.4.2 部署etcd集群

# 部署etcd集群
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup k8s-cluster1 02

image

# 在etcd服务器验证etcd服务
root@k8s-etcd1-125:~# export NODE_IPS="192.168.119.125 192.168.119.126"
root@k8s-etcd1-125:~# for IP in ${NODE_IPS};do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${IP}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done
https://192.168.119.125:2379 is healthy: successfully committed proposal: took = 6.749233ms
https://192.168.119.126:2379 is healthy: successfully committed proposal: took = 7.00636ms

1.3.4.3 部署容器运行时 containerd

# 验证基础容器镜像
root@HAproxy-k8s-apply-124:/etc/kubeasz# grep SANDBOX_IMAGE ./clusters/* -R
./clusters/k8s-cluster1/config.yml:SANDBOX_IMAGE: "easzlab.io.local:5000/easzlab/pause:3.9"

1、如果修改了基础容器镜像的位置,需要在配置文件中修改
# 因为我的本地已经下载完成,故此处不修改
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim ./clusters/k8s-cluster1/config.yml
# [containerd]基础容器镜像
SANDBOX_IMAGE: "easzlab.io.local:5000/easzlab/pause:3.9"

2、配置本地镜像仓库域名解析,如果有DNS服务器,可不用配置
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim roles/containerd/tasks/main.yml
    - name: 准备域名解析
      shell: "echo '192.168.119.113 harbor.wuhaolam.top' >> /etc/hosts"

3、可自定义containerd的配置文件
# SystemdCgroup = true 必须为true
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim roles/containerd/templates/config.toml.j2
107         [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
108             BinaryName = ""
109             CriuImagePath = ""
110             CriuPath = ""
111             CriuWorkPath = ""
112             IoGid = 0
113             IoUid = 0
114             NoNewKeyring = false
115             NoPivotRoot = false
116             Root = ""
117             ShimCgroup = ""
118             SystemdCgroup = true

4、配置nerdctl客户端
root@HAproxy-k8s-apply-124:/etc/kubeasz# wget https://github.com/containerd/nerdctl/releases/download/v1.5.0/nerdctl-1.5.0-linux-amd64.tar.gz
root@HAproxy-k8s-apply-124:/etc/kubeasz# tar xvf nerdctl-1.5.0-linux-amd64.tar.gz -C /etc/kubeasz/bin/containerd-bin/

root@HAproxy-k8s-apply-124:/etc/kubeasz# vim roles/containerd/tasks/main.yml 
- name: 获取是否已经安装containerd
  shell: 'systemctl is-active containerd || echo "NoFound"'
  register: containerd_svc

- block:
    - name: 准备containerd相关目录
      file: name={{ item }} state=directory
      with_items:
      - "{{ bin_dir }}/containerd-bin"
      - "/etc/containerd"
      - "/etc/nerdctl/"

    - name: 准备域名解析
      shell: "echo '192.168.119.113 harbor.wuhaolam.top' >> /etc/hosts"

    - name: 加载内核模块 overlay
      modprobe: name=overlay state=present

    - name: 下载 containerd 二进制文件
      copy: src={{ item }} dest={{ bin_dir }}/containerd-bin/ mode=0755
      with_fileglob:
      - "{{ base_dir }}/bin/containerd-bin/*"
      tags: upgrade

    - name: 下载 crictl
      copy: src={{ base_dir }}/bin/crictl dest={{ bin_dir }}/crictl mode=0755

    - name: 添加 crictl 自动补全
      lineinfile:
        dest: ~/.bashrc
        state: present
        regexp: 'crictl completion'
        line: 'source <(crictl completion bash) # generated by kubeasz'
  
    - name: 创建 containerd 配置文件
      template: src=config.toml.j2 dest=/etc/containerd/config.toml
      tags: upgrade

    - name: 创建 nerdctl 配置文件
      template: src=nerdctl.toml.j2 dest=/etc/nerdctl/nerdctl.toml
      tags: upgrade

    - name: 创建systemd unit文件
      template: src=containerd.service.j2 dest=/etc/systemd/system/containerd.service
      tags: upgrade

    - name: 创建 crictl 配置
      template: src=crictl.yaml.j2 dest=/etc/crictl.yaml

    - name: 开机启用 containerd 服务
      shell: systemctl enable containerd
      ignore_errors: true

    - name: 开启 containerd 服务
      shell: systemctl daemon-reload && systemctl restart containerd
      tags: upgrade

    - name: 轮询等待containerd服务运行
      shell: "systemctl is-active containerd.service"
      register: containerd_status
      until: '"active" in containerd_status.stdout'
      retries: 8
      delay: 2
      tags: upgrade
  when: "'NoFound' in containerd_svc.stdout"
  
5、nerdctl 配置文件
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim roles/containerd/templates/nerdctl.toml.j2
namespace = "k8s.io"
debug = false
debug_full = false
insecure_registry = true

6、部署运行时
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup k8s-cluster1 03

image

7、到节点上验证 containerd
root@k8s-master1-120:~# find / -name nerdctl
/etc/nerdctl
/usr/local/bin/containerd-bin/nerdctl
root@k8s-master1-120:~# ln -sv /usr/local/bin/containerd-bin/* /usr/local/bin
root@k8s-master1-120:~# nerdctl images
REPOSITORY    TAG    IMAGE ID    CREATED    PLATFORM    SIZE    BLOB SIZE
root@k8s-master1-120:~# containerd -v
containerd github.com/containerd/containerd v1.6.20 2806fc1057397dbaeefbea0e4e17bddfbd388f38
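# 可进一步确认 SystemdCgroup 配置已生效(示例)
root@k8s-master1-120:~# containerd config dump | grep SystemdCgroup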

8、验证harbor仓库
root@k8s-node1-122:~# nerdctl login harbor.wuhaolam.top
Enter Username: admin
Enter Password: 
WARN[0004] skipping verifying HTTPS certs for "harbor.wuhaolam.top" 
WARNING: Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
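# 登录成功后,可推送一个测试镜像进一步验证仓库可用(假设 harbor 中已创建 baseimages 项目,镜像仅作示例)
root@k8s-node1-122:~# nerdctl pull registry.cn-hangzhou.aliyuncs.com/zhangshijie/coredns:v1.10.1
root@k8s-node1-122:~# nerdctl tag registry.cn-hangzhou.aliyuncs.com/zhangshijie/coredns:v1.10.1 harbor.wuhaolam.top/baseimages/coredns:v1.10.1
root@k8s-node1-122:~# nerdctl push harbor.wuhaolam.top/baseimages/coredns:v1.10.1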

1.3.4.4 部署 k8s master 节点

root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup k8s-cluster1 04

image

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE     VERSION
192.168.119.120   Ready,SchedulingDisabled   master   3m29s   v1.27.2

1.3.4.5 部署 k8s node 节点

# 自定义配置路径
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim roles/kube-node/tasks/main.yml
# 开始部署
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl setup k8s-cluster1 05

image

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE     VERSION
192.168.119.120   Ready,SchedulingDisabled   master   9m11s   v1.27.2
192.168.119.122   Ready                      node     84s     v1.27.2

1.3.4.6 部署 calico 网络服务

# calico 的yaml文件
http://file.wuhaolam.top/calico3.26.1-ipip_ubuntu2204-k8s-1.27.x.yaml?e=1691668755&token=miUmOnzUyd6iPZJ_Mb0lwezwsmi9rGBR-TEVXr1z:hfGQt3MFblFwXRJOTBZIQ5GhzG0=

# 注意以下两处定义,确认 pod 子网范围与之前在 hosts 文件中定义的 CLUSTER_CIDR 一致
root@HAproxy-k8s-apply-124:/etc/kubeasz# vim calico3.26.1-ipip_ubuntu2204-k8s-1.27.x.yaml

image
image

# 开始部署
## 此处使用 yaml 文件手动部署,没有使用 kubeasz 的 06 network 步骤
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl apply -f calico3.26.1-ipip_ubuntu2204-k8s-1.27.x.yaml
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get pod -A
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-6655b6c4b-pkvsq   1/1     Running   0          4m3s
kube-system   calico-node-m9mmr                         1/1     Running   0          4m3s
kube-system   calico-node-vrxns                         1/1     Running   0          4m3s

# 拷贝calico二进制客户端到node节点进行验证
root@HAproxy-k8s-apply-124:/etc/kubeasz# scp ./bin/calicoctl root@192.168.119.122:/usr/local/bin
calicoctl
# 在node节点执行
root@k8s-node1-122:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+-----------------+-------------------+-------+----------+-------------+
|  PEER ADDRESS   |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+-----------------+-------------------+-------+----------+-------------+
| 192.168.119.120 | node-to-node mesh | up    | 12:46:33 | Established |
+-----------------+-------------------+-------+----------+-------------+
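# 若 calicoctl 已配置好数据存储访问(如 /etc/calico/calicoctl.cfg),还可查看 ipPool,确认与 hosts 中的 CLUSTER_CIDR(10.200.0.0/16)一致
root@k8s-node1-122:~# calicoctl get ippool -o wide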

1.3.4.7 验证 pod 之间的通信

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl run net-test1 --image=alpine sleep 360000
pod/net-test1 created
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl run net-test2 --image=alpine sleep 360000
pod/net-test2 created
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl run net-teste --image=alpine sleep 360000
pod/net-teste created

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get pod -A -o wide
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE   IP                NODE              NOMINATED NODE   READINESS GATES
default       net-test1                                 1/1     Running   0          68s   10.200.153.1      192.168.119.122   <none>           <none>
default       net-test2                                 1/1     Running   0          52s   10.200.153.2      192.168.119.122   <none>           <none>
default       net-teste                                 1/1     Running   0          48s   10.200.153.3      192.168.119.122   <none>           <none>
kube-system   calico-kube-controllers-6655b6c4b-pkvsq   1/1     Running   0          13m   10.200.0.2        192.168.119.122   <none>           <none>
kube-system   calico-node-m9mmr                         1/1     Running   0          13m   192.168.119.120   192.168.119.120   <none>           <none>
kube-system   calico-node-vrxns                         1/1     Running   0          13m   192.168.119.122   192.168.119.122   <none>           <none>

++ 由于我目前只有一个node节点所以创建的pod都在一个主机上,无法验证不同主机中pod的跨主机之间的通信 ++
++ 如需测试,进入到其中一个容器中,然后ping另一个主机中的pod的IP地址 ++  
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl exec -it net-test1 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping 10.200.153.2 -c2
PING 10.200.153.2 (10.200.153.2): 56 data bytes
64 bytes from 10.200.153.2: seq=0 ttl=63 time=0.346 ms
64 bytes from 10.200.153.2: seq=1 ttl=63 time=0.108 ms

--- 10.200.153.2 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.108/0.227/0.346 ms
/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=16.297 ms
64 bytes from 223.6.6.6: seq=1 ttl=127 time=14.594 ms
^C
--- 223.6.6.6 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 14.594/15.445/16.297 ms

# 给master节点分发 kube config 认证文件
root@HAproxy-k8s-apply-124:/etc/kubeasz# scp /root/.kube/config root@192.168.119.120:/root/.kube/
root@k8s-master1-120:~# kubectl get pod
NAME        READY   STATUS    RESTARTS   AGE
net-test1   1/1     Running   0          16m
net-test2   1/1     Running   0          16m
net-teste   1/1     Running   0          16m

1.3.5 集群节点的伸缩管理

集群管理主要包括添加或删除 master、node 节点,以及对节点进行管理和监控。
节点的添加与删除会涉及某些配置文件的重新分发,例如 node 节点上的 kube-lb 配置:

root@k8s-node1-122:~# cat /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;

error_log  /etc/kube-lb/logs/error.log warn;

events {
    worker_connections  3000;
}
# master节点如果增加,此处会新增加一个后端master server
stream {
    upstream backend {
        server 192.168.119.120:6443    max_fails=2 fail_timeout=3s;
    }

    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}

当前集群状态

root@k8s-master1-120:~# kubectl get node
NAME              STATUS                     ROLES    AGE    VERSION
192.168.119.120   Ready,SchedulingDisabled   master   3h     v1.27.2
192.168.119.122   Ready                      node     172m   v1.27.2

1.3.5.1 添加 master 节点

# 将192.168.119.121这台主机添加为第二台master节点
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl add-master k8s-cluster1 192.168.119.121

image

# 再次查看node1节点负载均衡配置
root@k8s-node1-122:~# cat /etc/kube-lb/conf/kube-lb.conf
···
···
stream {
    upstream backend {
        server 192.168.119.121:6443    max_fails=2 fail_timeout=3s;
        server 192.168.119.120:6443    max_fails=2 fail_timeout=3s;
    }
···
···
# 当前集群中master变为两个
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE     VERSION
192.168.119.120   Ready,SchedulingDisabled   master   3h31m   v1.27.2
192.168.119.121   Ready,SchedulingDisabled   master   7m51s   v1.27.2
192.168.119.122   Ready                      node     3h23m   v1.27.2

1.3.5.2 添加 node 节点

# 将192.168.119.123添加为第二个node节点
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl add-node k8s-cluster1 192.168.119.123

image

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE     VERSION
192.168.119.120   Ready,SchedulingDisabled   master   3h39m   v1.27.2
192.168.119.121   Ready,SchedulingDisabled   master   15m     v1.27.2
192.168.119.122   Ready                      node     3h31m   v1.27.2
192.168.119.123   Ready                      node     3m15s   v1.27.2

添加节点注意:如果某个节点出现故障,且没有先执行 ./ezctl del-node <cluster> <ip> 将其从集群中删除,那么该节点重新初始化后再加入集群时会失败。此时需要先编辑集群对应的 hosts 文件(clusters/k8s-cluster1/hosts),删除该节点的相关信息,再重新执行添加即可(正常的删除/重新添加流程见下方示例)。
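# 正常情况下,先删除再重新添加节点的流程示例(以 node 192.168.119.123 为例)
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl del-node k8s-cluster1 192.168.119.123
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl add-node k8s-cluster1 192.168.119.123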

1.3.6 升级集群

对当前版本 kubernetes 集群进行版本更新,解决已知bug或新增某些功能

1.3.6.1 批量更新

# 查看当前集群版本
root@HAproxy-k8s-apply-124:~# kubectl get node
NAME              STATUS                     ROLES    AGE   VERSION
192.168.119.120   Ready,SchedulingDisabled   master   18h   v1.27.2
192.168.119.121   Ready,SchedulingDisabled   master   15h   v1.27.2
192.168.119.122   Ready                      node     18h   v1.27.2
192.168.119.123   Ready                      node     14h   v1.27.2

# 准备好升级文件
下载地址:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#downloads-for-v1274
root@HAproxy-k8s-apply-124:~# ls kubernetes*
kubernetes-client-linux-amd64.tar.gz  kubernetes-node-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz  kubernetes.tar.gz

root@HAproxy-k8s-apply-124:~# tar xf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes.tar.gz -C /usr/local/src/

root@HAproxy-k8s-apply-124:~# cd /usr/local/src/kubernetes/server/bin/
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl /etc/kubeasz/bin/

# 确认一下自己下载的版本
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# cd /etc/kubeasz/
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./bin/kube-apiserver --version
Kubernetes v1.27.4

# 开始升级集群
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl upgrade k8s-cluster1

# 查看集群版本是否升级成功
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE   VERSION
192.168.119.120   Ready,SchedulingDisabled   master   19h   v1.27.4
192.168.119.121   Ready,SchedulingDisabled   master   15h   v1.27.4
192.168.119.122   Ready                      node     18h   v1.27.4
192.168.119.123   Ready                      node     15h   v1.27.4

1.3.6.2 手动升级

method1: 将二进制文件同步到其它设备,修改service文件加载新版本二进制
method2: 关闭原服务、替换二进制文件后再启动服务  ++ 使用此方式演示 ++

root@HAproxy-k8s-apply-124:~# tar xf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# cd /usr/local/src/kubernetes/server/bin/

# 升级master1节点,master2同理
root@k8s-master1-120:~# systemctl stop kube-apiserver.service kube-scheduler.service kube-controller-manager.service kube-proxy.service kubelet.service
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# scp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl root@192.168.119.120:/usr/local/bin
root@k8s-master1-120:~# systemctl start kube-apiserver.service kube-scheduler.service kube-controller-manager.service kube-proxy.service kubelet.service

# 升级node1节点,node2同理
root@k8s-node1-122:~# systemctl stop kubelet.service kube-proxy.service
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# scp kubelet kube-proxy kubectl root@192.168.119.122:/usr/local/bin/
root@k8s-node1-122:~# systemctl start kubelet.service kube-proxy.service

# 替换部署节点的二进制
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl /etc/kubeasz/bin/

1.4 部署kubernetes内部DNS服务-CoreDNS

1.4.1 部署CoreDNS

# 修改默认CoreDNS的yaml文件
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/cluster/addons/dns/coredns# pwd
/usr/local/src/kubernetes/cluster/addons/dns/coredns

# 修改集群dns名称,与/etc/kubeasz/clusters/k8s-cluster1/hosts文件中Cluster DNS Domain处定义的相同
# forward 处修改为转发的DNS地址,如:. 223.6.6.6

image

# 某些域名,想转发到指定的服务器进行解析,可自定义

image

# 指定CoreDNS的镜像地址以及资源限制
      containers:
      - name: coredns
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/coredns:v1.10.1                                                                                             
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 512Mi
            cpu: 500m
          requests:
            cpu: 100m
            memory: 70Mi

image

# 修改集群的dns地址
# 修改的地址一定要与 pod 中 /etc/resolv.conf 里的 nameserver 地址相同
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl exec -it net-test4 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # cat /etc/resolv.conf 
search default.svc.cluster.local svc.cluster.local cluster.local 192.168.119.2
nameserver 10.100.0.2
options ndots:5

image

# 开始部署CoreDNS
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/cluster/addons/dns/coredns# cp coredns.yaml.base /etc/kubeasz/coredns-v1.10.1.yaml
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/cluster/addons/dns/coredns# cd /etc/kubeasz/
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl apply -f coredns-v1.10.1.yaml 
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

# 若部署过程中遇到错误,可删除后重新部署
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl delete -f coredns-v1.10.1.yaml
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl apply -f coredns-v1.10.1.yaml

1.4.2 测试域名解析服务

root@k8s-master1-120:~# kubectl get pod
NAME        READY   STATUS    RESTARTS   AGE
net-test1   0/1     Unknown   0          25h
net-test2   0/1     Unknown   0          25h
net-test4   1/1     Running   0          166m
net-teste   0/1     Unknown   0          25h

root@k8s-master1-120:~# kubectl exec -it net-test4 sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping www.baidu.com
PING www.baidu.com (14.119.104.189): 56 data bytes
64 bytes from 14.119.104.189: seq=0 ttl=127 time=29.280 ms
64 bytes from 14.119.104.189: seq=1 ttl=127 time=30.144 ms
^C
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 29.280/29.712/30.144 ms
/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=14.382 ms
64 bytes from 223.6.6.6: seq=1 ttl=127 time=15.072 ms
^C
--- 223.6.6.6 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 14.382/14.727/15.072 ms
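# 除外网域名外,也可验证集群内部 service 域名的解析(kubernetes 为默认存在的 service,net-test4 为前文创建的测试 pod,alpine 自带 busybox nslookup)
root@k8s-master1-120:~# kubectl exec -it net-test4 -- nslookup kubernetes.default.svc.cluster.local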

二、总结kubectl命令的使用

kubectl 命令使用简介:https://kubernetes.io/zh-cn/docs/reference/kubectl/cheatsheet/

# 配置kubectl命令自动补全
root@HAproxy-k8s-apply-124:~# kubectl completion bash > /etc/profile.d/kubectl-completion.sh
root@HAproxy-k8s-apply-124:~# chmod +x /etc/profile.d/kubectl-completion.sh
root@HAproxy-k8s-apply-124:~# source /etc/profile.d/kubectl-completion.sh

# 查看node或pod信息,可 -A -o wide,显示更详细信息
root@HAproxy-k8s-apply-124:~# kubectl get node
NAME              STATUS                     ROLES    AGE   VERSION
192.168.119.120   Ready,SchedulingDisabled   master   43h   v1.27.2
192.168.119.121   Ready,SchedulingDisabled   master   40h   v1.27.2
192.168.119.122   Ready                      node     43h   v1.27.2
192.168.119.123   Ready                      node     40h   v1.27.2
root@HAproxy-k8s-apply-124:~# kubectl get pod
NAME        READY   STATUS    RESTARTS        AGE
net-test1   1/1     Running   2 (6h42m ago)   42h
net-test2   1/1     Running   2 (6h42m ago)   42h
net-test5   1/1     Running   1 (6h42m ago)   16h
net-test8   1/1     Running   1 (6h42m ago)   16h
## 查看命名空间
root@HAproxy-k8s-apply-124:~# kubectl get namespaces 
NAME              STATUS   AGE
default           Active   44h
kube-node-lease   Active   44h
kube-public       Active   44h
kube-system       Active   44h
kuboard           Active   4h54m
## 查看指定命名空间下的pod信息
root@HAproxy-k8s-apply-124:~# kubectl get pods -n kube-system
NAME                                      READY   STATUS    RESTARTS        AGE
calico-kube-controllers-6655b6c4b-gn9pl   1/1     Running   2 (7h10m ago)   16h
calico-node-52lb5                         1/1     Running   1 (7h10m ago)   16h
calico-node-5kn52                         1/1     Running   1 (7h10m ago)   16h
calico-node-sdv9d                         1/1     Running   1 (7h10m ago)   16h
calico-node-sjmnd                         1/1     Running   1 (7h10m ago)   16h
coredns-bbbfcc99-ghqhb                    1/1     Running   1 (7h10m ago)   16h

# 删除node或pod
## 删除指定的pod
root@HAproxy-k8s-apply-124:~# kubectl delete pod net-test8
pod "net-test8" deleted
## 删除所有node(慎用)
root@HAproxy-k8s-apply-124:~# kubectl delete nodes --all

# 根据资源清单文件创建资源,推荐使用 apply
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl apply -f coredns-v1.10.1.yaml

# 查看某一个资源对象或某一个资源清单文件的详细运行信息
root@HAproxy-k8s-apply-124:~# kubectl describe  pods net-test1
root@HAproxy-k8s-apply-124:~# kubectl describe -f /etc/kubeasz/coredns-v1.10.1.yaml

# 查看日志
root@HAproxy-k8s-apply-124:~# kubectl logs net-test1

# 进入容器
root@HAproxy-k8s-apply-124:~# kubectl exec -it net-test5 sh

# 查看某一个资源对象中某一个字段的详细说明
root@HAproxy-k8s-apply-124:~# kubectl explain pod
root@HAproxy-k8s-apply-124:~# kubectl explain pod.apiVersion

# 查看当前集群状态
root@HAproxy-k8s-apply-124:~# kubectl cluster-info 

# 标记某一个节点不会被调度
root@HAproxy-k8s-apply-124:~# kubectl cordon 192.168.119.123
## 取消不被调度
root@HAproxy-k8s-apply-124:~# kubectl uncordon 192.168.119.123

# 驱逐node节点上的pod,用于node下线
root@HAproxy-k8s-apply-124:~# kubectl drain 192.168.119.123
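## 若节点上运行有 DaemonSet 管理的 pod(如 calico-node),需要忽略 DaemonSet 才能完成驱逐(示例)
root@HAproxy-k8s-apply-124:~# kubectl drain 192.168.119.123 --ignore-daemonsets --delete-emptydir-data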

# 查看集群支持的资源对象及其 API 版本等信息
root@HAproxy-k8s-apply-124:~# kubectl api-resources

# 查看指定主机上的pod
root@HAproxy-k8s-apply-124:~# kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=192.168.119.123
NAMESPACE     NAME                                      READY   STATUS    RESTARTS        AGE    IP                NODE              NOMINATED NODE   READINESS GATES
default       net-test5                                 1/1     Running   1 (7h17m ago)   16h    10.200.244.2      192.168.119.123   <none>           <none>
kube-system   calico-kube-controllers-6655b6c4b-gn9pl   1/1     Running   2 (7h17m ago)   16h    10.200.244.3      192.168.119.123   <none>           <none>
kube-system   calico-node-52lb5                         1/1     Running   1 (7h17m ago)   16h    192.168.119.123   192.168.119.123   <none>           <none>
kube-system   coredns-bbbfcc99-ghqhb                    1/1     Running   1 (7h17m ago)   16h    10.200.244.7      192.168.119.123   <none>           <none>
kuboard       kuboard-v3-57c66f4c4b-x88c8               1/1     Running   0               5h1m   10.200.244.9      192.168.119.123   <none>           <none>

三、测试第三方dashboard如Kuboard

github地址:https://github.com/eip-work/kuboard-press

3.1 部署Kuboard

# 安装 nfs 服务
root@HAproxy-k8s-apply-124:~# apt -y install nfs-server
root@HAproxy-k8s-apply-124:~# mkdir -p /data/k8sdata/kuboard
root@HAproxy-k8s-apply-124:~# echo -e "\n/data/k8sdata *(rw,no_root_squash)" >> /etc/exports
root@HAproxy-k8s-apply-124:~# systemctl restart nfs-server
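# 验证共享目录已成功导出(showmount 由 nfs-common 提供)
root@HAproxy-k8s-apply-124:~# showmount -e 192.168.119.124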

# 在k8s中部署
root@HAproxy-k8s-apply-124:/etc/kubeasz# cat kuboard-all-in-one.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: kuboard

---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: kuboard-v3
  name: kuboard-v3
  namespace: kuboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s.kuboard.cn/name: kuboard-v3
  template:
    metadata:
      labels:
        k8s.kuboard.cn/name: kuboard-v3
    spec:
      #affinity:
      #  nodeAffinity:
      #    preferredDuringSchedulingIgnoredDuringExecution:
      #      - preference:
      #          matchExpressions:
      #            - key: node-role.kubernetes.io/master
      #              operator: Exists
      #        weight: 100
      #      - preference:
      #          matchExpressions:
      #            - key: node-role.kubernetes.io/control-plane
      #              operator: Exists
      #        weight: 100
      volumes:
      - name: kuboard-data
        nfs:
          server: 192.168.119.124
          path: /data/k8sdata/kuboard 
      containers:
        - env:
            - name: "KUBOARD_ENDPOINT"
              value: "http://kuboard-v3:80"
            - name: "KUBOARD_AGENT_SERVER_TCP_PORT"
              value: "10081"
          image: swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard:v3 
          volumeMounts:
          - name: kuboard-data 
            mountPath: /data 
            readOnly: false
          imagePullPolicy: Always
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /
              port: 80
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          name: kuboard
          ports:
            - containerPort: 80
              name: web
              protocol: TCP
            - containerPort: 443
              name: https
              protocol: TCP
            - containerPort: 10081
              name: peer
              protocol: TCP
            - containerPort: 10081
              name: peer-u
              protocol: UDP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /
              port: 80
              scheme: HTTP
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          resources: {}
      #dnsPolicy: ClusterFirst
      #restartPolicy: Always
      #serviceAccount: kuboard-boostrap
      #serviceAccountName: kuboard-boostrap
      #tolerations:
      #  - key: node-role.kubernetes.io/master
      #    operator: Exists

---
apiVersion: v1
kind: Service
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: kuboard-v3
  name: kuboard-v3
  namespace: kuboard
spec:
  ports:
    - name: web
      nodePort: 30080
      port: 80
      protocol: TCP
      targetPort: 80
    - name: tcp
      nodePort: 30081
      port: 10081
      protocol: TCP
      targetPort: 10081
    - name: udp
      nodePort: 30081
      port: 10081
      protocol: UDP
      targetPort: 10081
  selector:
    k8s.kuboard.cn/name: kuboard-v3
  sessionAffinity: None
  type: NodePort

root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl apply -f kuboard-all-in-one.yaml

# pod在192.168.119.123节点上创建
root@k8s-master1-120:~# kubectl get pod -A -o wide | grep kuboard
kuboard       kuboard-v3-57c66f4c4b-x88c8               1/1     Running   0              2m45s   10.200.244.9      192.168.119.123   <none> <none>
# 浏览器中输入IP:30080进入,用户名admin,密码Kuboard123

image
image

3.2 在Kuboard中添加kubernetes集群并使用

# 点击添加集群,使用KubeConfig方式
# 将该配置文件中所有内容拷贝到图中的指定位置
root@HAproxy-k8s-apply-124:/etc/kubeasz# cat ~/.kube/config

image
image

# 查看命名空间下运行的容器

image

四、总结etcd的集群选举机制

首次选举

  • 集群中各 etcd 节点启动成功后默认为 follower 角色,termID为0,如果发现集群中没有leader,则会转为 candidate 角色并参与 leader 的选举
  • candidate(候选节点)默认首先会将投票投给自己,然后向其它候选节点发送投票信息(RequestVote)
  • 各候选节点收到其它节点发来的投票请求后,会比对对方的日志是否比自己的更新,如果更新,则把选票投给该候选人,并回复一个包含自己最新日志信息的响应消息。只有获得集群中超过半数节点选票的候选节点才可以成为 leader
  • 之后当选的 leader节点发送自己的 leader 心跳信息,维护自己的身份。(heartbeat-interval 默认为 100ms)
  • 之后其它节点将角色切换为 Follower 并向 leader 同步数据
  • 如果选举超时(election-timeout),则重新选举

后期选举

  • 当一个 Follower 节点在规定的时间内没有收到 leader 的消息时,就会转变成为 candidate 状态,并向其它节点发送投票请求(自己的term ID和日志更新记录),等待其它节点的响应。如果该 candidate 的日志更新记录最新,则会获得多数投票,成为新的 leader
  • 新的 leader 会将自己的 termID + 1 并通告其它节点
  • 如果旧的 leader 恢复了,发现已经有新的 leader,则自己会加入到已有的 leader 中并将自己的 termID 更新为和 leader 一致,在同一个任期内所有节点的 termID 是一致的

# 查看当前集群成员列表
root@k8s-etcd1-125:~# etcdctl member list
root@k8s-etcd1-125:~# etcdctl --write-out=table member list
+------------------+---------+----------------------+------------------------------+------------------------------+------------+
|        ID        | STATUS  |         NAME         |          PEER ADDRS          |         CLIENT ADDRS         | IS LEARNER |
+------------------+---------+----------------------+------------------------------+------------------------------+------------+
| 6e43d99bdd9598de | started | etcd-192.168.119.125 | https://192.168.119.125:2380 | https://192.168.119.125:2379 |      false |
| 94ce4429fcfb6596 | started | etcd-192.168.119.126 | https://192.168.119.126:2380 | https://192.168.119.126:2379 |      false |
+------------------+---------+----------------------+------------------------------+------------------------------+------------+

# 查看节点心跳状态
root@k8s-etcd1-125:~# export NODE_IPS="192.168.119.125 192.168.119.126"
root@k8s-etcd1-125:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
https://192.168.119.125:2379 is healthy: successfully committed proposal: took = 5.394099ms
https://192.168.119.126:2379 is healthy: successfully committed proposal: took = 6.160221ms

# 查看节点的状态
root@k8s-etcd1-125:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table endpoint status --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem; done

image

五、对kubernetes集群进行版本升级

对当前版本 kubernetes 集群进行版本更新,解决已知bug或新增某些功能

5.1 批量更新

# 查看当前集群版本
root@HAproxy-k8s-apply-124:~# kubectl get node
NAME              STATUS                     ROLES    AGE   VERSION
192.168.119.120   Ready,SchedulingDisabled   master   18h   v1.27.2
192.168.119.121   Ready,SchedulingDisabled   master   15h   v1.27.2
192.168.119.122   Ready                      node     18h   v1.27.2
192.168.119.123   Ready                      node     14h   v1.27.2

# 准备好升级文件
下载地址:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#downloads-for-v1274
root@HAproxy-k8s-apply-124:~# ls kubernetes*
kubernetes-client-linux-amd64.tar.gz  kubernetes-node-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz  kubernetes.tar.gz

root@HAproxy-k8s-apply-124:~# tar xf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes.tar.gz -C /usr/local/src/

root@HAproxy-k8s-apply-124:~# cd /usr/local/src/kubernetes/server/bin/
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl /etc/kubeasz/bin/

# 确认一下自己下载的版本
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# cd /etc/kubeasz/
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./bin/kube-apiserver --version
Kubernetes v1.27.4

# 开始升级集群
root@HAproxy-k8s-apply-124:/etc/kubeasz# ./ezctl upgrade k8s-cluster1

# 查看集群版本是否升级成功
root@HAproxy-k8s-apply-124:/etc/kubeasz# kubectl get node
NAME              STATUS                     ROLES    AGE   VERSION
192.168.119.120   Ready,SchedulingDisabled   master   19h   v1.27.4
192.168.119.121   Ready,SchedulingDisabled   master   15h   v1.27.4
192.168.119.122   Ready                      node     18h   v1.27.4
192.168.119.123   Ready                      node     15h   v1.27.4

5.2 手动升级

method1: 将二进制文件同步到其它设备,修改service文件加载新版本二进制
method2: 关闭原服务、替换二进制文件后再启动服务  ++ 使用此方式演示 ++

root@HAproxy-k8s-apply-124:~# tar xf kubernetes-client-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-node-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes-server-linux-amd64.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# tar xf kubernetes.tar.gz -C /usr/local/src/
root@HAproxy-k8s-apply-124:~# cd /usr/local/src/kubernetes/server/bin/

# 升级master1节点,master2同理
root@k8s-master1-120:~# systemctl stop kube-apiserver.service kube-scheduler.service kube-controller-manager.service kube-proxy.service kubelet.service
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# scp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl root@192.168.119.120:/usr/local/bin
root@k8s-master1-120:~# systemctl start kube-apiserver.service kube-scheduler.service kube-controller-manager.service kube-proxy.service kubelet.service

# 升级node1节点,node2同理
root@k8s-node1-122:~# systemctl stop kubelet.service kube-proxy.service
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# scp kubelet kube-proxy kubectl root@192.168.119.122:/usr/local/bin/
root@k8s-node1-122:~# systemctl start kubelet.service kube-proxy.service

# 替换部署节点的二进制
root@HAproxy-k8s-apply-124:/usr/local/src/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-scheduler kubelet kubectl /etc/kubeasz/bin/