Deploying a binary Kubernetes 1.26 cluster with kubeasz and containerd

Basic system optimization

#Complete the basic optimization on all servers first
#Install commonly used base packages
apt install iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip  apt-transport-https ca-certificates curl software-properties-common vim-common inetutils-ping iptables net-tools   -y
#Create a python symlink
ln -s /usr/bin/python3 /usr/bin/python

Time zone configuration
#Switch to the 24-hour time format
echo  "LC_TIME=en_DK.UTF-8" >> /etc/default/locale
#Set the time zone to Asia/Shanghai
#Method 1
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
#Method 2
#timedatectl set-timezone Asia/Shanghai
#Configure time synchronization
echo "*/5 * * * * ntpdate time1.aliyun.com &> /dev/null  && hwclock -w" >> /var/spool/cron/crontabs/root


#limits tuning
cat >> /etc/security/limits.conf <<EOF
root                soft    core            unlimited
root                hard    core            unlimited
root                soft    nproc           1000000
root                hard    nproc           1000000
root                soft    nofile          1000000
root                hard    nofile          1000000
root                soft    memlock         32000
root                hard    memlock         32000
root                soft    msgqueue        8192000
root                hard    msgqueue        8192000
   
  
*                soft    core            unlimited
*                hard    core            unlimited
*                soft    nproc           1000000
*                hard    nproc           1000000
*                soft    nofile          1000000
*                hard    nofile          1000000
*                soft    memlock         32000
*                hard    memlock         32000
*                soft    msgqueue        8192000
*                hard    msgqueue        8192000
EOF
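These limits only apply to new login sessions. A quick sketch for verifying them after logging in again (expected values follow the settings above):
#verify the new limits from a fresh login shell
ulimit -n   #open files, expect 1000000
ulimit -u   #max user processes, expect 1000000
ulimit -c   #core file size, expect unlimited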


#Kernel parameters
cat >/etc/sysctl.conf <<EOF
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.max_map_count=262144
kernel.pid_max=4194303
fs.file-max=1000000
net.netfilter.nf_conntrack_max=2097152

   
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
   
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
   
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
   
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
   
# Enable netfilter on bridges (required for kube-proxy and most CNI plugins).
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 0
   
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
   
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536
   
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
   
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296
   
# TCP kernel parameters
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096        87380   4194304
net.ipv4.tcp_wmem = 4096        16384   4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
   
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
   
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
   
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
   
   
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
   
# keepalive conn
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.ip_local_port_range = 10001    65000
   
# swap
vm.overcommit_memory = 0
vm.swappiness = 0
   
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
EOF
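The net.bridge.* and nf_conntrack entries above only take effect once the matching kernel modules are loaded; a minimal sketch (standard module names for these parameters), after which the new settings are applied:
#load the modules required by the bridge/conntrack parameters and make them persistent
modprobe br_netfilter
modprobe nf_conntrack
cat >/etc/modules-load.d/k8s.conf <<EOF
br_netfilter
nf_conntrack
EOF
#apply the kernel parameters
sysctl -p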

#Generate a new machine-id; hosts in the same cluster must not share the same machine-id
root@mb:~# cat /etc/machine-id 
f4af3e6c06f74989a1cc01905cd24f85
root@mb:~# dbus-uuidgen >/etc/machine-id
root@mb:~# cat /etc/machine-id 
8702ded717fc0176d080747d652dff9a
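If all the nodes were cloned from the same template, the same regeneration can be pushed to every host; a hedged sketch (the host list is an example and assumes ssh access is already in place):
#regenerate /etc/machine-id on each cloned node
for node in 10.10.1.200 10.10.1.201 10.10.1.202 10.10.1.203 10.10.1.204 10.10.1.205; do
    ssh root@${node} "dbus-uuidgen > /etc/machine-id && cat /etc/machine-id"
done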


#Set the hostnames
hostnamectl set-hostname harbor-deploy
hostnamectl set-hostname master1-etcd1
hostnamectl set-hostname master2-etcd2
hostnamectl set-hostname master3-etcd3
hostnamectl set-hostname node1
hostnamectl set-hostname node2
hostnamectl set-hostname node3
hostnamectl set-hostname lb-01
hostnamectl set-hostname lb-02


#Configure /etc/hosts name resolution
cat >>/etc/hosts<<'EOF'
10.10.1.206 harbor          
10.10.1.200 master1-etcd1   
10.10.1.201 master2-etcd2   
10.10.1.202 master3-etcd3   
10.10.1.203 node1            
10.10.1.204 node2           
10.10.1.205 node3           
10.10.1.80  lb-01           
10.10.1.81  lb-02           
10.10.1.206 harbor.yyq.cn
EOF

Load balancer setup

The load balancer serves external clients (kubectl and the dashboard) and takes load off the direct node-to-master path.

#Install haproxy and keepalived on the load balancer servers
apt install -y haproxy keepalived

#Configure keepalived on lb-01 and lb-02
#Configuration for the first LB node (MASTER)
cat >/etc/keepalived/keepalived.conf<<'EOF'
! Configuration File for keepalived
  
global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
  
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 50
    nopreempt
    priority 100
    advert_int 1
    authentication {
       auth_type PASS
       auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.111 dev eth0 label eth0:0
        10.0.0.112 dev eth0 label eth0:1
        10.0.0.113 dev eth0 label eth0:2
        10.0.0.114 dev eth0 label eth0:3
    }
}
EOF


#Configuration for the second LB node (BACKUP)
cat >/etc/keepalived/keepalived.conf<<'EOF'
! Configuration File for keepalived
  
global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
  
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 50
    nopreempt
    priority 80
    advert_int 1
    authentication {
       auth_type PASS
       auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.111 dev eth0 label eth0:0
        10.0.0.112 dev eth0 label eth0:1
        10.0.0.113 dev eth0 label eth0:2
        10.0.0.114 dev eth0 label eth0:3
    }
}
EOF

#Start the service and enable it at boot
systemctl restart keepalived
systemctl enable keepalived
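A quick way to confirm which node currently holds the VIPs (addresses follow the keepalived configuration above):
#on the active node the labeled VIPs should be visible on eth0
ip addr show eth0 | grep -E '10\.0\.0\.11[1-4]'
#watch the VRRP advertisements if failover does not behave as expected (IP protocol 112 = VRRP)
tcpdump -i eth0 -nn 'ip proto 112'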


haproxy setup
#Configure haproxy
cat >>/etc/haproxy/haproxy.cfg<<'EOF'
listen k8s-6443
        bind 10.0.0.111:6443
        mode tcp
        server master1-haproxy1-keepalived1 10.0.0.101:6443 check inter 3s fall 3 rise 5
        server master2-haproxy2-keepalived2 10.0.0.102:6443 check inter 3s fall 3 rise 5
        server master3-haproxy3-keepalived3 10.0.0.103:6443 check inter 3s fall 3 rise 5
EOF

#Start haproxy
systemctl restart haproxy
systemctl enable haproxy
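Note that the bind/backend addresses in this listen block come from a 10.0.0.x lab; they should match the VIP and master addresses used elsewhere in this document (10.10.1.111 and 10.10.1.200-202). A quick check that the frontend is up (binding a VIP currently held by the other node relies on net.ipv4.ip_nonlocal_bind=1 from the sysctl section):
#verify haproxy is listening on the VIP
ss -ntlp | grep 6443
#once the masters are installed, the apiserver should answer through the VIP
curl -k https://10.0.0.111:6443/healthz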

Harbor setup

Since Go 1.15, certificates must carry a Subject Alternative Name (a CN alone is no longer accepted); alternatively, a free certificate for an Alibaba Cloud domain can be used directly.
root@harbor:/tools# chmod +x runtime-install.sh 
root@harbor:/tools# bash runtime-install.sh docker
The current system is Ubuntu 20.04.3 LTS \n \l; starting system initialization, docker-compose configuration and docker installation
docker/
docker/docker-proxy
docker/docker-init
docker/containerd
docker/containerd-shim
docker/dockerd
docker/runc
docker/ctr
docker/docker
docker/containerd-shim-runc-v2
Starting the docker server and enabling it at boot!
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service.
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service.
Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket.
docker server installation finished, welcome to the docker world!


#Upload the harbor installation package
root@harbor:/apps# ll
total 644452
drwxr-xr-x  2 root root        74 Oct 17 07:11 ./
drwxr-xr-x 21 root root       317 Oct 17 06:38 ../
-rw-r--r--  1 root root 659907775 Oct 17 07:08 harbor-offline-installer-v2.5.2.tgz
-rw-r--r--  1 root root      7543 Oct 17 07:11 install_harbor.sh
#Install using the one-click script
root@harbor:/apps# bash install_harbor.sh 

#Create the certificate directory
mkdir -p /apps/harbor/certs/
#Create the data directory
mkdir -p /data 
#Generate the CA private key
cd /apps/harbor/certs/
openssl genrsa -out ca.key 4096
#Generate the CA self-signed certificate
#-days 3650 gives a 10-year validity
#/C=CN/ST=Beijing(province)/L=Beijing(city)/O=example/OU=Personal/CN=wang.org(the first-level domain used to access harbor)"
openssl req -x509 -new -nodes -sha512 -days 3650 \
 -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=harbor.yyq.cn" \
 -key ca.key \
 -out ca.crt
#Check
root@harbor1-jenkins:/apps/harbor/certs# ls
ca.crt  ca.key
#Generate the private key for the harbor host; note the file name format is <harbor access domain>.key, e.g. harbor.wang.org.key
openssl genrsa -out harbor.yyq.cn.key 4096
#Check
root@harbor1-jenkins:/apps/harbor/certs# ls
ca.crt  ca.key  harbor.yyq.cn.key
#Generate the certificate signing request for the harbor host
#/C=CN/ST=Beijing(province)/L=Beijing(city)/O=example/OU=Personal/CN=harbor.wang.org(the harbor access domain)" \
openssl req -sha512 -new \
    -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=harbor.yyq.cn" \
    -key harbor.yyq.cn.key \
    -out harbor.yyq.cn.csr    
#Check
root@harbor1-jenkins:/apps/harbor/certs# ls -l
total 16
-rw-r--r-- 1 root root 2049 Sep 29 16:02 ca.crt
-rw------- 1 root root 3247 Sep 29 16:02 ca.key
-rw-r--r-- 1 root root 1708 Sep 29 16:05 harbor.yyq.cn.csr
-rw------- 1 root root 3247 Sep 29 16:04 harbor.yyq.cn.key
#Create the x509 v3 extension file (a requirement added in newer versions)
cd /apps/harbor/certs #switch to the directory where the harbor certificates are created
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names

[alt_names]
DNS.1=yyq.cn
DNS.2=yyq
DNS.3=harbor.yyq.cn
EOF
#Check
root@harbor1-jenkins:/apps/harbor/certs# cat v3.ext 
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names

[alt_names]
DNS.1=yyq.cn
DNS.2=yyq
DNS.3=harbor.yyq.cn
#Check the current files
root@harbor1-jenkins:/apps/harbor/certs# ls
ca.crt  ca.key  harbor.yyq.cn.csr  harbor.yyq.cn.key  v3.ext
#Issue the certificate for the harbor host
openssl x509 -req -sha512 -days 3650 \
    -extfile v3.ext \
    -CA ca.crt -CAkey ca.key -CAcreateserial \
    -in harbor.yyq.cn.csr \
    -out harbor.yyq.cn.crt 
#The final file list:
root@harbor1-jenkins:/etc/docker/certs.d/harbor.yyq.cn# cd /apps/harbor/certs/
root@harbor1-jenkins:/apps/harbor/certs# ls -l
total 28
-rw-r--r-- 1 root root 2049 Sep 30 00:04 ca.crt
-rw------- 1 root root 3243 Sep 30 00:04 ca.key
-rw-r--r-- 1 root root   41 Sep 30 00:05 ca.srl
-rw-r--r-- 1 root root 2098 Sep 30 00:05 harbor.yyq.cn.crt
-rw-r--r-- 1 root root 1708 Sep 30 00:04 harbor.yyq.cn.csr
-rw------- 1 root root 3243 Sep 30 00:04 harbor.yyq.cn.key
-rw-r--r-- 1 root root  255 Sep 30 00:04 v3.ext
#The files that will be used are harbor.yyq.cn.crt and harbor.yyq.cn.key
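Before wiring the certificate into Harbor, it can be worth confirming that the SAN entries from v3.ext ended up in the signed certificate:
#check the subjectAltName of the issued certificate
openssl x509 -in harbor.yyq.cn.crt -noout -text | grep -A1 'Subject Alternative Name'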

#Enable the https configuration
root@harbor1-jenkins:/apps/harbor# cat /apps/harbor/harbor.yml
# Configuration file of Harbor

# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: harbor.yyq.cn

# http related config
http:
  # port for http, default is 80. If https enabled, this port will redirect to https port
  port: 80

# https related config
https:
  # https port for harbor, default is 443
  port: 443
  # The path of cert and key files for nginx
  certificate: /apps/harbor/certs/harbor.yyq.cn.crt
  private_key: /apps/harbor/certs/harbor.yyq.cn.key
... (remainder omitted)

#Re-run the installer
#--with-notary       enable notary content trust for https; optional when https is already configured
#--with-trivy        enable the vulnerability scanning component
#--with-chartmuseum  add support for helm chart packages
root@harbor1-jenkins:/apps/harbor# ./install.sh --with-trivy --with-chartmuseum
[Step 5]: starting Harbor ...
Creating network "harbor_harbor" with the default driver
Creating network "harbor_harbor-chartmuseum" with the default driver
Creating harbor-log ... done
Creating harbor-portal ... done
Creating registry      ... done
Creating redis         ... done
Creating harbor-db     ... done
Creating registryctl   ... done
Creating chartmuseum   ... done
Creating harbor-core   ... done
Creating trivy-adapter ... done
Creating nginx             ... done
Creating harbor-jobservice ... done----Harbor has been installed and started successfully.----

#Configure systemd management; the install script sets this up automatically after installation
[root@harbor ~]#vim /lib/systemd/system/harbor.service
[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
#the compose file path below may need to be adjusted
ExecStart=/usr/bin/docker-compose -f /apps/harbor/docker-compose.yml up
ExecStop=/usr/bin/docker-compose -f /apps/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target

[root@harbor ~]#systemctl daemon-reload
[root@harbor ~]#systemctl enable harbor
root@harbor1-jenkins:/apps/harbor/certs# systemctl start harbor.service
root@harbor1-jenkins:/apps/harbor/certs# docker ps -a
CONTAINER ID        IMAGE                                  COMMAND                  CREATED             STATUS                            PORTS                                         NAMES
8357b414accd        goharbor/harbor-jobservice:v2.4.2      "/harbor/entrypoint.…"   2 seconds ago       Up 1 second (health: starting)                                                  harbor-jobservice
6530f725a60c        goharbor/nginx-photon:v2.4.2           "nginx -g 'daemon of…"   2 seconds ago       Up 1 second (health: starting)    0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp   nginx
a993812bfa65        goharbor/harbor-core:v2.4.2            "/harbor/entrypoint.…"   2 seconds ago       Up 1 second (health: starting)                                                  harbor-core
0b3d96fb0742        goharbor/trivy-adapter-photon:v2.4.2   "/home/scanner/entry…"   3 seconds ago       Up 2 seconds (health: starting)                                                 trivy-adapter
e79dec2f09c7        goharbor/registry-photon:v2.4.2        "/home/harbor/entryp…"   4 seconds ago       Up 2 seconds (health: starting)                                                 registry
4beda271864a        goharbor/harbor-db:v2.4.2              "/docker-entrypoint.…"   4 seconds ago       Up 2 seconds (health: starting)                                                 harbor-db
697b09684c3e        goharbor/harbor-registryctl:v2.4.2     "/home/harbor/start.…"   4 seconds ago       Up 2 seconds (health: starting)                                                 registryctl
bc4b5e179d2b        goharbor/redis-photon:v2.4.2           "redis-server /etc/r…"   4 seconds ago       Up 2 seconds (health: starting)                                                 redis
23fc51ee3590        goharbor/chartmuseum-photon:v2.4.2     "./docker-entrypoint…"   4 seconds ago       Up 3 seconds (health: starting)                                                 chartmuseum
51a428519546        goharbor/harbor-portal:v2.4.2          "nginx -g 'daemon of…"   4 seconds ago       Up 3 seconds (health: starting)                                                 harbor-portal
fdb4ba2d9a02        goharbor/harbor-log:v2.4.2             "/bin/sh -c /usr/loc…"   5 seconds ago       Up 4 seconds (health: starting)   127.0.0.1:1514->10514/tcp                     harbor-log

Create a test image repository

 

Log in to harbor and test image pull and push

#Certificate setup: the three files harbor.yyq.cn.crt, harbor.yyq.cn.key and ca.crt need to be copied
root@harbor:/apps/harbor# mkdir -p /etc/docker/certs.d/harbor.yyq.cn
root@harbor:/apps/harbor# cd /apps/harbor/certs/
root@harbor:/apps/harbor/certs# cp -r /apps/harbor/certs/harbor.yyq.cn.crt /apps/harbor/certs/harbor.yyq.cn.key /etc/docker/certs.d/harbor.yyq.cn
root@harbor:/apps/harbor/certs# cp -ar /apps/harbor/certs/ca.crt /etc/docker/certs.d/harbor.yyq.cn/
root@harbor:/apps/harbor/certs# ls -l /etc/docker/certs.d/harbor.yyq.cn/
total 12
-rw-r--r-- 1 root root 2049 Oct 17 07:15 ca.crt
-rw-r--r-- 1 root root 2098 Oct 17 07:22 harbor.yyq.cn.crt
-rw------- 1 root root 3243 Oct 17 07:22 harbor.yyq.cn.key

#Configure /etc/hosts resolution
root@harbor:/apps/harbor/certs# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 mb

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
10.10.1.206 harbor.yyq.cn


#Login test
root@harbor:/apps/harbor/certs# docker login harbor.yyq.cn
Username: admin
Password: 
Error response from daemon: Missing client certificate harbor.yyq.cn.cert for key harbor.yyq.cn.key

#Check the certificate files
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# ls
ca.crt  harbor.yyq.cn.crt  harbor.yyq.cn.key
#Copy harbor.yyq.cn.crt to harbor.yyq.cn.cert
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# cp harbor.yyq.cn.crt harbor.yyq.cn.cert
#Try logging in again
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker login harbor.yyq.cn
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded


#The files required for docker login are ca.crt, harbor.yyq.cn.cert and harbor.yyq.cn.key
#Move harbor.yyq.cn.crt to /opt (to confirm it is not needed)
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# mv harbor.yyq.cn.crt /opt/
#Check the certificate files
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# ls -l
total 12
-rw-r--r-- 1 root root 2049 Oct 17 07:15 ca.crt
-rw-r--r-- 1 root root 2098 Oct 17 07:50 harbor.yyq.cn.cert
-rw------- 1 root root 3243 Oct 17 07:22 harbor.yyq.cn.key
#Log out of harbor
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker logout harbor.yyq.cn
Removing login credentials for harbor.yyq.cn
#Log in to the harbor registry again
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker login harbor.yyq.cn
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded


#Remember to create the baseimage project in harbor first
#Test image push and pull
#Pull an image
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker pull alpine
Using default tag: latest
latest: Pulling from library/alpine
59bf1c3509f3: Pull complete 
Digest: sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300
Status: Downloaded newer image for alpine:latest
docker.io/library/alpine:latest

#Tag the image
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker tag alpine harbor.yyq.cn/baseimage/alpine
#Test pushing the image
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker push harbor.yyq.cn/baseimage/alpine
Using default tag: latest
The push refers to repository [harbor.yyq.cn/baseimage/alpine]
8d3ac3489996: Pushed 
latest: digest: sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3 size: 528

#Delete the local image
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker rmi harbor.yyq.cn/baseimage/alpine:latest
Untagged: harbor.yyq.cn/baseimage/alpine:latest
Untagged: harbor.yyq.cn/baseimage/alpine@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
Deleted: sha256:c059bfaa849c4d8e4aecaeb3a10c2d9b3d85f5165c66ad3a4d937758128c4d18
Deleted: sha256:8d3ac3489996423f53d6087c81180006263b79f206d3fdec9e66f0e27ceb8759
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker images
REPOSITORY                      TAG       IMAGE ID       CREATED         SIZE
goharbor/harbor-exporter        v2.5.2    bde625474c7c   15 months ago   87.1MB
goharbor/chartmuseum-photon     v2.5.2    ea9acf3f7093   15 months ago   225MB
goharbor/redis-photon           v2.5.2    f96e95a13362   15 months ago   154MB
goharbor/trivy-adapter-photon   v2.5.2    6f18860f1130   15 months ago   251MB
goharbor/notary-server-photon   v2.5.2    b67bb36a5b49   15 months ago   112MB
goharbor/notary-signer-photon   v2.5.2    59816b3b6e1a   15 months ago   109MB
goharbor/harbor-registryctl     v2.5.2    ac57f56c9873   15 months ago   136MB
goharbor/registry-photon        v2.5.2    7d360fb0bf6a   15 months ago   77.9MB
goharbor/nginx-photon           v2.5.2    b8b4cea63416   15 months ago   44.3MB
goharbor/harbor-log             v2.5.2    1744f9718bfc   15 months ago   161MB
goharbor/harbor-jobservice      v2.5.2    23a0bbc68336   15 months ago   227MB
goharbor/harbor-core            v2.5.2    9d0502876731   15 months ago   203MB
goharbor/harbor-portal          v2.5.2    657c03f18cf1   15 months ago   52.6MB
goharbor/harbor-db              v2.5.2    1de1f9ea3602   15 months ago   224MB
goharbor/prepare                v2.5.2    18f129815c71   15 months ago   166MB
#Test pulling the image back from harbor
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker pull harbor.yyq.cn/baseimage/alpine
Using default tag: latest
latest: Pulling from baseimage/alpine
59bf1c3509f3: Pull complete 
Digest: sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
Status: Downloaded newer image for harbor.yyq.cn/baseimage/alpine:latest
harbor.yyq.cn/baseimage/alpine:latest
#Check
root@harbor:/etc/docker/certs.d/harbor.yyq.cn# docker images
REPOSITORY                       TAG       IMAGE ID       CREATED         SIZE
goharbor/harbor-exporter         v2.5.2    bde625474c7c   15 months ago   87.1MB
goharbor/chartmuseum-photon      v2.5.2    ea9acf3f7093   15 months ago   225MB
goharbor/redis-photon            v2.5.2    f96e95a13362   15 months ago   154MB
goharbor/trivy-adapter-photon    v2.5.2    6f18860f1130   15 months ago   251MB
goharbor/notary-server-photon    v2.5.2    b67bb36a5b49   15 months ago   112MB
goharbor/notary-signer-photon    v2.5.2    59816b3b6e1a   15 months ago   109MB
goharbor/harbor-registryctl      v2.5.2    ac57f56c9873   15 months ago   136MB
goharbor/registry-photon         v2.5.2    7d360fb0bf6a   15 months ago   77.9MB
goharbor/nginx-photon            v2.5.2    b8b4cea63416   15 months ago   44.3MB
goharbor/harbor-log              v2.5.2    1744f9718bfc   15 months ago   161MB
goharbor/harbor-jobservice       v2.5.2    23a0bbc68336   15 months ago   227MB
goharbor/harbor-core             v2.5.2    9d0502876731   15 months ago   203MB
goharbor/harbor-portal           v2.5.2    657c03f18cf1   15 months ago   52.6MB
goharbor/harbor-db               v2.5.2    1de1f9ea3602   15 months ago   224MB
goharbor/prepare                 v2.5.2    18f129815c71   15 months ago   166MB
harbor.yyq.cn/baseimage/alpine   latest    c059bfaa849c   23 months ago   5.59MB

Deploying with kubeasz

Configure passwordless ssh

#Install ansible and sshpass on the deploy server
apt install -y ansible
apt install -y sshpass

#Generate a key pair
root@deploy:~# ssh-keygen -t rsa-sha2-512 -b 4096
Generating public/private rsa-sha2-512 key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa
Your public key has been saved in /root/.ssh/id_rsa.pub
The key fingerprint is:
SHA256:CA3rkdTyank7M1Z1Wmu/yDhFmR18eE00Zh2mAKBoTXg root@deploy
The key's randomart image is:
+---[RSA 4096]----+
|    +o ..... . X*|
|   o=Eo     . O *|
|   o*=.      = + |
|  .. o..  . * .  |
|    .o. S. = .   |
|    + . . . +    |
|   . . o   o .   |
|      *   .o ..  |
|     . +  ..o .. |
+----[SHA256]-----+

#Key distribution script
cat >/tools/ssh-scp.sh<<'EOF'
#!/bin/bash
#Target host list
IP="
10.0.0.100
10.0.0.101
10.0.0.102
10.0.0.103
10.0.0.104
10.0.0.105
10.0.0.106
10.0.0.80
10.0.0.81
"
for node in ${IP};do
    sshpass -p "qwe123" ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no root@${node} &> /dev/null
   if [ $? -eq 0 ];then
     echo "${node} 秘钥copy完成"
   else
     echo "${node} 秘钥copy失败"
   fi
   
done
EOF
root@deploy:/tools# bash /tools/ssh-scp.sh 
10.10.1.200 key copy succeeded
10.10.1.201 key copy succeeded
10.10.1.202 key copy succeeded
10.10.1.203 key copy succeeded
10.10.1.204 key copy succeeded
10.10.1.205 key copy succeeded
10.10.1.206 key copy succeeded
10.10.1.80 key copy succeeded
10.10.1.81 key copy succeeded

#Test
root@deploy:/tools# ssh root@10.0.0.100 "hostname -I"
10.10.1.200 
root@deploy:/tools# ssh root@10.0.0.101 "hostname -I"
10.10.1.201 
root@deploy:/tools# ssh root@10.0.0.102 "hostname -I"
10.10.1.202 
root@deploy:/tools# ssh root@10.0.0.103 "hostname -I"
10.10.1.203 
root@deploy:/tools# ssh root@10.0.0.104 "hostname -I"
10.10.1.204 
root@deploy:/tools# ssh root@10.0.0.105 "hostname -I"
10.10.1.205 
root@deploy:/tools# ssh root@10.0.0.106 "hostname -I"
10.10.1.206 172.17.0.1 172.19.0.1 172.20.0.1 
root@deploy:/tools# ssh root@10.10.1.80 "hostname -I"
10.10.1.80 10.10.1.111 10.10.1.112 10.10.1.113 10.10.1.114 
root@deploy:/tools# ssh root@10.10.1.81 "hostname -I"
10.10.1.81 

#If the system is Ubuntu 22.04 the script below can be used instead
root@deploy:/tools# cat key-scp.sh 
#!/bin/bash
#Target host list
IP="
10.10.1.200
10.10.1.201
10.10.1.202
10.10.1.203
10.10.1.204
10.10.1.205
10.10.1.206
10.10.1.80
10.10.1.81
"
REMOTE_PORT="22"
REMOTE_USER="root"
REMOTE_PASS="qwe123"
for REMOTE_HOST in ${IP};do

  REMOTE_CMD="echo ${REMOTE_HOST} is successfully!"
  #add the remote host's host key to known_hosts
  ssh-keyscan -p "${REMOTE_PORT}" "${REMOTE_HOST}" >> ~/.ssh/known_hosts

  #use sshpass to set up passwordless login, then create the python3 symlink
  sshpass -p "${REMOTE_PASS}" ssh-copy-id "${REMOTE_USER}@${REMOTE_HOST}"
  ssh ${REMOTE_HOST} ln -sv /usr/bin/python3 /usr/bin/python
  echo ${REMOTE_HOST} passwordless login configured!
done

#Remember to create the python3 symlink before installing
ln -sv /usr/bin/python3 /usr/bin/python

Download kubeasz and start the deployment

https://github.com/easzlab/kubeasz

#The deploy node needs docker, so do not use a master node for it
#Download release 3.6.2, which installs Kubernetes 1.26.1
export release=3.6.2
wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
chmod +x ./ezdown

#Run ./ezdown -D to download the required images and binaries
root@deploy:/tools# chmod +x ezdown
root@deploy:/tools# ./ezdown -D
2023-10-17 09:09:17 INFO Action begin: download_all
2023-10-17 09:09:17 INFO downloading docker binaries, arch:x86_64, version:24.0.5
--2023-10-17 09:09:17--  https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-24.0.5.tgz
Resolving mirrors.tuna.tsinghua.edu.cn (mirrors.tuna.tsinghua.edu.cn)... 101.6.15.130, 2402:f000:1:400::2
Connecting to mirrors.tuna.tsinghua.edu.cn (mirrors.tuna.tsinghua.edu.cn)|101.6.15.130|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 69824517 (67M) [application/octet-stream]
Saving to: ‘docker-24.0.5.tgz’

docker-24.0.5.tgz     100%[==================]  66.59M  8.57MB/s    in 8.1s    

2023-10-17 09:09:26 (8.24 MB/s) - ‘docker-24.0.5.tgz’ saved [69824517/69824517]

Unit docker.service could not be found.
2023-10-17 09:09:27 DEBUG generate docker service file
2023-10-17 09:09:27 DEBUG generate docker config: /etc/docker/daemon.json
2023-10-17 09:09:27 DEBUG prepare register mirror for CN
2023-10-17 09:09:27 DEBUG enable and start docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /etc/systemd/system/docker.service.
2023-10-17 09:09:31 INFO downloading kubeasz: 3.6.2
3.6.2: Pulling from easzlab/kubeasz
f56be85fc22e: Pull complete 
ea5757f4b3f8: Pull complete 
bd0557c686d8: Pull complete 
37d4153ce1d0: Pull complete 
b39eb9b4269d: Pull complete 
a3cff94972c7: Pull complete 
e4e1a9569db9: Pull complete 
Digest: sha256:c258cedea9cc73b015584797389a99a0344fb91761e30214fbc60002680c2a77
Status: Downloaded newer image for easzlab/kubeasz:3.6.2
docker.io/easzlab/kubeasz:3.6.2
2023-10-17 09:09:46 DEBUG  run a temporary container
e38f81c4bd103ec9ba3089f9f418d4122466743412138daccc85c530e67c3efc
2023-10-17 09:09:47 DEBUG cp kubeasz code from the temporary container
Successfully copied 2.9MB to /etc/kubeasz
2023-10-17 09:09:47 DEBUG stop&remove temporary container
temp_easz
2023-10-17 09:09:47 INFO downloading kubernetes: v1.26.1 binaries
v1.26.1: Pulling from easzlab/kubeasz-k8s-bin
1b7ca6aea1dd: Pull complete 
f590fe200b8f: Pull complete 
d85833b4cdaa: Downloading [================>                                  ]  116.3MB/355.1MB
#Downloading the images is how the binaries are obtained: the binaries are extracted from the images and then installed with ansible

#The binaries used for the installation
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/bin/
total 1092348
-rwxr-xr-x 1 root root  66163920 May 17 03:39 calicoctl
-rwxr-xr-x 1 root root  14578064 Aug 19 05:57 cfssl
-rwxr-xr-x 1 root root  12066064 Aug 19 05:57 cfssl-certinfo
-rwxr-xr-x 1 root root   7974912 Aug 19 05:57 cfssljson
-rwxr-xr-x 1 root root   1144320 Aug 19 05:32 chronyd
-rwxr-xr-x 1 root root  82845696 Aug  2 20:35 cilium
drwxr-xr-x 2 root root       259 Aug 19 05:57 cni-bin
-rwxr-xr-x 1 root root  39108608 Oct 17 09:09 containerd
drwxr-xr-x 2 root root       157 Aug 19 05:57 containerd-bin
-rwxr-xr-x 1 root root  12353536 Oct 17 09:09 containerd-shim-runc-v2
-rwxr-xr-x 1 root root  54939628 Aug 14 07:10 crictl
-rwxr-xr-x 1 root root  19120128 Oct 17 09:09 ctr
-rwxr-xr-x 1 root root  34749560 Oct 17 09:09 docker
-rwxr-xr-x 1 root root  59383631 Aug 11 09:34 docker-compose
-rwxr-xr-x 1 root root  67015032 Oct 17 09:09 dockerd
-rwxr-xr-x 1 root root    761712 Oct 17 09:09 docker-init
-rwxr-xr-x 1 root root   1972190 Oct 17 09:09 docker-proxy
-rwxr-xr-x 1 root root  22474752 May 11 11:40 etcd
-rwxr-xr-x 1 root root  16998400 May 11 11:40 etcdctl
-rwxr-xr-x 1 root root  50597888 Aug 10 20:19 helm
-rwxr-xr-x 1 root root  19058688 Jul 10 14:06 hubble
-rwxr-xr-x 1 root root   1805984 Aug 19 05:33 keepalived
-rwxr-xr-x 1 root root 129957888 Jan 18  2023 kube-apiserver
-rwxr-xr-x 1 root root 119775232 Jan 18  2023 kube-controller-manager
-rwxr-xr-x 1 root root  48021504 Jan 18  2023 kubectl
-rwxr-xr-x 1 root root 121256152 Jan 18  2023 kubelet
-rwxr-xr-x 1 root root  45010944 Jan 18  2023 kube-proxy
-rwxr-xr-x 1 root root  52441088 Jan 18  2023 kube-scheduler
-rwxr-xr-x 1 root root   1820288 Aug 19 05:32 nginx
-rwxr-xr-x 1 root root  15144240 Oct 17 09:09 runc

# Create the cluster configuration files
root@deploy:/etc/kubeasz# chmod +x ezdown
root@deploy:/etc/kubeasz# ./ezctl new k8s-cluster1
2023-10-17 09:24:05 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-cluster1
2023-10-17 09:24:05 DEBUG set versions
2023-10-17 09:24:05 DEBUG cluster k8s-cluster1: files successfully created.
2023-10-17 09:24:05 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-cluster1/hosts'    #host inventory file
2023-10-17 09:24:05 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-cluster1/config.yml'    #cluster configuration file

#ezdown keeps the downloaded images in docker and runs a local registry container to serve them
root@deploy:/etc/kubeasz# docker  ps -a
CONTAINER ID   IMAGE        COMMAND                  CREATED       STATUS       PORTS     NAMES
d3653596607d   registry:2   "/entrypoint.sh /etc…"   2 hours ago   Up 2 hours             local_registry
root@deploy:/etc/kubeasz# docker images
REPOSITORY                                           TAG       IMAGE ID       CREATED         SIZE
registry                                             2         0ae1560ca86f   2 weeks ago     25.4MB
easzlab/kubeasz                                      3.6.2     6ea48e5ea9d3   6 weeks ago     157MB
easzlab/kubeasz-ext-bin                              1.8.0     633a9737e967   2 months ago    636MB
coredns/coredns                                      1.11.1    cbb01a7bd410   2 months ago    59.8MB
easzlab.io.local:5000/coredns/coredns                1.11.1    cbb01a7bd410   2 months ago    59.8MB
easzlab/metrics-server                               v0.6.4    c1623971df95   2 months ago    68.9MB
easzlab.io.local:5000/easzlab/metrics-server         v0.6.4    c1623971df95   2 months ago    68.9MB
easzlab/k8s-dns-node-cache                           1.22.23   81d6450452ae   4 months ago    68.4MB
easzlab.io.local:5000/easzlab/k8s-dns-node-cache     1.22.23   81d6450452ae   4 months ago    68.4MB
calico/kube-controllers                              v3.24.6   baf4466ddf40   5 months ago    77.5MB
easzlab.io.local:5000/calico/kube-controllers        v3.24.6   baf4466ddf40   5 months ago    77.5MB
calico/cni                                           v3.24.6   ca9fea5e07cb   5 months ago    212MB
easzlab.io.local:5000/calico/cni                     v3.24.6   ca9fea5e07cb   5 months ago    212MB
calico/node                                          v3.24.6   3953a481aa9d   5 months ago    245MB
easzlab.io.local:5000/calico/node                    v3.24.6   3953a481aa9d   5 months ago    245MB
easzlab/kubeasz-k8s-bin                              v1.26.1   f5cb65506fc7   8 months ago    1.17GB
easzlab.io.local:5000/easzlab/pause                  3.9       78d53e70b442   12 months ago   744kB
easzlab/pause                                        3.9       78d53e70b442   12 months ago   744kB
easzlab.io.local:5000/kubernetesui/dashboard         v2.7.0    07655ddf2eeb   13 months ago   246MB
kubernetesui/dashboard                               v2.7.0    07655ddf2eeb   13 months ago   246MB
kubernetesui/metrics-scraper                         v1.0.8    115053965e86   16 months ago   43.8MB
easzlab.io.local:5000/kubernetesui/metrics-scraper   v1.0.8    115053965e86   16 months ago   43.8MB

easzlab.io.local:5000/calico/kube-controllers  #the calico controller image that manages calico resources
easzlab.io.local:5000/calico/cni               #the image that integrates the CNI network plugin
easzlab.io.local:5000/calico/node              #runs on every node and handles route generation and synchronization

hosts inventory file configuration

root@deploy:/etc/kubeasz# cat /etc/kubeasz/clusters/k8s-cluster1/hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
10.10.1.200
10.10.1.201
10.10.1.202

# master node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_master]
10.10.1.200 k8s_nodename='master-10.10.1.200'
10.10.1.201 k8s_nodename='master-10.10.1.201'

# work node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_node]
10.10.1.203 k8s_nodename='node-10.10.1.203'
10.10.1.204 k8s_nodename='node-10.10.1.204'

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#10.10.1.8 NEW_INSTALL=false

# [optional] loadbalance for accessing k8s from outside
[ex_lb]
#10.10.1.6 LB_ROLE=backup EX_APISERVER_VIP=10.10.1.250 EX_APISERVER_PORT=8443
#10.10.1.7 LB_ROLE=master EX_APISERVER_VIP=10.10.1.250 EX_APISERVER_PORT=8443

# [optional] ntp server for the cluster
[chrony]
#10.10.1.1

[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.200.0.0/16"

# NodePort Range
NODE_PORT_RANGE="30000-65535"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="yyq.local"

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/local/bin"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"

# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/k8s-cluster1"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"

# Default 'k8s_nodename' is empty
k8s_nodename=''

# Default python interpreter
ansible_python_interpreter=/usr/bin/python3
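Before running the setup steps, a quick ad-hoc check that ansible (installed on the deploy node earlier) can reach every host in this inventory; a simple sketch:
#connectivity test against all hosts in the cluster inventory
cd /etc/kubeasz
ansible -i clusters/k8s-cluster1/hosts all -m ping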

config.yml cluster configuration file

The pod sandbox (pause) image: registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9

root@deploy:/etc/kubeasz# cat /etc/kubeasz/clusters/k8s-cluster1/config.yml
############################
# prepare
############################
# optional: offline installation of system packages (offline|online)
INSTALL_SOURCE: "online"

# optional: OS security hardening, see github.com/dev-sec/ansible-collection-hardening
OS_HARDEN: false


############################
# role:deploy
############################
# default: ca will expire in 100 years
# default: certs issued by the ca will expire in 50 years
CA_EXPIRY: "876000h"
CERT_EXPIRY: "876000h"

# force to recreate CA and other certs, not suggested to set 'true'
CHANGE_CA: false

# kubeconfig parameters
CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"

# k8s version
K8S_VER: "1.26.1"

# set unique 'k8s_nodename' for each node, if not set(default:'') ip add will be used
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character (e.g. 'example.com'),
# regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*'
K8S_NODENAME: "{%- if k8s_nodename != '' -%} \
                    {{ k8s_nodename|replace('_', '-')|lower }} \
               {%- else -%} \
                    {{ inventory_hostname }} \
               {%- endif -%}"

############################
# role:etcd
############################
# using a separate wal directory avoids disk io contention and improves performance
ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""


############################
# role:runtime [containerd,docker]
############################
# [.] enable registry mirrors to speed up image pulls
ENABLE_MIRROR_REGISTRY: true

# [.] add trusted private registries
INSECURE_REG:
  - "http://easzlab.io.local:5000,harbor.yyq.cn,10.0.0.106"
  - "https://{{ HARBOR_REGISTRY }}"

# [.] sandbox (pause) container image
SANDBOX_IMAGE: "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"

# [containerd] container persistent storage directory
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"

# [docker] container storage directory
DOCKER_STORAGE_DIR: "/var/lib/docker"

# [docker] enable the remote REST API
DOCKER_ENABLE_REMOTE_API: false


############################
# role:kube-master
############################
# master node certificate hosts; extra IPs and domains (e.g. a public IP or domain) can be added
MASTER_CERT_HOSTS:
  - "10.10.1.111"
  - "k8s.easzlab.io"
  - "harbor.yyq.cn"
  #- "www.test.com"

# pod subnet mask length on each node (determines how many pod IPs a node can allocate)
# if flannel uses --kube-subnet-mgr, it reads this value to assign each node's pod subnet
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 24


############################
# role:kube-node
############################
# kubelet root directory
KUBELET_ROOT_DIR: "/var/lib/kubelet"

# maximum number of pods per node
MAX_PODS: 200

# resources reserved for kube components (kubelet, kube-proxy, dockerd, etc.)
# see templates/kubelet-config.yaml.j2 for the values
KUBE_RESERVED_ENABLED: "no"

# upstream k8s advises against enabling system-reserved lightly, unless long-term monitoring shows the system's real resource usage;
# the reservation should also grow with uptime; see templates/kubelet-config.yaml.j2 for the values
# the system reservation assumes a 4c/8g VM with a minimal set of system services; increase it on high-end physical machines
# note that apiserver and other components briefly use a lot of resources during installation, so reserve at least 1g of memory
SYS_RESERVED_ENABLED: "no"


############################
# role:network [flannel,calico,cilium,kube-ovn,kube-router]
############################
# ------------------------------------------- flannel
# [flannel] flannel backend: "host-gw", "vxlan", etc.
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false

# [flannel] 
flannel_ver: "v0.22.2"

# ------------------------------------------- calico
# [calico] IPIP tunnel mode options: [Always, CrossSubnet, Never]; across subnets use Always or CrossSubnet (on public clouds Always is the simplest choice, otherwise the cloud network settings must be adjusted, see each provider's docs)
# CrossSubnet is a mixed tunnel + BGP routing mode that can improve network performance; within a single subnet Never is enough.
CALICO_IPV4POOL_IPIP: "Always"

# [calico] host IP used by calico-node; BGP peers are established over this address; it can be set manually or auto-detected
IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

# [calico] calico network backend: bird, vxlan, none
CALICO_NETWORKING_BACKEND: "bird"

# [calico] whether calico uses route reflectors
# recommended once the cluster grows beyond about 50 nodes
CALICO_RR_ENABLED: false

# CALICO_RR_NODES selects the route reflector nodes; if unset, the cluster master nodes are used
# CALICO_RR_NODES: ["192.168.1.1", "192.168.1.2"]
CALICO_RR_NODES: []

# [calico] supported calico versions: ["3.19", "3.23"]
calico_ver: "v3.24.6"

# [calico] calico major.minor version
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"

# ------------------------------------------- cilium
# [cilium] image version
cilium_ver: "1.13.6"
cilium_connectivity_check: true
cilium_hubble_enabled: false
cilium_hubble_ui_enabled: false

# ------------------------------------------- kube-ovn
# [kube-ovn] offline image tarball version
kube_ovn_ver: "v1.11.5"

# ------------------------------------------- kube-router
# [kube-router] public clouds have restrictions and usually need ipinip always on; self-hosted environments can use "subnet"
OVERLAY_TYPE: "full"

# [kube-router] NetworkPolicy support switch
FIREWALL_ENABLE: true

# [kube-router] kube-router image version
kube_router_ver: "v1.5.4"


############################
# role:cluster-addon
############################
# coredns auto-install
dns_install: "no"
corednsVer: "1.11.1"
# changed to false here; if set to true, LOCAL_DNS_CACHE below must also be configured correctly
ENABLE_LOCAL_DNS_CACHE: false
dnsNodeCacheVer: "1.22.23"
# local dns cache address: must be the second address of the service CIDR configured above
LOCAL_DNS_CACHE: "10.100.0.2"
# Enabling LOCAL_DNS_CACHE runs a small DNS cache image on every node; lookups hit the local cache first and only fall through to CoreDNS on a miss. It only affects the node it runs on, so a 1000-node cluster runs 1000 caches.

# metrics-server auto-install
metricsserver_install: "no"
metricsVer: "v0.6.4"

# dashboard auto-install
dashboard_install: "no"
dashboardVer: "v2.7.0"
dashboardMetricsScraperVer: "v1.0.8"

# prometheus auto-install
prom_install: "no"
prom_namespace: "monitor"
prom_chart_ver: "45.23.0"

# kubeapps auto-install; if enabled, local-storage (storageClass: "local-path") is installed as well
kubeapps_install: "no"
kubeapps_install_namespace: "kubeapps"
kubeapps_working_namespace: "default"
kubeapps_storage_class: "local-path"
kubeapps_chart_ver: "12.4.3"

# local-storage (local-path-provisioner) auto-install
local_path_provisioner_install: "no"
local_path_provisioner_ver: "v0.0.24"
# default local storage path
local_path_provisioner_dir: "/opt/local-path-provisioner"

# nfs-provisioner auto-install
nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.2"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.1.10"
nfs_path: "/data/nfs"

# network-check auto-install
network_check_enabled: false 
network_check_schedule: "*/5 * * * *"

############################
# role:harbor
############################
# harbor version, full version string
HARBOR_VER: "v2.6.4"
HARBOR_DOMAIN: "harbor.easzlab.io.local"
HARBOR_PATH: /var/data
HARBOR_TLS_PORT: 8443
HARBOR_REGISTRY: "{{ HARBOR_DOMAIN }}:{{ HARBOR_TLS_PORT }}"

# if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'
HARBOR_SELF_SIGNED_CERT: true

# install extra component
HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CHARTMUSEUM: true
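Before continuing, a quick sanity check of the values edited above (the grep pattern is just an example):
#double-check the settings that matter most for this deployment
grep -E '^(K8S_VER|SANDBOX_IMAGE|ENABLE_LOCAL_DNS_CACHE|CALICO_IPV4POOL_IPIP|HARBOR_VER)' /etc/kubeasz/clusters/k8s-cluster1/config.yml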

Run the installation steps and related files

Step 01

#Run step 01 to initialize the cluster environment
root@deploy:/etc/kubeasz/clusters/k8s-cluster1# cd /etc/kubeasz/
root@deploy:/etc/kubeasz/clusters/k8s-cluster1#./ezctl setup k8s-cluster1 01
2023-10-20 03:16:27 INFO cluster:k8s-cluster1 setup step:01 begins in 5s, press any key to abort:


PLAY [kube_master,kube_node,etcd,ex_lb,chrony] ***************************************************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************************************************************************************
ok: [10.10.1.200]
ok: [10.10.1.203]
ok: [10.10.1.204]
ok: [10.10.1.202]
ok: [10.10.1.201]
... (output omitted)
PLAY RECAP ***************************************************************************************************************************************************************************************************************
10.10.1.200                : ok=27   changed=22   unreachable=0    failed=0    skipped=115  rescued=0    ignored=0   
10.10.1.201                : ok=24   changed=19   unreachable=0    failed=0    skipped=111  rescued=0    ignored=0   
10.10.1.202                : ok=23   changed=18   unreachable=0    failed=0    skipped=112  rescued=0    ignored=0   
10.10.1.203                : ok=24   changed=19   unreachable=0    failed=0    skipped=111  rescued=0    ignored=0   
10.10.1.204                : ok=24   changed=19   unreachable=0    failed=0    skipped=111  rescued=0    ignored=0   
localhost                  : ok=33   changed=31   unreachable=0    failed=0    skipped=11   rescued=0    ignored=0  

Step 02: install etcd

/etc/kubeasz/roles/etcd/tasks/main.yml #tasks that generate the etcd certificates and configuration
/etc/kubeasz/roles/etcd/templates/etcd.service.j2 #the etcd systemd service template

#The country/city fields of the etcd cluster certificates come from the default templates; the certificate subject information can be customized
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/deploy/templates/  #certificate subject details are customized here, usually no change is needed
total 32
-rw-rw-r-- 1 root root 225 Sep  4 21:35 admin-csr.json.j2
-rw-rw-r-- 1 root root 491 Sep  4 21:35 ca-config.json.j2
-rw-rw-r-- 1 root root 254 Sep  4 21:35 ca-csr.json.j2
-rw-rw-r-- 1 root root 344 Sep  4 21:35 crb.yaml.j2
-rw-rw-r-- 1 root root 266 Sep  4 21:35 kube-controller-manager-csr.json.j2
-rw-rw-r-- 1 root root 226 Sep  4 21:35 kube-proxy-csr.json.j2
-rw-rw-r-- 1 root root 248 Sep  4 21:35 kube-scheduler-csr.json.j2
-rw-rw-r-- 1 root root 224 Sep  4 21:35 user-csr.json.j2

root@deploy:/etc/kubeasz/clusters/k8s-cluster1# cat /etc/kubeasz/roles/etcd/templates/etcd-csr.json.j2 
{
  "CN": "etcd",
  "hosts": [
{% for host in groups['etcd'] %}
    "{{ host }}",
{% endfor %}
    "127.0.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",#国家
      "ST": "HangZhou",#省份
      "L": "XS",#城市
      "O": "k8s",
      "OU": "System"
    }
  ]
}
Run the command to install the etcd cluster
root@deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 02
ansible-playbook -i clusters/k8s-cluster1/hosts -e @clusters/k8s-cluster1/config.yml  playbooks/02.etcd.yml
*** Component Version *********************
*******************************************
*   kubernetes: v1.26.1
*   etcd: v3.5.9
*   calico: v3.24.6
*******************************************
#the step starts after a 5-second countdown
2023-10-20 03:35:28 INFO cluster:k8s-cluster1 setup step:02 begins in 5s, press any key to abort:


PLAY [etcd] **************************************************************************************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************************************************************************************
ok: [10.10.1.200]
ok: [10.10.1.202]
ok: [10.10.1.201]
PLAY RECAP ***************************************************************************************************************************************************************************************************************
10.10.1.200                : ok=10   changed=9    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.10.1.201                : ok=8    changed=7    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.10.1.202                : ok=8    changed=7    unreachable=0    failed=0    skipped=0    rescued=0    ignored=0  
etcd deployment complete


#Check the etcd member health (ETCDCTL_AP1 in the transcript below is a typo for ETCDCTL_API; it only triggers a harmless "unrecognized environment variable" warning because API v3 is the default)
export NODE_IPS="10.10.1.200 10.10.1.201 10.10.1.202"
for ip in ${NODE_IPS};do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
root@master2-etcd2:~# export NODE_IPS="10.10.1.200 10.10.1.201 10.10.1.202"
root@master2-etcd2:~# for ip in ${NODE_IPS};do ETCDCTL_AP1=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
{"level":"warn","ts":"2023-10-20T11:49:42.014817+0800","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCDCTL_AP1=3"}
https://10.10.1.200:2379 is healthy: successfully committed proposal: took = 6.779002ms
{"level":"warn","ts":"2023-10-20T11:49:42.039577+0800","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCDCTL_AP1=3"}
https://10.10.1.201:2379 is healthy: successfully committed proposal: took = 9.86941ms
{"level":"warn","ts":"2023-10-20T11:49:42.065326+0800","caller":"flags/flag.go:93","msg":"unrecognized environment variable","environment-variable":"ETCDCTL_AP1=3"}
https://10.10.1.202:2379 is healthy: successfully committed proposal: took = 11.722037ms
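Beyond the health probe, endpoint status shows which member currently leads the cluster; a sketch using the same certificate paths:
#show member status (leader, db size, raft term) as a table
for ip in ${NODE_IPS};do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status -w table; done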

Step 03: install the container runtime

#Modify the default runtime ansible role so that harbor name resolution, certificates and the nerdctl client are installed as well
#Modify the containerd configuration template /etc/kubeasz/roles/containerd/templates/config.toml.j2
root@deploy:/etc/kubeasz# cat /etc/kubeasz/roles/containerd/templates/config.toml.j2

 60     restrict_oom_score_adj = false
 61     sandbox_image = "{{ SANDBOX_IMAGE }}"  #the sandbox (pause) image can be hard-coded to the Aliyun mirror address
 62     selinux_category_range = 1024


118             SystemdCgroup = true

151 {% if ENABLE_MIRROR_REGISTRY %}
152         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
153           endpoint = ["https://docker.nju.edu.cn/", "https://kuamavit.mirror.aliyuncs.com"]
154         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
155           endpoint = ["https://gcr.nju.edu.cn"]
156         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
157           endpoint = ["https://gcr.nju.edu.cn/google-containers/"]
158         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
159           endpoint = ["https://quay.nju.edu.cn"]
160         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."ghcr.io"]
161           endpoint = ["https://ghcr.nju.edu.cn"]
162         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."nvcr.io"]
163           endpoint = ["https://ngc.nju.edu.cn"]
166 {% endif %}

#Create the certificate directory on the deploy server
mkdir -p /etc/docker/certs.d/harbor.yyq.cn/
#On the harbor server, copy the certificates to the deploy server
rsync -a /etc/docker/certs.d/harbor.yyq.cn/* 10.10.1.210:/etc/kubeasz/roles/containerd/templates
#On the harbor server, copy the harbor login file to the deploy server
rsync -a /root/.docker/config.json 10.10.1.210:/etc/docker/certs.d/harbor.yyq.cn/
#Check on the deploy server
root@deploy:~# ll /etc/docker/certs.d/harbor.yyq.cn
total 16
drwxr-xr-x 2 root root   90 Oct 21 01:37 ./
drwxr-xr-x 3 root root   27 Oct 21 01:35 ../
-rw-r--r-- 1 root root 2049 Oct 17 15:15 ca.crt
-rw------- 1 root root   73 Oct 17 15:54 config.json
-rw-r--r-- 1 root root 2098 Oct 17 15:50 harbor.yyq.cn.cert
-rw------- 1 root root 3243 Oct 17 15:22 harbor.yyq.cn.key

#Copy the certificates into the role's template directory
cd /apps/harbor/certs/
cp -a ca.crt harbor.yyq.cn.cert harbor.yyq.cn.key /etc/kubeasz/roles/containerd/templates/
cp /root/.docker/config.json /etc/kubeasz/roles/containerd/templates


#The modified tasks file
root@deploy:/etc/kubeasz/bin/containerd-bin# cat /etc/kubeasz/roles/containerd/tasks/main.yml
- name: check whether containerd is already installed
  shell: 'systemctl is-active containerd || echo "NoFound"'
  register: containerd_svc

- block:
    - name: prepare containerd directories
      file: name={{ item }} state=directory
      with_items:
      - "{{ bin_dir }}/containerd-bin"
      - "/etc/containerd"
      - "/etc/nerdctl/"
      - "/etc/docker/certs.d/harbor.yyq.cn"
      - "/root/.docker/"

    - name: load the overlay kernel module
      modprobe: name=overlay state=present

    - name: copy the containerd binaries
      copy: src={{ item }} dest={{ bin_dir }}/ mode=0755  #the containerd-bin subdirectory was removed from dest here
      with_fileglob:
      - "{{ base_dir }}/bin/containerd-bin/*"##从此目录拷贝文件到master和node节点,因为这里是*所以直接把nerdctl的安装文件放在/etc/kubeasz/bin/containerd-bin也可以
      #- nerdctl#拷贝nerdctl的文件到安装目录
      #- containerd-rootless-setuptool.sh
      #- containerd-rootless.sh
      tags: upgrade

    - name: copy crictl
      copy: src={{ base_dir }}/bin/containerd-bin/crictl dest={{ bin_dir }}/crictl mode=0755  #note: on kubeasz 3.5.2 the src must be changed to {{ base_dir }}/bin/containerd-bin/crictl

    - name: add crictl shell completion
      lineinfile:
        dest: ~/.bashrc
        state: present
        regexp: 'crictl completion'
        line: 'source <(crictl completion bash) # generated by kubeasz'
 


    - name: create the containerd configuration file
      template: src=config.toml.j2 dest=/etc/containerd/config.toml
      tags: upgrade

    - name: create the nerdctl configuration file
      template: src=nerdctl.toml.j2 dest=/etc/nerdctl/nerdctl.toml
      tags: upgrade

    - name: create the systemd unit file
      template: src=containerd.service.j2 dest=/etc/systemd/system/containerd.service
      tags: upgrade
      
    - name: copy the harbor.yyq.cn.cert certificate to the master and node nodes
      template: src=harbor.yyq.cn.cert dest=/etc/docker/certs.d/harbor.yyq.cn/harbor.yyq.cn.cert
      tags: upgrade
      
    - name: copy the harbor.yyq.cn.key key to the master and node nodes
      template: src=harbor.yyq.cn.key dest=/etc/docker/certs.d/harbor.yyq.cn/harbor.yyq.cn.key
      tags: upgrade
      
    - name: copy the ca.crt certificate to the master and node nodes
      template: src=ca.crt dest=/etc/docker/certs.d/harbor.yyq.cn/ca.crt
      tags: upgrade
      
    - name: copy the harbor login file to the master and node nodes
      template: src=config.json dest=/root/.docker/config.json
      tags: upgrade
      
    - name: add harbor.yyq.cn to /etc/hosts
      shell: "echo '10.10.1.206 harbor.yyq.cn' >> /etc/hosts"


    - name: create the crictl configuration
      template: src=crictl.yaml.j2 dest=/etc/crictl.yaml

    - name: enable the containerd service at boot
      shell: systemctl enable containerd
      ignore_errors: true

    - name: start the containerd service
      shell: systemctl daemon-reload && systemctl restart containerd
      tags: upgrade

    - name: wait for the containerd service to become active
      shell: "systemctl is-active containerd.service"
      register: containerd_status
      until: '"active" in containerd_status.stdout'
      retries: 8
      delay: 2
      tags: upgrade
  when: "'NoFound' in containerd_svc.stdout"


#Upload the nerdctl binary package and place the binaries in /etc/kubeasz/bin/containerd-bin/
root@deploy:/tools# tar -xf nerdctl-1.3.0-linux-amd64.tar.gz 
root@deploy:/tools# ls -l
total 33512
-rwxr-xr-x 1 root root    21622 Apr  5  2023 containerd-rootless-setuptool.sh
-rwxr-xr-x 1 root root     7032 Apr  5  2023 containerd-rootless.sh
-rwxr-xr-x 1 root root    32735 Oct 17 09:09 ezdown
-rwxr-xr-x 1 root root 24920064 Apr  5  2023 nerdctl
-rw-r--r-- 1 root root  9328539 Oct 10 17:16 nerdctl-1.3.0-linux-amd64.tar.gz
root@deploy:/tools# mv containerd-rootless-setuptool.sh containerd-rootless.sh nerdctl /etc/kubeasz/bin/containerd-bin/
root@deploy:/tools# ls -l /etc/kubeasz/bin/containerd-bin/
total 160828
-rwxr-xr-x 1 root root 52375288 Aug 14 16:12 containerd
-rwxr-xr-x 1 root root    21622 Apr  5  2023 containerd-rootless-setuptool.sh
-rwxr-xr-x 1 root root     7032 Apr  5  2023 containerd-rootless.sh
-rwxr-xr-x 1 root root  7356416 Aug 14 16:12 containerd-shim
-rwxr-xr-x 1 root root  9478144 Aug 14 16:12 containerd-shim-runc-v1
-rwxr-xr-x 1 root root  9510912 Aug 14 16:12 containerd-shim-runc-v2
-rwxr-xr-x 1 root root 23133752 Aug 14 16:12 containerd-stress
-rwxr-xr-x 1 root root 27193112 Aug 14 16:12 ctr
-rwxr-xr-x 1 root root 24920064 Apr  5  2023 nerdctl
-rwxr-xr-x 1 root root 10684992 Aug 11 01:57 runc




#Add the nerdctl configuration file template:
root@deploy:/tools# cat /etc/kubeasz/roles/containerd/templates/nerdctl.toml.j2
namespace      = "k8s.io"
debug          = false
debug_full     = false
insecure_registry = true
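With namespace set to k8s.io, nerdctl operates on the same containerd namespace that the kubelet/CRI uses, so images pulled by either side are visible to both; a small usage sketch once step 03 has run:
#list the images and containers managed through the CRI
nerdctl images
nerdctl ps -a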

#Modify the containerd systemd unit template
root@deploy:/tools# cat /etc/kubeasz/roles/containerd/templates/containerd.service.j2
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
Environment="PATH={{ bin_dir }}:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStartPre=-/sbin/modprobe overlay
ExecStart={{ bin_dir }}/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target

#Check the template files
root@harbor-deploy:/etc/kubeasz/roles/containerd/templates# ls -l /etc/kubeasz/roles/containerd/templates
total 36
-rw-r--r-- 1 root root 2049 Oct 22 00:57 ca.crt
-rw------- 1 root root   73 Oct 22 13:04 config.json
-rw-rw-r-- 1 root root 7385 Sep  4 21:35 config.toml.j2
-rw-rw-r-- 1 root root  585 Oct 22 13:03 containerd.service.j2
-rw-rw-r-- 1 root root   57 Sep  4 21:35 crictl.yaml.j2
-rw-r--r-- 1 root root 2098 Oct 22 00:58 harbor.yyq.cn.cert
-rw------- 1 root root 3243 Oct 22 00:57 harbor.yyq.cn.key
-rw-r--r-- 1 root root   97 Oct 22 13:02 nerdctl.toml.j2
#Run step 03
root@deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 03
ansible-playbook -i clusters/k8s-cluster1/hosts -e @clusters/k8s-cluster1/config.yml  playbooks/03.runtime.yml
*** Component Version *********************
*******************************************
*   kubernetes: v1.26.1
*   etcd: v3.5.9
*   calico: v3.24.6
*******************************************
2023-10-20 06:47:42 INFO cluster:k8s-cluster1 setup step:03 begins in 5s, press any key to abort:


PLAY [kube_master,kube_node] *********************************************************************************************************************************************************************************************

TASK [Gathering Facts] ***************************************************************************************************************************************************************************************************
ok: [10.10.1.201]
ok: [10.10.1.200]
ok: [10.10.1.204]
ok: [10.10.1.203]

TASK [containerd : 轮询等待containerd服务运行] ***********************************************************************************************************************************************************************************
changed: [10.10.1.200]
changed: [10.10.1.203]
changed: [10.10.1.201]
changed: [10.10.1.204]

PLAY RECAP ***************************************************************************************************************************************************************************************************************
10.10.1.200                : ok=15   changed=6    unreachable=0    failed=0    skipped=18   rescued=0    ignored=0   
10.10.1.201                : ok=15   changed=6    unreachable=0    failed=0    skipped=15   rescued=0    ignored=0   
10.10.1.203                : ok=15   changed=6    unreachable=0    failed=0    skipped=15   rescued=0    ignored=0   
10.10.1.204                : ok=15   changed=6    unreachable=0    failed=0    skipped=15   rescued=0    ignored=0 
Installation complete

#Test pulling an image with nerdctl
root@master1-etcd1:~# nerdctl pull nginx
WARN[0000] skipping verifying HTTPS certs for "docker.io" 
docker.io/library/nginx:latest:                                                   resolved       |++++++++++++++++++++++++++++++++++++++| 
index-sha256:b4af4f8b6470febf45dc10f564551af682a802eda1743055a7dfc8332dffa595:    done           |++++++++++++++++++++++++++++++++++++++| 
manifest-sha256:3a12fc354e3c4dd62196a809e52a5d2f8f385b52fcc62145b0efec5954bb8fa1: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:bc649bab30d150c10a84031a7f54c99a8c31069c7bc324a7899d7125d59cc973:   done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:d57731fb9008721addb89b0f1eb461ac7484d0d925ddd637718bf50927811686:    done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:a378f10b321842c3042cdeff4f6997f34f4cb21f2eff27704b7f6193ab7b5fea:    downloading    |++++++--------------------------------|  5.0 MiB/27.8 MiB 
layer-sha256:4dfff07085381c15800af316d036b96866786e371d50d1c62ded513f1dc68f17:    downloading    |++++++--------------------------------|  7.0 MiB/39.4 MiB 
layer-sha256:2135e49ace4b128ebc70405f034bd5273e216d282bd04349995cef07d59c276b:    done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:c843f6b280ce81e71190e2519561e3d8d2482b8d227f0bf5b94f6d1391aab7b0:    done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:6f35ab6f1400627ef6042fc7c6028cd61c0d3268f69af290396a55444922065a:    done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:6c538b49fa4a5acc4bbcba3e8867d2234b553aa9ea298d69ff46ba433dce27bd:    done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 43.6s                                                                    total:  12.0 M (282.2 KiB/s)  

#Check the /etc/hosts file
root@master2-etcd2:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 mb

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
10.10.1.210    easzlab.io.local
### BEGIN KUBEASZ MANAGED BLOCK
10.10.1.200    master01-10.10.1.200 # generated by kubeasz
10.10.1.201    master02-10.10.1.201 # generated by kubeasz
10.10.1.202    master03-10.10.1.202 # generated by kubeasz
10.10.1.203    worker01-10.10.1.203 # generated by kubeasz
10.10.1.204    worker02-10.10.1.203 # generated by kubeasz
### END KUBEASZ MANAGED BLOCK
10.10.1.206 harbor.yyq1.cn

root@master3-etcd3:~# nerdctl pull harbor.yyq.cn/baseimage/alpine:latest
WARN[0000] skipping verifying HTTPS certs for "harbor.yyq.cn" 
harbor.yyq.cn/baseimage/alpine:latest:                                            resolved       |++++++++++++++++++++++++++++++++++++++| 
manifest-sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:c059bfaa849c4d8e4aecaeb3a10c2d9b3d85f5165c66ad3a4d937758128c4d18:   done           |++++++++++++++++++++++++++++++++++++++| 
layer-sha256:59bf1c3509f33515622619af21ed55bbe26d24913cedbca106468a5fb37a50c3:    done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 0.3 s                                                                    total:  2.7 Mi (8.9 MiB/s)  
Test pushing and pulling images against the Harbor registry
#Create the cert directory on the master and node hosts
mkdir -p /etc/docker/certs.d/harbor.yyq.cn
#Distribute the certificates from the Harbor server to the master and node hosts
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.200:/etc/docker/certs.d/harbor.yyq.cn/
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.201:/etc/docker/certs.d/harbor.yyq.cn/
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.202:/etc/docker/certs.d/harbor.yyq.cn/
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.203:/etc/docker/certs.d/harbor.yyq.cn/
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.204:/etc/docker/certs.d/harbor.yyq.cn/
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt 10.10.1.205:/etc/docker/certs.d/harbor.yyq.cn/
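#The same distribution can also be written as a loop; a sketch assuming ssh key access from the Harbor server to the hosts listed above:
for h in 10.10.1.200 10.10.1.201 10.10.1.202 10.10.1.203 10.10.1.204 10.10.1.205; do
  ssh $h "mkdir -p /etc/docker/certs.d/harbor.yyq.cn"
  rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt $h:/etc/docker/certs.d/harbor.yyq.cn/
done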

#Test a push from any one of the servers
root@master1-etcd1:~# nerdctl tag nginx:latest harbor.yyq.cn/baseimage/nginx:latest
root@master1-etcd1:~# nerdctl push harbor.yyq.cn/baseimage/nginx:latest
INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest.list.v2+json, sha256:35d6d9876b6e968ff2ba5f6e57a01bd271ca4f5bef682e6e7c68100b14a2b954) 
WARN[0000] skipping verifying HTTPS certs for "harbor.yyq.cn" 
index-sha256:35d6d9876b6e968ff2ba5f6e57a01bd271ca4f5bef682e6e7c68100b14a2b954:    done           |++++++++++++++++++++++++++++++++++++++| 
manifest-sha256:3a12fc354e3c4dd62196a809e52a5d2f8f385b52fcc62145b0efec5954bb8fa1: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:bc649bab30d150c10a84031a7f54c99a8c31069c7bc324a7899d7125d59cc973:   done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 0.8 s                                                                    total:  10.0 K (12.5 KiB/s)  

root@master1-etcd1:~# cat /root/.docker/config.json 
{
    "auths": {
        "harbor.yyq.cn": {
            "auth": "YWRtaW46MTIzNDU2"
        }
    }
}
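
#The auth value is simply base64 of user:password, so it can be generated or checked by hand (admin/123456 assumed here, matching the value shown above):
echo -n 'admin:123456' | base64      #-> YWRtaW46MTIzNDU2
echo 'YWRtaW46MTIzNDU2' | base64 -d  #-> admin:123456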

Run step 04 to install the masters

#Role task file executed for the masters: /etc/kubeasz/roles/kube-master/tasks/main.yml
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-master/
total 0
drwxrwxr-x 2 root root  22 Sep  4 21:54 tasks
drwxrwxr-x 2 root root 180 Sep  4 21:54 templates
drwxrwxr-x 2 root root  22 Sep  4 21:54 vars
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-master/templates/
total 20
-rw-rw-r-- 1 root root  219 Sep  4 21:35 aggregator-proxy-csr.json.j2
-rw-rw-r-- 1 root root 1710 Sep  4 21:35 kube-apiserver.service.j2
-rw-rw-r-- 1 root root  979 Sep  4 21:35 kube-controller-manager.service.j2
-rw-rw-r-- 1 root root  487 Sep  4 21:35 kube-scheduler.service.j2
-rw-rw-r-- 1 root root  681 Sep  4 21:35 kubernetes-csr.json.j2
#Role files related to the kube-lb load balancer; the LB config and the VIP listener must be plain TCP (layer-4) listeners (see the sketch after the file list below)
#Alternatively, install a load balancer such as HAProxy yourself and write your own Ansible role for it
/etc/kubeasz/roles/kube-lb/templates/kube-lb.service.j2
/etc/kubeasz/roles/kube-lb/templates/kube-lb.conf.j2
/etc/kubeasz/roles/kube-lb/tasks/main.yml
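
#A rough sketch of what the rendered kube-lb config looks like: an nginx stream (TCP) proxy in front of the three apiservers, listening locally on each node. This is only an illustration of the idea; the authoritative content is kube-lb.conf.j2 above:
stream {
    upstream backend {
        server 10.10.1.200:6443 max_fails=2 fail_timeout=3s;
        server 10.10.1.201:6443 max_fails=2 fail_timeout=3s;
        server 10.10.1.202:6443 max_fails=2 fail_timeout=3s;
    }
    server {
        listen 127.0.0.1:6443;
        proxy_pass backend;
    }
}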

#Install the masters
root@deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 04
ansible-playbook -i clusters/k8s-cluster1/hosts -e @clusters/k8s-cluster1/config.yml  playbooks/04.kube-master.yml
*** Component Version *********************
*******************************************
*   kubernetes: v1.26.1
*   etcd: v3.5.9
*   calico: v3.24.6
*******************************************
2023-10-20 23:22:44 INFO cluster:k8s-cluster1 setup step:04 begins in 5s, press any key to abort:
PLAY RECAP ***************************************************************************************************************************************************************************************************************
10.10.1.200                : ok=58   changed=37   unreachable=0    failed=0    skipped=1    rescued=0    ignored=0   
10.10.1.201                : ok=57   changed=47   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0   
10.10.1.202                : ok=57   changed=37   unreachable=0    failed=0    skipped=0    rescued=0    ignored=0 
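
#A quick check after step 04: kubectl on the deploy host should now reach the new apiserver, and the three control-plane services can be checked directly on a master:
kubectl cluster-info
systemctl is-active kube-apiserver kube-controller-manager kube-scheduler   #run on a master node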

Run step 05 to install the worker nodes

#Role files related to the worker nodes
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-node/
tasks/     templates/ vars/      
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-node/tasks/
total 12
-rw-rw-r-- 1 root root 2141 Sep  4 21:35 create-kubelet-kubeconfig.yml
-rw-rw-r-- 1 root root 4488 Sep  4 21:35 main.yml
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-node/templates/
total 24
-rw-rw-r-- 1 root root  223 Sep  4 21:35 cni-default.conf.j2
-rw-rw-r-- 1 root root  947 Sep  4 21:35 kube-proxy-config.yaml.j2
-rw-rw-r-- 1 root root  356 Sep  4 21:35 kube-proxy.service.j2
-rw-rw-r-- 1 root root 2071 Sep  4 21:35 kubelet-config.yaml.j2
-rw-rw-r-- 1 root root  325 Sep  4 21:35 kubelet-csr.json.j2
-rw-rw-r-- 1 root root 1596 Sep  4 21:35 kubelet.service.j2
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/kube-node/vars/
total 4
-rw-rw-r-- 1 root root 547 Sep  4 21:35 main.yml

#Run the install command
root@deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 05
ansible-playbook -i clusters/k8s-cluster1/hosts -e @clusters/k8s-cluster1/config.yml  playbooks/05.kube-node.yml
*** Component Version *********************
*******************************************
*   kubernetes: v1.26.1
*   etcd: v3.5.9
*   calico: v3.24.6
*******************************************
2023-10-21 00:01:19 INFO cluster:k8s-cluster1 setup step:05 begins in 5s, press any key to abort:
PLAY RECAP *******************************************************************************************************
10.10.1.203   : ok=38   changed=36   unreachable=0    failed=0    skipped=2    rescued=0    ignored=0   
10.10.1.204   : ok=38   changed=36   unreachable=0    failed=0    skipped=2    rescued=0    ignored=0   
#Check
root@deploy:~# kubectl get node
NAME                   STATUS                     ROLES    AGE     VERSION
master01-10.10.1.200   Ready,SchedulingDisabled   master   3m38s   v1.26.1
master02-10.10.1.201   Ready,SchedulingDisabled   master   3m38s   v1.26.1
worker01-10.10.1.203   Ready                      node     2m56s   v1.26.1
worker02-10.10.1.203   Ready                      node     2m56s   v1.26.1

Run step 06 to install the Calico network component

#Calico-related installation files
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/calico/templates/
total 80
-rw-rw-r-- 1 root root   180 Sep  4 21:35 bgp-default.yaml.j2
-rw-rw-r-- 1 root root   162 Sep  4 21:35 bgp-rr.yaml.j2
-rw-rw-r-- 1 root root   215 Sep  4 21:35 calico-csr.json.j2
-rw-rw-r-- 1 root root 19015 Sep  4 21:35 calico-v3.19.yaml.j2
-rw-rw-r-- 1 root root 19637 Sep  4 21:35 calico-v3.23.yaml.j2
-rw-rw-r-- 1 root root 21589 Sep  4 21:35 calico-v3.24.yaml.j2
-rw-rw-r-- 1 root root   263 Sep  4 21:35 calicoctl.cfg.j2
root@deploy:/etc/kubeasz# ls -l /etc/kubeasz/roles/calico/tasks/main.yml 
-rw-rw-r-- 1 root root 2504 Sep  4 21:35 /etc/kubeasz/roles/calico/tasks/main.yml
#/etc/kubeasz/roles/calico/tasks/main.yml handles the Calico deployment variables and certificate signing

Files involved in step 06
/etc/kubeasz/playbooks/06.network.yml 
/etc/kubeasz/roles/calico/tasks/main.yml 
/etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2 #change the images in this template to your own Harbor registry: pull them first, tag them, then push them to your Harbor (in this kubeasz 3.6.2 setup the template in use is calico-v3.24.yaml.j2, modified below; the v3.15 examples here are from an older cluster)
An important setting is CALICO_NETWORKING_BACKEND: "brid"
root@k8s-master1:~/.docker# grep CALICO_NETWORKING_BACKEND /etc/kubeasz/clusters/k8s-01/ -R
/etc/kubeasz/clusters/k8s-01/config.yml:CALICO_NETWORKING_BACKEND: "brid"  #this selects the BGP (bird) route-learning backend; the default value is fine
Another important setting is value: "{{ CLUSTER_CIDR }}"
root@k8s-master1:~/.docker# cat /etc/kubeasz/roles/calico/templates/calico-v3.15.yaml.j2|grep  'CLUSTER_CIDR'
value: "{{ CLUSTER_CIDR }}" #this variable comes from the cluster hosts file (/etc/kubeasz/clusters/k8s-01/hosts), where SERVICE_CIDR="10.100.0.0/16" and CLUSTER_CIDR="10.200.0.0/16" are configured


#Modify the Calico image pull addresses
#If you re-tag the images, the file to modify is /etc/kubeasz/roles/calico/templates/calico-v3.24.yaml.j2
root@deploy:/etc/kubeasz# cat /etc/kubeasz/roles/calico/templates/calico-v3.24.yaml.j2 |grep image
#These image addresses can be changed to point at your own image registry
          image: easzlab.io.local:5000/calico/cni:{{ calico_ver }}
          imagePullPolicy: IfNotPresent
          image: easzlab.io.local:5000/calico/node:{{ calico_ver }} 
          imagePullPolicy: IfNotPresent
          image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
          imagePullPolicy: IfNotPresent
          image: easzlab.io.local:5000/calico/kube-controllers:{{ calico_ver }}
          imagePullPolicy: IfNotPresent

#The pod IP pool variable CALICO_IPV4POOL_CIDR is configured via /etc/kubeasz/clusters/k8s-cluster1/hosts
root@deploy:/etc/kubeasz# cat /etc/kubeasz/roles/calico/templates/calico-v3.24.yaml.j2 |grep  CALICO_IPV4POOL_CIDR
            - name: CALICO_IPV4POOL_CIDR
root@deploy:/etc/kubeasz# cat /etc/kubeasz/clusters/k8s-cluster1/hosts|grep CLUSTER_CIDR
CLUSTER_CIDR="10.200.0.0/16"
#A YAML file downloaded from the official site would need this address changed to the pod subnet of your own cluster; with kubeasz the value is fixed in the cluster hosts file
#kubeasz also supports deploying k8s clusters on arm64
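
#For reference, after rendering, the calico-node DaemonSet ends up with roughly this env entry (value taken from CLUSTER_CIDR in the hosts file above; indentation approximate):
            - name: CALICO_IPV4POOL_CIDR
              value: "10.200.0.0/16"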

#If the network is slow, the images already on the deploy host can be pushed to the Harbor registry and then pulled from Harbor
root@deploy:/etc/kubeasz# docker images
REPOSITORY                                           TAG       IMAGE ID       CREATED         SIZE
registry                                             2         0ae1560ca86f   2 weeks ago     25.4MB
easzlab/kubeasz                                      3.6.2     6ea48e5ea9d3   6 weeks ago     157MB
easzlab/kubeasz-ext-bin                              1.8.0     633a9737e967   2 months ago    636MB
coredns/coredns                                      1.11.1    cbb01a7bd410   2 months ago    59.8MB
easzlab.io.local:5000/coredns/coredns                1.11.1    cbb01a7bd410   2 months ago    59.8MB
easzlab/metrics-server                               v0.6.4    c1623971df95   2 months ago    68.9MB
easzlab.io.local:5000/easzlab/metrics-server         v0.6.4    c1623971df95   2 months ago    68.9MB
easzlab/k8s-dns-node-cache                           1.22.23   81d6450452ae   4 months ago    68.4MB
easzlab.io.local:5000/easzlab/k8s-dns-node-cache     1.22.23   81d6450452ae   4 months ago    68.4MB
calico/kube-controllers                              v3.24.6   baf4466ddf40   5 months ago    77.5MB
easzlab.io.local:5000/calico/kube-controllers        v3.24.6   baf4466ddf40   5 months ago    77.5MB
easzlab.io.local:5000/calico/cni                     v3.24.6   ca9fea5e07cb   5 months ago    212MB
calico/cni                                           v3.24.6   ca9fea5e07cb   5 months ago    212MB
easzlab.io.local:5000/calico/node                    v3.24.6   3953a481aa9d   5 months ago    245MB
calico/node                                          v3.24.6   3953a481aa9d   5 months ago    245MB
easzlab/kubeasz-k8s-bin                              v1.26.1   f5cb65506fc7   8 months ago    1.17GB
easzlab/pause                                        3.9       78d53e70b442   12 months ago   744kB
easzlab.io.local:5000/easzlab/pause                  3.9       78d53e70b442   12 months ago   744kB
kubernetesui/dashboard                               v2.7.0    07655ddf2eeb   13 months ago   246MB
easzlab.io.local:5000/kubernetesui/dashboard         v2.7.0    07655ddf2eeb   13 months ago   246MB
kubernetesui/metrics-scraper                         v1.0.8    115053965e86   16 months ago   43.8MB
easzlab.io.local:5000/kubernetesui/metrics-scraper   v1.0.8    115053965e86   16 months ago   43.8MB

#Create the cert directory on the deploy server
mkdir -p /etc/docker/certs.d/harbor.yyq.cn/
#Copy the certificates from the Harbor server to the deploy server
rsync -a /apps/harbor/certs/ca.crt /apps/harbor/certs/harbor.yyq.cn.key /apps/harbor/certs/harbor.yyq.cn.crt /apps/harbor/certs/harbor.yyq.cn.cert  10.10.1.210:/etc/docker/certs.d/harbor.yyq.cn/

#Log in
root@deploy:/etc/docker/certs.d/harbor.yyq.cn# docker login harbor.yyq.cn
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

#Tag the images
docker tag calico/cni:v3.24.5  harbor.yyq.cn/baseimage/cni:v3.24.5 #the image that installs the CNI network plugin on every node
docker tag calico/node:v3.24.5  harbor.yyq.cn/baseimage/node:v3.24.5 #calico-node, which runs on every node and handles route learning/sync
docker tag calico/kube-controllers:v3.24.5  harbor.yyq.cn/baseimage/kube-controllers:v3.24.5 #the Calico controller component
#Push the images
docker push harbor.yyq.cn/baseimage/node:v3.24.5
docker push harbor.yyq.cn/baseimage/cni:v3.24.5
docker push harbor.yyq.cn/baseimage/kube-controllers:v3.24.5

#Modify the image addresses in the template
root@harbor-deploy:/etc/kubeasz/clusters/k8s-cluster1# cat /etc/kubeasz/roles/calico/templates/calico-v3.24.yaml.j2 |grep image
          image: harbor.yyq.cn/baseimage/cni:v3.24.5
          imagePullPolicy: IfNotPresent
          image: harbor.yyq.cn/baseimage/node:v3.24.5
          imagePullPolicy: IfNotPresent
          image: harbor.yyq.cn/baseimage/node:v3.24.5
          imagePullPolicy: IfNotPresent
          image: harbor.yyq.cn/baseimage/kube-controllers:v3.24.5
          imagePullPolicy: IfNotPresent


#On the Harbor server, pull the pause (sandbox) image and push it to your own Harbor registry
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9 harbor.yyq.cn/baseimage/pause:3.9
docker push harbor.yyq.cn/baseimage/pause:3.9
#Modify the cluster config file on the deploy server
root@deploy:~# cat /etc/kubeasz/clusters/k8s-cluster1/config.yml |grep pause:3.9
SANDBOX_IMAGE: "harbor.yyq.cn/baseimage/pause:3.9"
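
#After step 03 is (re)applied to the nodes, the rendered containerd config should pick this up; a quick check on any master/node host:
grep sandbox_image /etc/containerd/config.toml
#expected to show: sandbox_image = "harbor.yyq.cn/baseimage/pause:3.9"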


#The images can also be pre-pulled on the master and node hosts; remember to create the baseimage project in Harbor first
nerdctl pull harbor.yyq.cn/baseimage/node:v3.24.5
nerdctl pull harbor.yyq.cn/baseimage/cni:v3.24.5
nerdctl pull harbor.yyq.cn/baseimage/kube-controllers:v3.24.5
#Images used with kubeasz 3.6.2 (calico v3.24.6)
nerdctl pull harbor.yyq.cn/baseimage/cni:v3.24.6
nerdctl pull harbor.yyq.cn/baseimage/node:v3.24.6
nerdctl pull harbor.yyq.cn/baseimage/kube-controllers:v3.24.6

#Now run the deployment; whether pod networking works depends on this step 06
root@deploy:/etc/kubeasz# ./ezctl setup k8s-cluster1 06
ansible-playbook -i clusters/k8s-cluster1/hosts -e @clusters/k8s-cluster1/config.yml  playbooks/06.network.yml
*** Component Version *********************
*******************************************
*   kubernetes: v1.26.1
*   etcd: v3.5.9
*   calico: v3.24.6
*******************************************
2023-10-21 00:17:59 INFO cluster:k8s-cluster1 setup step:06 begins in 5s, press any key to abort:
TASK [calico : 准备 calicoctl配置文件] ************************************************************************************************************
#If this task does not progress for a long time, the pause image probably cannot be pulled
changed: [10.10.1.200]
changed: [10.10.1.203]
changed: [10.10.1.202]
changed: [10.10.1.201]
changed: [10.10.1.204]
FAILED - RETRYING: 轮询等待calico-node 运行 (15 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (15 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (15 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (15 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (15 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (14 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (14 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (14 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (14 retries left).
FAILED - RETRYING: 轮询等待calico-node 运行 (14 retries left).
#If this task does not progress for a long time, the pause image probably cannot be pulled


#Check
root@harbor-deploy:/etc/kubeasz/roles/kube-master# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-8698b4b5f5-47pkf   1/1     Running   0          11m
kube-system   calico-node-2258d                          1/1     Running   0          11m
kube-system   calico-node-qmqlc                          1/1     Running   0          11m
kube-system   calico-node-trr7m                          1/1     Running   0          11m
kube-system   calico-node-v5zfk                          1/1     Running   0          11m
##Check the Calico network
root@node1:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 10.0.0.100   | node-to-node mesh | up    | 07:22:50 | Established |
| 10.0.0.101   | node-to-node mesh | up    | 07:22:50 | Established |
| 10.0.0.104   | node-to-node mesh | up    | 07:22:50 | Established |
+--------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.
#View the routing table
root@node1:~# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.0.2        0.0.0.0         UG    0      0        0 eth0
10.0.0.0        0.0.0.0         255.255.255.0   U     0      0        0 eth0
10.200.61.64    10.0.0.101      255.255.255.192 UG    0      0        0 tunl0
10.200.104.0    10.0.0.104      255.255.255.192 UG    0      0        0 tunl0
10.200.166.128  0.0.0.0         255.255.255.192 U     0      0        0 *
10.200.207.0    10.0.0.100      255.255.255.192 UG    0      0        0 tunl0

#Copy the kubectl admin config to the master1 node
rsync -a /root/.kube 10.0.0.100:/root
root@master1-etcd1:~# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-8698b4b5f5-47pkf   1/1     Running   0          13m
kube-system   calico-node-2258d                          1/1     Running   0          13m
kube-system   calico-node-qmqlc                          1/1     Running   0          13m
kube-system   calico-node-trr7m                          1/1     Running   0          13m
kube-system   calico-node-v5zfk                          1/1     Running   0          13m
#View the /root/.kube/config file
root@master1-etcd1:~# cat /root/.kube/config 
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVVEZoMzVmU3NtYVV2TjZrbEFBeWE1cEU2cDNvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEZqQVVCZ05WQkFNVERXdDFZbVZ5CmJtVjBaWE10WTJFd0lCY05Nak14TURJeU1EY3hNREF3V2hnUE1qRXlNekE1TWpnd056RXdNREJhTUdReEN6QUoKQmdOVkJBWVRBa05PTVJFd0R3WURWUVFJRXdoSVlXNW5XbWh2ZFRFTE1Ba0dBMVVFQnhNQ1dGTXhEREFLQmdOVgpCQW9UQTJzNGN6RVBNQTBHQTFVRUN4TUdVM2x6ZEdWdE1SWXdGQVlEVlFRREV3MXJkV0psY201bGRHVnpMV05oCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdkpycDNVWmtDZHNiN0MxREk1dTIKUlFQcnRJcFJ6ZmNVajEzNnZkTDFqanZqRWRxQTVYU3BjQTIrYm54RTNBM093cTRFai9wNHMzS05wdHU4VXJ4aAp6QmlsMlJrMjNIN0xITVh5bmFKdXlHTldoNURQRVErMGJVdmNKdGNrNlZmdUhlaEFQTE1xQmxoOXBleGl3Wk0zCkdSZVg0QjRCOTI2T2lyRkVFVUpzTEZvMFRoZEwxTERjcVBlakkzTG9jbVRWc3ZzN0xxc3dPU0Q0ZDRHQmZkakgKVU1LZ2NtVmZaNkRiYkppQ1dtVmw2ZzZObENrZ2dYOGVVdERBU2Jid0swUTNyeDc3ZU96RVEyZXVHOXJuQ2F4WgpSY1NwbVdEVXZWYlRPdzFyUm4zNkFYZ1lMUW5DbHpaRWV2TTNFWkZHU0ppam14N0FSRXlKL3gyL0JRSThFOXloCjB3SURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVVNy9WMytqczNRQWo5cG5lOGgvZlk1Vml1MFF3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUR3QQpLT1Z0RDIyWTZnc1QxVWs2ako2eUZPS1JFVU9nQmZEVjJSSHY0M3V4TUFVSWExcnRFam1tQW53elZOeEswdTNPClNCY2lHQ1FJTGU5cExBWkdkUzVUMVdBTTVOeDdjMWRXSXdqdzRoYXYvNXpEd01WNnNCNm0vV0RGZXNtUFRtaFkKUDZmTW1Lb3hTOWt0NWFjRnlqTzdBZlpYdElTSnNjSlgwQ2RtbUJBS3huN3pPalhRaTJTcGs1SklJbFNya1lZNgpOeEFvV0lLRnN2dzB5eHFQaXF4ckxVZzlPY1dudHZtbnlWNGVIRmQyNGpKTXhFeVlQaHQzTHZLeDFrbnQ5RHlPClp1Z2V3bGxwYThZQVlCaG9sV21hdlFGOHBEQVIvT1hTQi9vMXhrZ2o0aVVpRWkzS29TTklhUFJzWEM0K3pLUFUKOEF1cUhJcjR5ckdMU09UaGpmST0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://10.0.0.100:6443 #this can be pointed at the VIP to test whether the load balancer you set up works
  name: cluster1
contexts:
- context:
    cluster: cluster1
    user: admin
  name: context-cluster1
current-context: context-cluster1
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQyakNDQXNLZ0F3SUJBZ0lVRnpXSEk2VVE3b010cHhDNGVuYm5FMENEOHpzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEZqQVVCZ05WQkFNVERXdDFZbVZ5CmJtVjBaWE10WTJFd0lCY05Nak14TURJeU1EY3hNREF3V2hnUE1qRXhPVEExTWpneU16RXdNREJhTUdjeEN6QUoKQmdOVkJBWVRBa05PTVJFd0R3WURWUVFJRXdoSVlXNW5XbWh2ZFRFTE1Ba0dBMVVFQnhNQ1dGTXhGekFWQmdOVgpCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUTh3RFFZRFZRUUxFd1pUZVhOMFpXMHhEakFNQmdOVkJBTVRCV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTdZUzdTdDlNZ3NVcVJGbU4KUHEwNTBZTWhyWEFIc1NnSnU4dWZ5S3hWbzNuSzRCL1hhdmJ0ZzYxWlBFanJQQVVmakR5LzB0VGRqN2NhNmFyVwpJNUk2TU85S3dqaDk1ejlIcFZIQVRkd2VrNUJmc2I5ZHdGMWZrMFEvRXowcklwTTdLY2E3bnNCbFFOQlN1VVdnCmhhT1cyMjlneHI1UVJrbisra0NVdVlib3FXV2c0VC9aMzZYS3B1WkY3d2NXNldFRU5SSW5ROTRLMThZeXZoY1IKTlVyazRIejFyVDZYRGZKV1d1QW02L3dibjM5RFludEJBbGVDY1hSTTdaL3N4Z29CazREcEhJRU5pa1hBc0ttdgpEMnl3bWNGcFhkZG1oOTluTkhOZUtJV0xka3hneU5lZ0FzS3VzTjFpbld3UFVRREg3cFM2cVJsT1lTVk9QaFBMCldJa1lGd0lEQVFBQm8zOHdmVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUgKQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SFFZRFZSME9CQllFRkxaNWtsWEFKT3NISHpweApka2ZzcTgxaVdOdnBNQjhHQTFVZEl3UVlNQmFBRkZPLzFkL283TjBBSS9hWjN2SWYzMk9WWXJ0RU1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUJnT2F4NU1LQWNEMysvUWZldTBtN1lpcXBVcjJBOWNvanBTck9adk5TeFkxUXAKUXEwMC9pazNpYlZKYzBveWZ2alI4S1hkSjYvOXZpcG45VUIwTFdabTRyMlpLRUU0ZEJURHFSWE1NbjkrcU1XNApVNEJjU1hwdjBldXNiQlVRYzdCR1BFeWlRZU9pRWFSM0h4OGhmK09PSWtJSzBoNStOL3dVQWdYKzBEM2ZTVXo3CkNhU1FPOFd6KytjcDdYbmFwYkxZV0l6QkpYN0drQ25WVDVqaXF4TFdaaXJ3NHJIcmZqa1ZEcFJ5WkpudmNnL28KOWFKSWRmQUw0NXlaNFJYQ1ZTd094WnoxdENadDNkb1ZUOXlQQmEwUDZEaUdBNWpQK2h0K0JRVWNCdEJwMmZ4SwozSEhwN0plOWZwZTY1RlA4R1VIZkxEbjUwMmhnNFg0NTVGUUg0UjVlCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBN1lTN1N0OU1nc1VxUkZtTlBxMDUwWU1oclhBSHNTZ0p1OHVmeUt4Vm8zbks0Qi9YCmF2YnRnNjFaUEVqclBBVWZqRHkvMHRUZGo3Y2E2YXJXSTVJNk1POUt3amg5NXo5SHBWSEFUZHdlazVCZnNiOWQKd0YxZmswUS9FejBySXBNN0tjYTduc0JsUU5CU3VVV2doYU9XMjI5Z3hyNVFSa24rK2tDVXVZYm9xV1dnNFQvWgozNlhLcHVaRjd3Y1c2V0VFTlJJblE5NEsxOFl5dmhjUk5Vcms0SHoxclQ2WERmSldXdUFtNi93Ym4zOURZbnRCCkFsZUNjWFJNN1ovc3hnb0JrNERwSElFTmlrWEFzS212RDJ5d21jRnBYZGRtaDk5bk5ITmVLSVdMZGt4Z3lOZWcKQXNLdXNOMWluV3dQVVFESDdwUzZxUmxPWVNWT1BoUExXSWtZRndJREFRQUJBb0lCQUFFOFlKRWFLbGEvMmVGegpLZFg2cHNOZFVFNkx0NjZIUzgycldKQjl2UkJWc09XTHBONU1uSjNEeUNRVktRd3Q1cVVmSjh4Y0NTOFhQOCtaCmNQWE1hL3NYTTZkaERkNm5LS05Ha3A2VkY1K2k3NnlJRjEvSjNKWnlrM1Z4S3dsS0JOUEowZTRZaUM1WEp5Y0oKb3JVdlRiaWtrcmRXQkNycGRhY3RzTmhxU0VtaFdDeTNwYXlCbEROcFZ6ejBwVFZBaHVlbVVqWGtnNXlnOVg1OAplN3hZRjJvc25hSUdJRVdMYjNCRUpidDRJa1NmeDZQTGwweG8rNGloc1JPTW0ySURaVzdsRFJFQzVkcllQalB0CmtXWWd5aHpMbFJMZ0FLUFN3ZUN5TmR2ZVRVRW80b0YrMUxKek9WdTBiVzl3MzVuRDRYSW8zU0tJcnRKaHorb2cKalZFMlpMa0NnWUVBL2xVeTI5WitiOGl2emR5ckdRSUE1SWNGRjBac1ZoV3pXeUJ0VnZ1MFlhZGpGNFZkN1JaeApucFRNbE52dHdJOVVyZDdmcjdaeFExdXpTRERkNHR2R1FtaHdiMVlkaFh2R3lMbkhzWklsMGJaSG5oUEkrZU9JClhHNlM2bnArR0hnQ3BRN1BqSUVuZ2c5L1hnWFZMT3RRWU1VZThCaDJlTzJyb2sybmd3YVpHUE1DZ1lFQTd4TlIKQSswdjFUNm1sMkZHR1FIWm4rd3hNOTFlangwOENsVGIzQ015R0d0Z2xOcTdwL1k0cVB1d2VaT0k3Q0xtRWJRNwpaMEV0bTdiK0RCQlR1MStMZTJKQlRXOVVuZ1ZwNlpKMDQzRWZwdVUwMHpRNlZML1JvdFRUUmROeDlnWnhCY09KCnp2cUlqTWNRbGxWTEIzUlhxSElNZDYrTmk5U1lPcCsraDQ3MnpVMENnWUVBc0Zaa0Y5WkUvYjQ0WGxSVWNOZFoKeFJqMFo2blZMVzZJVjNOSlBCanlmUTZ3ZnJ0aVh5TERqajcvQldyakVqS09kS3Jsam8vQlFnR1BjVXNvWE1VaQpWaUxSYWZCY01aT2JSTXJDY1JTeWtnOSs0bFEzY1VMWDViWm91SmhMV3kzQ0w2endMQ3ZVTjdJRkRpeWZIRERmCkh1a3RSU3pBMzZGeDhldERiRzRqc2c4Q2dZRUFrY2Q3alNOaUdXdThkVzR4S2lxQW93Mkk2eEUvQ1pGUC9SVEYKZzdSS1phMkVGUUJUa1JXSWN6SVY1K3hZZjNUQVNXZFhHWjBhekdxRytxQXlEbjB5TmlneG5hQVhQc3dhU3Z5dApIY3JxKy8ramVHM09hOE1UZDNEQzZ6OEZySGs5RGNDd0ZXK2JPQXhpUnVqM2VUSXVDVjZSejZGU2RFTk1GK00wCmNNQnlLYmtDZ1lCTGJwSWpVZjhRMTVXdnhwUFdYczFQMDcyOGQ3bWhTMGQrU0drd3FPRTN4NmhETFBnNnY0VDEKdlVod0lkU1ZyT3VYSXltUzkrSXp1OVY2SThXMkFHWHZ6ZGRWVVhpVlZRR0tpUmlLZG9aMDlWUEoyUCtQTUprWgpTUU5YSHV2N0xIeHJ4K1oreFl0bFk0cVBJY1ZPM1dCWEJtajVzV2tpSS85ZFZDK2tyQVNXQnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=


#Change the server address to the VIP for testing; this offloads the node-local kube-lb on the master and node hosts and allows external access through the VIP
root@master1-etcd1:~# cat /root/.kube/config |grep server
    server: https://10.0.0.111:6443
root@master1-etcd1:~# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-8698b4b5f5-47pkf   1/1     Running   0          16m
kube-system   calico-node-2258d                          1/1     Running   0          16m
kube-system   calico-node-qmqlc                          1/1     Running   0          16m
kube-system   calico-node-trr7m                          1/1     Running   0          16m
kube-system   calico-node-v5zfk                          1/1     Running   0          16m
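
#Instead of editing the file by hand, the same change can be made with kubectl (cluster name cluster1 as shown in the config above):
kubectl config set-cluster cluster1 --server=https://10.0.0.111:6443 --kubeconfig=/root/.kube/config
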
Test pod networking
kubectl run net-test --image=centos:7.9.2009 sleep 100000000
kubectl run net-test1 --image=centos:7.9.2009 sleep 100000000
root@harbor-deploy:/etc/kubeasz/roles/kube-master# kubectl run net-test --image=centos:7.9.2009 sleep 100000000
pod/net-test created
root@harbor-deploy:/etc/kubeasz/roles/kube-master# kubectl run net-test1 --image=centos:7.9.2009 sleep 100000000
pod/net-test1 created

Check which node each pod landed on; the two test pods must be on different nodes
root@harbor-deploy:/etc/kubeasz# vim /etc/kubeasz/roles/calico/tasks/main.yml 
root@harbor-deploy:/etc/kubeasz# kubectl get pod -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE                   NOMINATED NODE   READINESS GATES
default       net-test                                   1/1     Running   0          8m33s   10.200.104.1     worker-02-10.0.0.104   <none>           <none>
default       net-test1                                  1/1     Running   0          8m32s   10.200.166.129   worker-01-10.0.0.103   <none>           <none>
kube-system   calico-kube-controllers-8698b4b5f5-47pkf   1/1     Running   0          26m     10.0.0.103       worker-01-10.0.0.103   <none>           <none>
kube-system   calico-node-2258d                          1/1     Running   0          26m     10.0.0.103       worker-01-10.0.0.103   <none>           <none>
kube-system   calico-node-qmqlc                          1/1     Running   0          26m     10.0.0.104       worker-02-10.0.0.104   <none>           <none>
kube-system   calico-node-trr7m                          1/1     Running   0          26m     10.0.0.101       master-02-10.0.0.101   <none>           <none>
kube-system   calico-node-v5zfk                          1/1     Running   0          26m     10.0.0.100       master-01-10.0.0.100   <none>           <none>

#Exec into the net-test pod
root@harbor-deploy:/etc/kubeasz# kubectl exec -it net-test bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
#Ping the net-test1 pod IP
[root@net-test /]# ping 10.200.166.129
PING 10.200.166.129 (10.200.166.129) 56(84) bytes of data.
64 bytes from 10.200.166.129: icmp_seq=1 ttl=62 time=0.797 ms
64 bytes from 10.200.166.129: icmp_seq=2 ttl=62 time=0.452 ms
^C
--- 10.200.166.129 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1024ms
rtt min/avg/max/mdev = 0.452/0.624/0.797/0.174 ms
#Test external connectivity
[root@net-test /]# ping 223.5.5.5     
PING 223.5.5.5 (223.5.5.5) 56(84) bytes of data.
64 bytes from 223.5.5.5: icmp_seq=1 ttl=127 time=13.2 ms
64 bytes from 223.5.5.5: icmp_seq=2 ttl=127 time=14.6 ms
^C
--- 223.5.5.5 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 13.286/13.950/14.614/0.664 ms
#coredns is not deployed yet, so domain names cannot be resolved
[root@net-test /]# ping www.baidu.com
ping: www.baidu.com: Name or service not known

Configure kubectl and nerdctl command completion (same procedure on Ubuntu and CentOS)

apt install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
yum install -y epel-release bash-completion   #on CentOS
source /usr/share/bash-completion/bash_completion
source <(nerdctl completion bash)
echo "source <(nerdctl completion bash)" >> ~/.bashrc
source ~/.bashrc
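
#Optionally add a short alias with completion (the recipe from the kubectl documentation):
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
source ~/.bashrc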

Deploy coredns

#Pull the image
nerdctl pull coredns/coredns:1.9.4

#Tag it and push it to your own Harbor registry
root@master1-etcd1:/yaml/coredns# nerdctl tag  coredns/coredns:1.9.4 harbor.yyq.cn/baseimage/coredns:1.9.4
root@master1-etcd1:/yaml/coredns# nerdctl push harbor.yyq.cn/baseimage/coredns:1.9.4
INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest.list.v2+json, sha256:c5786b43be1d391702d4aebaa470f3d0d882f2e6aeb2184ac63082d292c06cfa) 
WARN[0000] skipping verifying HTTPS certs for "harbor.yyq.cn" 
index-sha256:c5786b43be1d391702d4aebaa470f3d0d882f2e6aeb2184ac63082d292c06cfa:    done           |++++++++++++++++++++++++++++++++++++++| 
manifest-sha256:490711c06f083f563700f181b52529dab526ef36fdac7401f11c04eb1adfe4fd: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:a81c2ec4e946de3f8baa403be700db69454b42b50ab2cd17731f80065c62d42d:   done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 0.8 s                                                                    total:  2.9 Ki (3.7 KiB/s)   

#Modify the image address in the YAML file
root@master1-etcd1:/yaml/coredns# cat coredns-v1.9.4.yaml|grep image
        image: harbor.yyq.cn/baseimage/coredns:1.9.4
        imagePullPolicy: IfNotPresent

#Deploy
root@master1-etcd1:/yaml/coredns# kubectl apply -f coredns-v1.9.4.yaml 
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

#Check
root@harbor-deploy:/etc/kubeasz# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS      AGE
default       net-test                                   1/1     Running   0             149m
default       net-test1                                  1/1     Running   0             149m
kube-system   calico-kube-controllers-8698b4b5f5-47pkf   1/1     Running   1 (28m ago)   167m
kube-system   calico-node-8hbv5                          1/1     Running   0             75m
kube-system   calico-node-9kgrs                          1/1     Running   0             74m
kube-system   calico-node-gj6xh                          1/1     Running   0             77m
kube-system   calico-node-l88vt                          1/1     Running   0             77m
kube-system   calico-node-qkxlg                          1/1     Running   0             73m
kube-system   calico-node-xqvgt                          1/1     Running   0             73m
kube-system   coredns-5cd748d9cb-p8zb9                   1/1     Running   0             8m9s
kube-system   coredns-5cd748d9cb-qsc2k                   1/1     Running   0             8m9s
#Test DNS resolution
root@master1-etcd1:~# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
default       net-test                                   1/1     Running   0          6m48s
default       net-test1                                  1/1     Running   0          6m48s
kube-system   calico-kube-controllers-8698b4b5f5-z5qln   1/1     Running   0          9m15s
kube-system   calico-node-7lhjv                          1/1     Running   0          9m15s
kube-system   calico-node-bhbls                          1/1     Running   0          9m15s
kube-system   calico-node-fd8lj                          1/1     Running   0          9m15s
kube-system   calico-node-hdn77                          1/1     Running   0          9m15s
kube-system   calico-node-qs6b8                          1/1     Running   0          9m15s
kube-system   calico-node-s9gd7                          1/1     Running   0          9m15s
kube-system   coredns-5879bb4b8c-65f5j                   1/1     Running   0          2m29s
kube-system   coredns-5879bb4b8c-s4zs2                   1/1     Running   0          2m29s
#Exec into the container
root@master1-etcd1:~# kubectl exec -it net-test bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@net-test /]# cat /etc/resolv.conf 
search default.svc.yyq.local svc.yyq.local yyq.local
nameserver 10.100.0.2
options ndots:5
#Ping www.baidu.com
[root@net-test /]# ping www.baidu.com
PING www.a.shifen.com (120.232.145.144) 56(84) bytes of data.
64 bytes from 120.232.145.144 (120.232.145.144): icmp_seq=1 ttl=127 time=16.3 ms
64 bytes from 120.232.145.144 (120.232.145.144): icmp_seq=2 ttl=127 time=17.1 ms
64 bytes from 120.232.145.144 (120.232.145.144): icmp_seq=3 ttl=127 time=20.9 ms
^C
--- www.a.shifen.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2004ms
rtt min/avg/max/mdev = 16.386/18.149/20.927/1.990 ms
#With coredns running, both internal and external domain names resolve. Once coredns is in place, availability is the priority: if it goes down, service-to-service access through Service names breaks, because DNS answers are cached for only 30 seconds by default. After those 30 seconds the cache expires, so with coredns down anything that calls other services via a Service name stops resolving. For high availability, run coredns with multiple replicas.

#Dump the coredns Deployment (which manages the coredns pods) as YAML
kubectl get deployments.apps -n kube-system -o yaml
#Or edit the Deployment in place with kubectl edit
kubectl edit deployments coredns -n kube-system
spec:
  progressDeadlineSeconds: 600
  replicas: 2   #change the replica count here
  revisionHistoryLimit: 10
  selector:
    matchLabels:

#Scaling coredns: creating a second coredns pod does not interrupt name resolution; once the new pod is up, requests are effectively load-balanced across both coredns pods. If the query volume is high, scale out further, and also raise the resource limits (for example 2 CPU cores and 3 GiB of memory). Raising limits is the first performance lever; if that is still not enough, add more replicas.
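#The replica count can also be changed non-interactively instead of using kubectl edit (3 replicas here is just an example value):
kubectl scale deployment coredns -n kube-system --replicas=3
kubectl get pod -n kube-system | grep coredns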

#The Service is named kube-dns for historical reasons: older Kubernetes used kube-dns, and many components have the kube-dns Service name hard-coded when they look up DNS. Renaming it would require too many changes, so the name was kept.
root@harbor-deploy:/etc/kubeasz# kubectl get svc -A
NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       kubernetes   ClusterIP   10.100.0.1   <none>        443/TCP                  34m
kube-system   kube-dns     ClusterIP   10.100.0.2   <none>        53/UDP,53/TCP,9153/TCP   9m8s
#Check the default Services: kubernetes is the DNS name of the apiserver, and kube-dns is the Service in front of coredns. It still shows kube-dns because of this legacy: components such as the dashboard have the kube-dns Service name hard-coded for DNS lookups, so even though coredns is deployed, the Service keeps the name kube-dns; that way those components keep working and kube-dns still resolves.

#Ping directly from the container: kubernetes is the DNS name of the apiserver Service, resolved through the cluster domain suffix
root@master1-etcd1:/yaml/coredns# kubectl exec -it net-test bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@net-test /]# ping kubernetes
PING kubernetes.default.svc.yyq.local (10.100.0.1) 56(84) bytes of data.
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=1 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=2 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=3 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=4 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=5 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=6 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=7 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=8 Destination Port Unreachable
From kubernetes.default.svc.yyq.local (10.100.0.1) icmp_seq=9 Destination Port Unreachable

#How to view the coredns logs (errors go to standard output)
root@master1-etcd1:/yaml/coredns# kubectl logs -f coredns-5879bb4b8c-z2z2j -n kube-system 
.:53
[INFO] plugin/reload: Running configuration SHA512 = a47400b66ffbd4d0dad79da730ccfe4cad2f24d01fa793d0e8424b421f31fa8d2aa65e147fc2dcd029e643538e760e7568ebac022814a3d08c5bb6764f111c22
CoreDNS-1.9.4
linux/amd64, go1.19.1, 1f0a41a

Deploy the dashboard

#Review the YAML files
root@master1-etcd1:/yaml/dashbord# ls -l
total 16
-rw-r--r-- 1 root root  212 Oct 22 17:55 admin-secret.yaml
-rw-r--r-- 1 root root  374 Oct 22 17:55 admin-user.yaml
-rw-r--r-- 1 root root 7690 Oct 24 16:41 dashboard-v2.7.0.yaml

root@master1-etcd1:/yaml/dashbord# kubectl apply -f .
secret/dashboard-admin-user created
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user unchanged
namespace/kubernetes-dashboard unchanged
serviceaccount/kubernetes-dashboard unchanged
service/kubernetes-dashboard unchanged
secret/kubernetes-dashboard-certs unchanged
secret/kubernetes-dashboard-csrf unchanged
secret/kubernetes-dashboard-key-holder unchanged
configmap/kubernetes-dashboard-settings unchanged
role.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
deployment.apps/kubernetes-dashboard unchanged
service/dashboard-metrics-scraper unchanged
deployment.apps/dashboard-metrics-scraper unchanged


#Check
root@master1-etcd1:/yaml/dashbord# kubectl get pod -n kubernetes-dashboard
NAME                                        READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-7bc864c59-tbbgr   1/1     Running   0          4m17s
kubernetes-dashboard-56f46996f7-9nmb7       1/1     Running   0          4m17s


#Get the secret name
root@master1-etcd1:/yaml/dashbord# cp /root/.kube/config /opt/kubeconfig
root@master1-etcd1:/yaml/dashbord# kubectl get secrets -A|grep admin
kubernetes-dashboard   dashboard-admin-user              kubernetes.io/service-account-token   3      18s
#Get the token via the secret name
root@master1-etcd1:/yaml/dashbord# kubectl describe secrets -n kubernetes-dashboard dashboard-admin-user
Name:         dashboard-admin-user
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 3b5c3c85-3774-41c2-b326-dd0e40200fcf

Type:  kubernetes.io/service-account-token

Data
====
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImVXeWJBc2VMVlg4OVVfa2RoUXFNbDhNVDBJZ1dNQzZDLVJTb2V1U0lPWjQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiM2I1YzNjODUtMzc3NC00MWMyLWIzMjYtZGQwZTQwMjAwZmNmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.BETsTOpL_I-34N9tQRj7T3KJYONe8GJ5qLEUckPEEFqRce7cIo42S2ibxv42wzPNZdi8URwWrzlA1cHkw2kENMeR7ps3XbIxrN4ec9mZ0A6Bl9W2Op7gs43JhpAESMrav9EXonE3pfdnXX135S2ll8UlpVwXs1Aod3L_gCW_iu-mgK1ZRnTh0Z69aAV1TCA11vLRsCTnI7jodLaPDzJ9SgBjzkafv0BGUgvQX4KsLlOsf9I6CT3UZL5WSrenBeWKmmoY8rV-sKpooTh0XfAICgyCDgf0ZF_dMjtQFPP9N7yeQ-n_d6ZQBZ9hIS2VPj3cZds8UiMUxBhF_Fgv7sFpbA
ca.crt:     1310 bytes
namespace:  20 bytes
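#On Kubernetes 1.24+ (so also on this 1.26 cluster) a short-lived token can alternatively be issued straight from the ServiceAccount, without creating a Secret:
kubectl -n kubernetes-dashboard create token admin-user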

#Build a kubeconfig login file that embeds the token
root@master1-etcd1:/yaml/dashbord# cp /root/.kube/config /opt/kubeconfig
root@master1-etcd1:/yaml/dashbord# cat /opt/kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVRjJEMWhkZERxM2RJT1AzbnZmSk5VOUhYYVFZd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEZqQVVCZ05WQkFNVERXdDFZbVZ5CmJtVjBaWE10WTJFd0lCY05Nak14TURJeU1UZ3dNREF3V2hnUE1qRXlNekE1TWpneE9EQXdNREJhTUdReEN6QUoKQmdOVkJBWVRBa05PTVJFd0R3WURWUVFJRXdoSVlXNW5XbWh2ZFRFTE1Ba0dBMVVFQnhNQ1dGTXhEREFLQmdOVgpCQW9UQTJzNGN6RVBNQTBHQTFVRUN4TUdVM2x6ZEdWdE1SWXdGQVlEVlFRREV3MXJkV0psY201bGRHVnpMV05oCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBblZEeDllMVh1SVIzT0tuRXpINkQKVEpBOHl3QXUxV2hwaTlNTUZYQWVoNnozRXozL2pTYmVTaTNqc3VnWklaNUpQNE9yN2NvUVRHbUt6b3p1UU5EbApITWEzZFVsVHZYQ3lxcS91VGx3Z3h0TjRrOHVyMDBONFVkVG11Mk0vVkJ6S2dMVjA4MmZyQTJLRFFXK3NuRFhXCkRTWlZXaFZ3U2grU2p1UWxWSHVWd2toQ2NlNVZvT2pFWjNTUFVPaVZGY0R0TzFqQzhTaVJsbjVxVm0zWUpXaHEKWXRJMVNpdzhTbE5nTjk1WC9QbVdKcjRFcXpCTlZ3Qm9tOVpxeFZXTEQvZ01xbG9DSlBIVU1LWU01ZlVQd1hBbgordDFqYWJHWGMzY1lWalZGTkZvUXRNUUxDeEE3bmR1M3VWRkF6Zkg0QnFFV0htVVdSMTNqNUlvOC9ETjZETUFXCjJRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVFMjhqMS9iT1A4amwyWk9TY0lkaEgyWFg5OHN3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUMyTQo5YzVrVHpBTTdUM2dRZEplcGJFOUVnWnlYWC9OeHZNaklsYnRXKzVhdS9QQjZXQi9sd2NDUndqOW9TNHV0Yi9GCjhLM3YveHhkK3ZRZGRmNkdzYnNvR0ZvRWJxcGpjeFVwUGI0MFBUcTg5K0dPSElDR0tJMHZNT0c4WXh3UFNsNHAKaS9OSzVuUFY2V09PakxTd3F5dG05OTZDTEdMbVlUMUYxcGV1bUNLd2VTOEZjQ3dyTTFRZnlTR3hQd2VFZm5MUApLQVUvYVVBeDdFOTRmbTR2QWF5N2hYVGlRVjJjQUVuTytvMnJrR0RYWkVBTVplbFJTdzhOUkd6eXRyRXJadmlXCmFVSU5EUXBRd0FkVU9FWHdKRm4wWWZneXVYYWFVRHR0Z255bXJqeG96SE1GbFlnc3E4WGlka2FjSTkxM2xzc2wKVm1EVkRZYkgydStiQU0wSWVTRT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    server: https://10.0.0.111:6443
  name: cluster1
contexts:
- context:
    cluster: cluster1
    user: admin
  name: context-cluster1
current-context: context-cluster1
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQyakNDQXNLZ0F3SUJBZ0lVVktzZFJoVnE1RHMyRkxpY2duMGpwTnM0LzRvd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEZqQVVCZ05WQkFNVERXdDFZbVZ5CmJtVjBaWE10WTJFd0lCY05Nak14TURJeU1UZ3dNREF3V2hnUE1qRXhPVEExTWpreE1EQXdNREJhTUdjeEN6QUoKQmdOVkJBWVRBa05PTVJFd0R3WURWUVFJRXdoSVlXNW5XbWh2ZFRFTE1Ba0dBMVVFQnhNQ1dGTXhGekFWQmdOVgpCQW9URG5ONWMzUmxiVHB0WVhOMFpYSnpNUTh3RFFZRFZRUUxFd1pUZVhOMFpXMHhEakFNQmdOVkJBTVRCV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVYZ2pQWkNBczdCb0x0dHMKMWlUekI1TGRoWi9TVURwVnlZc1pxMXRRd2pGaU1zWjNYNHFQZzBjYVBiYnVuaUxTa3ZzclN4UXBoYW9PUzIzWQpwMWVEUVZLSWVPR3JYa2c1ZERkQU5ra0tmQ3BpOWJmZjdocFIxc3g5dEpkd3I5VXhWTm1QbW8zcXlFN09wdEVGCkw2OFg5cS90dkg4aVlKbmpGaHdTeVY0eWp5ckE4UU9wNjhPb2xGZ0pEQUE2U1ExS3NadW9BT2ZkOWdqOVRQWE8KN2Z2SzBsRFpsdHkyNGxpajJyOGRzSXZGTk1VN2VzSzRvVmhxdDNERXAyWS9CMHAzdk9HKzRIeFlURGJOSFgwUgpLUGRpR3NDYm53b3doTWFNbmVHZVovYUNOQisveVBHcmxVeXcxVlcvVTVCZHcrRlcvVERUVllPS2xGUHl1YlZWCmx6WU4rUUlEQVFBQm8zOHdmVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdIUVlEVlIwbEJCWXdGQVlJS3dZQkJRVUgKQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SFFZRFZSME9CQllFRkhuU1dmMkxoRHZxZG9mRgpoZktIbXVGUGhHUnpNQjhHQTFVZEl3UVlNQmFBRkJOdkk5ZjJ6ai9JNWRtVGtuQ0hZUjlsMS9mTE1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUNZN2JGTEJHNXdySXd3ZUJCRGlyenM5UFBwSGtENkt6RFNwVk1KS21JRzZlMDMKelVmdlZxRk5kRmhraWtsMHQ2LzJHcWZGOW9VOE5WblM2SG1aN3R1d2NIWnlUVmFhMVF5R1pTQVljNmF4U0cwMAoycm4wRDhkVG9nZWhOZS9qMVgzNGVVVWlJWUhMQUI4Y0F3WWtQb1NXOURscnBzRFlKNGRvc0hnMVlVK0NyRi81CjBVR3hOYkt0U0VVMXliQUJBc0V0UmhLWnRDOHYwWWRhVFQxcWJNRlQrVC9YOGVaOTVPTnk2UHRaK3ZYWS9JVUcKNTdCNmFPR2RDdmZiK1M1Znc1NUpqSVZKWXRaSXR5Tk5KZEwzV0kzbkJVWTNwS1FSZzIxQURMQmR4TjNzSG1lTApHalN6WFpzcXNuZHFML3hVTVBDaHRNRmQrZTZ2bUd5NytqcmFZRmg0Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdVhnalBaQ0FzN0JvTHR0czFpVHpCNUxkaFovU1VEcFZ5WXNacTF0UXdqRmlNc1ozClg0cVBnMGNhUGJidW5pTFNrdnNyU3hRcGhhb09TMjNZcDFlRFFWS0llT0dyWGtnNWREZEFOa2tLZkNwaTliZmYKN2hwUjFzeDl0SmR3cjlVeFZObVBtbzNxeUU3T3B0RUZMNjhYOXEvdHZIOGlZSm5qRmh3U3lWNHlqeXJBOFFPcAo2OE9vbEZnSkRBQTZTUTFLc1p1b0FPZmQ5Z2o5VFBYTzdmdkswbERabHR5MjRsaWoycjhkc0l2Rk5NVTdlc0s0Cm9WaHF0M0RFcDJZL0IwcDN2T0crNEh4WVREYk5IWDBSS1BkaUdzQ2Jud293aE1hTW5lR2VaL2FDTkIrL3lQR3IKbFV5dzFWVy9VNUJkdytGVy9URFRWWU9LbEZQeXViVlZsellOK1FJREFRQUJBb0lCQUE1bGE5N1llNVQzQ1c4eApxd1lJUWpFQlRsNlQ1RUJEOXVxNCtGb2JzVzRSWlR6cHVOUmFyZjhUb3M3TmgwSVJFK1ZDeDc2Qkp5UHY2a0lpCm1hR0xvbTJNbVhsQWt3dXpjWG82MjZvYkMweE9BZlp1dWx3Q0FQNDZTenVjVWF2a1VYcnpsRGo3OFJCeFJZc2YKU0ZiR01ZYjlhdWVwdXFKaTc0U0UwMVI4RWxLWWJoM3FKb0pqb3lVNkFwVCtrTkhocmw2eVZ2VGNicVRKRTF5MApKcHZPd1ByV3V6c2NqNGc3c01UZTc0WkY1M0Q1Yk4walRIOHRmYnZQOXVZbzNFVFhLTUFKUWsrOG5vMlB6WCtXCnY4SjJURjZ1NFVkRGdWNm0rQ04wMnVyNnhSVWZJdVJ0QlRHZlFtYytwSFQwWWNwMFFvWUFtT2RTdEhLTTZ1eWgKeG9SeCtSMENnWUVBMGc4UmtMQmk5NnVJelEzVHJCYXpUTFF2TzN1Nm5ZcnYvdk5UWnA0TmhHL2VNSHRUMXI4TQplTG9mRVpBcGJ6QUI4VTd0cFRjTE93SHJmTHA2RW4rK3Nja0lpWEVCSlh2M1hlajVURjBKUFAwR1BzSmJXRVdrCkxPMXlsOUJuV0UvQXdWMitrU2VSQlBOTnpuTGp1N2d6WkF5VTI4djNqRGFESDFPTEJPL2dJUHNDZ1lFQTRnaFUKYVRoemxnc0ZzZWNSVnQ0aU9GamdvK3JQK2FhL0FaVDl4K0ZaazhEcjFvN3dYRCtFcnI5cDFBRWNjeW1iWkFKMQpJQ3gwRjIvODBOb1FJbUVHdkh0U09LZ01udzFTMjBnNnV3WG1wN1Z4ZnJrZFV6cytBeXZ1RTI3Y3o1MmFvS0xjCjNjNUlZSVdpR1hXWG5EZGJ4VHR2ZkNWZTZoK0x6cVFVVU5nR1lwc0NnWUI2UUtiYWJLM01zeHFWTE56cXBQT0oKU3J6S2c4SWllb0FmQTRLZ2tKQVJhZ0lJZVY1Yjl5ak5BcFdlWlNhWVBYcUQ5SW0reEFCWFFWREtjTVVuQ3czeQo3cXQzUWp1R2sxVnVaRzdqRUtxVndqVG1SYnRmN25nVVpjVnhzYXJUN09ha0kya0xsZG1DNWc5OGZjVW90WlhEClBzZEwvTE4vK1NIckUzWXZVMlJBRFFLQmdRRFdCUmtZQURIQnNuSVQvengxeWlNUXcyY0JEa2NhbGY1cG13K3AKQVFGMlc4ZHpBeWtuejZUc2FhWGs3YUJva3M3bHBVWU56c21sbjQzOG0xMkhqK04yK0RYa0c0ckgxeS9MSGRzdQpWdW9OaGtvT3VXekE4R2xKUzRGRHVVY3U5b1lBNy9TOXVLTGpjZVVJd2tWcHp5ai8yY1U0QVpLYmtIOHFqRVlJClBtUGRYUUtCZ0dWSy9tM0kxTVY0K3BTNzdMa3R3Z25FSUU1ZXlmdzZNWUQ3cTRUSkJUSWp6Ni9pRnp5Wi9Mb2oKbFl1MGFFZHJaanJZdE8wZmgveFBJajA3U1dQS2RhZVdJMGRoNE4xU0tERUl5N2o0WVA3aUxhcWNCT3dxdjRaOQp6OUhGUXlpb0tXc2RlN2RhRUlFbm52TWlOY25wMnlUR0g3SGhET09WOEl5TnVUNk5NelZ2Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
#append the token to the admin user entry, indented with 4 leading spaces:
    token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImVXeWJBc2VMVlg4OVVfa2RoUXFNbDhNVDBJZ1dNQzZDLVJTb2V1U0lPWjQifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdXNlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbi11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiM2I1YzNjODUtMzc3NC00MWMyLWIzMjYtZGQwZTQwMjAwZmNmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmVybmV0ZXMtZGFzaGJvYXJkOmFkbWluLXVzZXIifQ.BETsTOpL_I-34N9tQRj7T3KJYONe8GJ5qLEUckPEEFqRce7cIo42S2ibxv42wzPNZdi8URwWrzlA1cHkw2kENMeR7ps3XbIxrN4ec9mZ0A6Bl9W2Op7gs43JhpAESMrav9EXonE3pfdnXX135S2ll8UlpVwXs1Aod3L_gCW_iu-mgK1ZRnTh0Z69aAV1TCA11vLRsCTnI7jodLaPDzJ9SgBjzkafv0BGUgvQX4KsLlOsf9I6CT3UZL5WSrenBeWKmmoY8rV-sKpooTh0XfAICgyCDgf0ZF_dMjtQFPP9N7yeQ-n_d6ZQBZ9hIS2VPj3cZds8UiMUxBhF_Fgv7sFpbA

#Get the login port
root@master1-etcd1:/yaml/dashbord# kubectl get pod -n kubernetes-dashboard -o wide
NAME                                        READY   STATUS    RESTARTS   AGE   IP              NODE                   NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-7bc864c59-tbbgr   1/1     Running   0          16m   10.200.104.14   worker-02-10.0.0.104   <none>           <none>
kubernetes-dashboard-56f46996f7-9nmb7       1/1     Running   0          16m   10.200.135.16   worker-03-10.0.0.105   <none>           <none>
root@master1-etcd1:/yaml/dashbord# kubectl get svc -n kubernetes-dashboard -o wide
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE   SELECTOR
dashboard-metrics-scraper   ClusterIP   10.100.105.3    <none>        8000/TCP        16m   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard        NodePort    10.100.128.38   <none>        443:30000/TCP   16m   k8s-app=kubernetes-dashboard
#The NodePort is 30000; the dashboard is reachable over https at any node IP on that port, e.g. https://<node-ip>:30000

#To decode a base64-encoded key or token: echo '<base64 string>' | base64 -d
  

 
