k8s-1.23.6 Installation and Deployment Guide (Very Detailed)
I. About This Document
Author: lanjiaxuan
Email: lanheader@163.com
Blog: https://www.cnblogs.com/lanheader/
Last updated: 2022-09-09
II. Deployment with kubeadm
Note: unless stated otherwise, run every step on all nodes (k8s-master and k8s-node).
1. Environment Preparation
Prepare the hosts (adjust to your own environment):
host | IP address | OS version | kernel version |
---|---|---|---|
k8s-master01 | 192.168.8.10 | CentOS Linux 7 (Core) | 5.19.1-1.el7.elrepo.x86_64 |
k8s-master02 | 192.168.8.11 | CentOS Linux 7 (Core) | 5.19.1-1.el7.elrepo.x86_64 |
k8s-master03 | 192.168.8.12 | CentOS Linux 7 (Core) | 5.19.1-1.el7.elrepo.x86_64 |
k8s-node01 | 192.168.8.13 | CentOS Linux 7 (Core) | 5.19.1-1.el7.elrepo.x86_64 |
k8s-node02 | 192.168.8.14 | CentOS Linux 7 (Core) | 5.19.1-1.el7.elrepo.x86_64 |
1.1 Upgrade the Kernel
Several sources report that the stock CentOS 3.10 kernel has bugs that can destabilize the cluster, so the kernel is upgraded to 5.19.1.
1.1.1 Import the ELRepo repository public key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
Or install the ELRepo yum repository directly:
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
1.1.2 Check the ELRepo repository and list available kernels
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
1.1.3 Install the kernel (long-term branch)
yum --enablerepo=elrepo-kernel install -y kernel-lt
1.1.4 List available kernels and the boot order
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /boot/grub2/grub.cfg
1.1.5 Set the default boot entry
grub2-set-default 0
1.1.6 Edit /etc/default/grub
vim /etc/default/grub
Set GRUB_DEFAULT=0 (this is the only line that needs to change):
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved # --- change saved here to 0 ---
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rhgb quiet net.ifnames=0 console=tty0 console=ttyS0,115200n8 noibrs"
GRUB_DISABLE_RECOVERY="true"
1.1.7 Regenerate the GRUB configuration by running grub2-mkconfig
grub2-mkconfig -o /boot/grub2/grub.cfg
1.1.8 Reboot the system
reboot
The steps above are collected into the following upgrade script; run it as appropriate for your environment:
#! /bin/bash
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
yum --disablerepo="*" --enablerepo=elrepo-kernel repolist
yum --disablerepo="*" --enablerepo=elrepo-kernel list kernel*
# Install the kernel. For servers we pick the long-term (lt) branch: stability and security come first, unless a specific kernel version is required;
yum update -y --enablerepo=elrepo-kernel
# Kernel flavors: lt = longterm (long-term maintenance branch), ml = mainline (latest mainline release);
yum install -y --enablerepo=elrepo-kernel --skip-broken kernel-lt kernel-lt-devel kernel-lt-tools
# yum -y --enablerepo=elrepo-kernel --skip-broken install kernel-ml.x86_64 kernel-ml-devel.x86_64 kernel-ml-tools.x86_64
echo "[*] Kernels currently selectable on this CentOS system"
awk -F \' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
sudo grub2-set-default 0
reboot
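After the reboot, it is worth confirming that the new kernel is actually running:
uname -r
# expected output similar to: 5.19.1-1.el7.elrepo.x86_64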
1.2 Hostname and hosts Settings
1.2.1 Set hostnames
Run the command matching each machine's address from the table above:
# 192.168.8.10
hostnamectl set-hostname k8s-master01
# 192.168.8.11
hostnamectl set-hostname k8s-master02
# 192.168.8.12
hostnamectl set-hostname k8s-master03
# 192.168.8.13
hostnamectl set-hostname k8s-node01
# 192.168.8.14
hostnamectl set-hostname k8s-node02
1.2.2 Configure /etc/hosts
Append the following entries to /etc/hosts on every node:
cat >> /etc/hosts << EOF
192.168.8.10 k8s-master01
192.168.8.11 k8s-master02
192.168.8.12 k8s-master03
192.168.8.13 k8s-node01
192.168.8.14 k8s-node02
# Note: an HA deployment also needs the VIP entry
192.168.8.9 k8s-master-lb
EOF
1.3 Disable firewalld, SELinux, and swap
1.3.1 Disable firewalld
systemctl stop firewalld.service
systemctl disable firewalld.service
yum upgrade
1.3.2 Disable swap
Note: since Kubernetes 1.8, kubelet will not start unless swap is disabled.
swapoff -a # temporary, until the next reboot
### The following disables swap permanently
cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
cat /etc/fstab
# /etc/fstab
# Created by anaconda on Tue Jul 21 11:51:16 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos_virtual--machine-root / xfs defaults 0 0
UUID=1694f89b-5c62-4a4a-9c86-46c3f202e4f6 /boot xfs defaults 0 0
/dev/mapper/centos_virtual--machine-home /home xfs defaults 0 0
#/dev/mapper/centos_virtual--machine-swap swap swap defaults 0 0
1.3.3 Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
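The sed above only takes effect after a reboot; to also relax SELinux immediately on the running system:
setenforce 0 # switch to permissive mode right away
getenforce # prints Permissive now, Disabled after the next reboot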
1.4 Kernel Parameters and Tuning
1.4.1 Install IPVS-related packages
yum -y install ipvsadm ipset sysstat conntrack libseccomp
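These packages provide the user-space tools only; for kube-proxy to run in ipvs mode the IPVS kernel modules must be loaded as well. A minimal sketch, reusing the modules-load.d mechanism used later in this document (module names assume a 4.19+ kernel, where nf_conntrack replaced nf_conntrack_ipv4):
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load.service
lsmod | grep ip_vs # verify the modules are loaded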
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv4.neigh.default.gc_stale_time=120
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
net.core.netdev_max_backlog=16384
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_max_syn_backlog = 8096
net.core.somaxconn = 32768
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count = 262144
EOF
Explanation of the parameters above:
net.ipv4.tcp_keepalive_time=600 # idle time (seconds) before TCP starts sending keepalive probes
net.ipv4.tcp_keepalive_intvl=30 # interval between keepalive probes
net.ipv4.tcp_keepalive_probes=10 # number of probes sent before giving up when the peer does not answer
net.ipv6.conf.all.disable_ipv6=1 # disable IPv6; change to 0 to enable
net.ipv6.conf.default.disable_ipv6=1 # disable IPv6; change to 0 to enable
net.ipv6.conf.lo.disable_ipv6=1 # disable IPv6; change to 0 to enable
net.ipv4.neigh.default.gc_stale_time=120 # ARP cache entry staleness timeout
net.ipv4.conf.all.rp_filter=0 # default is 1: strict reverse-path validation, which can drop packets
net.ipv4.conf.default.rp_filter=0 # do not enable source address validation
net.ipv4.conf.default.arp_announce=2 # always use the best local address for the target IP as the ARP source address
net.ipv4.conf.lo.arp_announce=2 # always use the best local address for the target IP as the ARP source address
net.ipv4.conf.all.arp_announce=2 # always use the best local address for the target IP as the ARP source address
net.ipv4.ip_local_port_range= 45001 65000 # minimum and maximum ports usable as source (local) ports, for both TCP and UDP connections
net.ipv4.ip_forward=1 # 0 disables IP forwarding; 1 enables it
net.ipv4.tcp_max_tw_buckets=6000 # cap on the number of TIME_WAIT sockets
net.ipv4.tcp_syncookies=1 # should be set to 1 to protect against SYN flood attacks
net.ipv4.tcp_synack_retries=2 # retries of the second handshake packet (SYN+ACK) when the third (ACK) never arrives (default 5)
net.bridge.bridge-nf-call-ip6tables=1 # filter bridged IPv6 packets through ip6tables chains
net.bridge.bridge-nf-call-iptables=1 # bridged (L2) forwarding is also filtered by iptables FORWARD rules, so L3 iptables rules can apply to L2 frames
net.netfilter.nf_conntrack_max=2310720 # connection-tracking table size; size it from RAM as CONNTRACK_MAX = RAMSIZE (in bytes) / 16384 / (x / 32), and keep nf_conntrack_max = 4 * nf_conntrack_buckets; default 262144
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
# gc_thresh3 is the absolute limit on the neighbor table size
# gc_thresh2 should equal the maximum expected number of neighbor entries on the system
# gc_thresh3 should then be set somewhat higher than gc_thresh2, e.g. 25%-50% above it, as surge capacity
# raise gc_thresh1 to a large value; if the table holds fewer entries than gc_thresh1, the kernel never removes (expires) stale entries
net.core.netdev_max_backlog=16384 # per-CPU backlog queue length for network devices
net.core.rmem_max = 16777216 # maximum socket receive buffer size for all protocols
net.core.wmem_max = 16777216 # maximum socket send buffer size (largest TCP send window)
net.ipv4.tcp_max_syn_backlog = 8096 # length of the first (SYN) backlog queue
net.core.somaxconn = 32768 # length of the second (accept) backlog queue
fs.inotify.max_user_instances=8192 # maximum inotify instances per real user ID; default 128
fs.inotify.max_user_watches=524288 # maximum watches a single user may register; default 8192
fs.file-max=52706963 # system-wide maximum number of file handles
fs.nr_open=52706963 # maximum number of file handles a single process may open
kernel.pid_max = 4194303 # maximum number of process IDs
net.bridge.bridge-nf-call-arptables=1 # filter bridged ARP packets through the arptables FORWARD chain
vm.swappiness=0 # avoid swap; it is only used when the system would otherwise go OOM
vm.overcommit_memory=1 # do not check whether enough physical memory is available
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
vm.max_map_count = 262144
1.4.2 Load required kernel modules
Load the modules for the current boot:
modprobe overlay
modprobe br_netfilter
Load them persistently across reboots:
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
ip_conntrack
EOF
Enable module loading at boot:
systemctl enable --now systemd-modules-load.service
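The sysctl values written to /etc/sysctl.d/k8s.conf earlier are not active yet; apply them now that br_netfilter is loaded (the net.bridge.* keys would fail without it):
sysctl --system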
1.5 Install docker/containerd
Note: Kubernetes 1.24 and later drop dockershim, so containerd is the better-supported runtime there; for 1.23.6 either runtime works.
1.5.1 Install docker (see the separate guide on docker installation, upgrade, and changing the data directory)
# Remove the old docker installation
docker stop `docker ps -a -q`
docker rm `docker ps -a -q`
docker rmi -f `docker images -a -q` # this force-removes all images
# Remove the old packages
yum -y remove docker docker-common container-selinux
# Configure the Docker repository
yum-config-manager \
--add-repo \
https://docs.docker.com/v1.13/engine/installation/linux/repo_files/centos/docker.repo
# Install Docker
# Refresh the yum cache
yum makecache fast
# Pick the Docker version you want
yum list docker-engine.x86_64 --showduplicates |sort -r
yum -y install docker-engine-<VERSION_STRING>
docker -v
# Start and enable
systemctl start docker
systemctl enable docker
# Uninstall
yum -y remove docker-engine docker-engine-selinux
Docker configuration file /etc/docker/daemon.json:
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {"max-size":"200m", "max-file":"3"}
}
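After writing daemon.json, restart docker and verify the cgroup driver matches the systemd driver kubelet expects:
systemctl daemon-reload
systemctl restart docker
docker info | grep -i cgroup # should report: Cgroup Driver: systemd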
1.5.2 Install containerd
yum-config-manager \
--add-repo \
https://docs.docker.com/v1.13/engine/installation/linux/repo_files/centos/docker.repo
# Refresh the yum cache
yum makecache fast
# Install containerd
yum -y install containerd
# Start and enable
systemctl start containerd
systemctl enable containerd
# Uninstall
yum -y remove containerd
containerd configuration file /etc/containerd/config.toml (not /etc/docker/daemon.json):
root = "/data/containerd/root"
state = "/data/containerd/state"
oom_score = -999
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[plugins]
[plugins.cgroups]
no_prometheus = false
[plugins.cri]
stream_server_address = "127.0.0.1"
stream_server_port = "0"
enable_selinux = false
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6" # use a mirror registry reachable from your network
stats_collect_period = 10
systemd_cgroup = true
enable_tls_streaming = false
max_container_log_line_size = 16384
[plugins.cri.containerd]
snapshotter = "overlayfs"
no_pivot = false
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = ""
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = "/etc/cni/net.d/10-default.conf"
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."docker.io"]
endpoint = [
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com"
]
[plugins.cri.registry.mirrors."gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."k8s.gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn/google-containers/"
]
[plugins.cri.registry.mirrors."quay.io"]
endpoint = [
"https://quay.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."harbor.kubemsb.com"]
endpoint = [
"http://harbor.kubemsb.com"
]
[plugins.cri.x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins.diff-service]
default = ["walking"]
[plugins.linux]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins.opt]
path = "/opt/containerd"
[plugins.restart]
interval = "10s"
[plugins.scheduler]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
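After adjusting the configuration, restart containerd and make sure it comes back healthy (ctr is installed alongside containerd):
systemctl restart containerd
ctr version # both client and server versions should print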
1.6 Create Shared Storage (NFS)
If you choose to use an NFS server, follow the steps below.
Note:
The export line must read /data/k8s *(rw,sync,no_root_squash); without no_root_squash, pods will fail to start with permission errors.
# Install the NFS components
yum -y install nfs-utils rpcbind
# Create the export path
mkdir -p /data/k8s/
# Set permissions on the path
chmod 755 /data/k8s/
# Configure the export
vim /etc/exports
/data/k8s *(rw,sync,no_root_squash)
# Start the services
systemctl start rpcbind.service
systemctl enable rpcbind
systemctl status rpcbind
systemctl start nfs.service
systemctl enable nfs
systemctl status nfs
# On each node, check that the export is visible, then mount it
showmount -e 192.168.8.10
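To actually mount the share from a node, a quick sketch (the mount point /mnt is just an example):
mount -t nfs 192.168.8.10:/data/k8s /mnt
df -h | grep k8s # confirm the mount is present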
The official example storage configurations are available here:
https://github.com/kubernetes/examples/tree/master/volumes
2. Install the Load Balancer
2.1 Install haproxy and keepalived
Run on each of k8s-master01, k8s-master02, and k8s-master03:
yum -y install haproxy keepalived
2.2 keepalived Configuration
1. Physical machines and virtual machines name their NICs differently; check the names under /etc/sysconfig/network-scripts.
2. Configure one MASTER and two BACKUP nodes, each with a different priority.
# k8s-master01
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -30
}
vrrp_instance VI-kube-master {
state MASTER # MASTER or BACKUP
priority 100 # priority
dont_track_primary
interface em1 # NIC name
virtual_router_id 68
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
192.168.8.9
}
}
# k8s-master02
cat /etc/keepalived/keepalived.conf
global_defs {
router_id LVS_DEVEL
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -30
}
vrrp_instance VI-kube-master {
state BACKUP # MASTER or BACKUP
priority 90 # priority
dont_track_primary
interface eth0 # NIC name
virtual_router_id 68
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
192.168.8.9
}
}
# k8s-master03
global_defs {
router_id LVS_DEVEL
}
vrrp_script check-haproxy {
script "killall -0 haproxy"
interval 5
weight -30
}
vrrp_instance VI-kube-master {
state BACKUP # MASTER or BACKUP
priority 80 # priority
dont_track_primary
interface eth0 # NIC name
virtual_router_id 68
advert_int 3
track_script {
check-haproxy
}
virtual_ipaddress {
192.168.8.9
}
}
haproxy configuration
The configuration is identical on master01, 02, and 03.
cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
mode tcp
bind *:16443
option tcplog
default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
mode tcp
balance roundrobin
server k8s-master0 192.168.8.10:6443 check
server k8s-master1 192.168.8.11:6443 check
server k8s-master2 192.168.8.12:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
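Once both services are running, the VIP should be held by the MASTER node and haproxy should answer on the stats port. A quick sanity check (credentials are the ones from the haproxy config above):
ip addr show | grep 192.168.8.9 # the VIP appears only on the current MASTER
curl -s -u admin:awesomePassword -o /dev/null -w '%{http_code}\n' 'http://127.0.0.1:1080/admin?stats' # expect 200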
3. Deploying Kubernetes with kubeadm
3.1 Install kubeadm and kubelet
Note: when installing with yum, note the exact Kubernetes version number; it is needed later for kubeadm init.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Note: to install a specific version, use the following instead:
yum install kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6 --disableexcludes=kubernetes
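kubeadm takes over kubelet's configuration later, but the unit should already be enabled so it survives reboots:
systemctl enable --now kubelet
# kubelet will crash-loop until kubeadm init/join has run; that is expected at this stage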
3.2 Initialize the Cluster
Flag reference:
--image-repository: registry to pull control-plane images from (default "k8s.gcr.io")
--kubernetes-version: pin a specific Kubernetes version (default "stable-1")
--service-cidr: IP range used for service virtual IPs (default "10.96.0.0/12")
--pod-network-cidr: IP range for the pod network; when set, every node is automatically assigned a pod CIDR from it.
Run the init on the first master (k8s-master01):
kubeadm init \
--kubernetes-version v1.23.6 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--control-plane-endpoint=192.168.8.9:16443 \
--upload-certs \
--v=5
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.8.9:16443 --token xxxxxxxxxxxxx \
--discovery-token-ca-cert-hash xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx \
--control-plane --certificate-key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.8.9:16443 --token xxxxxxxxxxxxxxxx \
--discovery-token-ca-cert-hash xxxxxxxxxxxxxxxxxxxxxxx
After a successful init, the output above explains the remaining client setup and prints a temporary token together with the commands for adding more nodes.
For a regular user to use the cluster:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Run on k8s-master02 and k8s-master03 respectively:
kubeadm join 192.168.8.9:16443 --token xxxxxxxxxxxxx \
--discovery-token-ca-cert-hash xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx \
--control-plane --certificate-key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
If you are root, you can instead just run:
export KUBECONFIG=/etc/kubernetes/admin.conf
Either of the two options works; I am using root here, so:
export KUBECONFIG=/etc/kubernetes/admin.conf
kubelet should now be in the running state; startup succeeded.
Check component status (confirm every component is Healthy):
kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
Check node status:
kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady master 11m v1.23.6
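The node shows NotReady because no CNI network plugin has been deployed yet. Applying any pod network add-on fixes this; a sketch using the same Calico manifest as section 4.7 (check that the version suits your cluster):
wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
kubectl apply -f calico.yaml
kubectl get nodes # STATUS changes to Ready once the calico pods are running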
4. Binary Deployment of Kubernetes
4.1 Deploy the etcd Cluster
4.1.1 Create a working directory
mkdir -p /data/k8s-work
4.1.2 Install the cfssl tools
Notes:
cfssl is a PKI/TLS toolkit written in Go and open-sourced by CloudFlare. The main programs are:
- cfssl, the CFSSL command-line tool
- cfssljson, which takes the JSON output from cfssl and writes the certificates, keys, CSRs, and bundles to files.
cd /data/k8s-work
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
# cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6
4.1.3 Create the CA Certificate
4.1.3.1 Create the CA certificate signing request
Note: "expiry": "87600h" is the certificate lifetime. kubeadm defaults to one year; here we explicitly set ten years.
cat > ca-csr.json <<"EOF"
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Xian",
"L": "Xian",
"O": "lanheader",
"OU": "CN"
}
],
"ca": {
"expiry": "87600h"
}
}
EOF
4.1.3.2 Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
4.1.3.3 Configure the CA signing policy
server auth means a client can use this CA to verify certificates presented by servers;
client auth means a server can use this CA to verify certificates presented by clients.
cfssl print-defaults config > ca-config.json
cat > ca-config.json <<"EOF"
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
4.1.4 Create the etcd Certificate
4.1.4.1 Create the etcd certificate signing request
1. List the master IP addresses in hosts; with a single master, one address is enough.
2. This is JSON: when adding or removing hosts, make sure the last array element has no trailing comma.
cat > etcd-csr.json <<"EOF"
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.8.10",
"192.168.8.11",
"192.168.8.12"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "CN",
"ST": "Xian",
"L": "Xian",
"O": "lanheader",
"OU": "CN"
}]
}
EOF
4.1.4.2 Generate the etcd certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem etcd.csr etcd-csr.json etcd-key.pem etcd.pem
4.1.5 Deploy the etcd Cluster
4.1.5.1 Download the etcd package
wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
4.1.5.2 Install etcd
tar -xvf etcd-v3.5.2-linux-amd64.tar.gz
cp -p etcd-v3.5.2-linux-amd64/etcd* /usr/local/bin/
4.1.5.3 Distribute the binaries
scp etcd-v3.5.2-linux-amd64/etcd* k8s-master02:/usr/local/bin/
scp etcd-v3.5.2-linux-amd64/etcd* k8s-master03:/usr/local/bin/
4.1.5.4 Create the configuration file
Parameter reference:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: listen address for cluster (peer) traffic
ETCD_LISTEN_CLIENT_URLS: listen address for client traffic
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients
ETCD_INITIAL_CLUSTER: addresses of all cluster nodes
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: state when joining; new for a new cluster, existing to join an existing one
cat > /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.8.10:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.8.10:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.8.10:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.8.10:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.8.10:2380,etcd2=https://192.168.8.11:2380,etcd3=https://192.168.8.12:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
4.1.5.5 Create the systemd service file
Note: WorkingDirectory should match the ETCD_DATA_DIR set in the configuration file.
If a node has to be redeployed during this process, delete the data under the data directory on all nodes before restarting.
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
cd /data/k8s-work
cp ca*.pem /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
cat > /etc/systemd/system/etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-client-cert-auth \
--client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
4.1.5.6 Sync the etcd configuration to the other master nodes
With a single node, running on that master alone is enough.
With multiple nodes, create the directories on every master:
# Create the directories
mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
If you have a single master, skip the steps below.
# Service configuration file; the etcd node name and IP addresses must be adjusted on each node
scp /etc/etcd/etcd.conf k8s-master02:/etc/etcd/
scp /etc/etcd/etcd.conf k8s-master03:/etc/etcd/
# k8s-master02:
cat /etc/etcd/etcd.conf
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.8.11:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.8.11:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.8.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.8.11:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.8.10:2380,etcd2=https://192.168.8.11:2380,etcd3=https://192.168.8.12:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# k8s-master03:
cat /etc/etcd/etcd.conf
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.8.12:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.8.12:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.8.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.8.12:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.8.10:2380,etcd2=https://192.168.8.11:2380,etcd3=https://192.168.8.12:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# Certificate files
scp /etc/etcd/ssl/* k8s-master02:/etc/etcd/ssl
scp /etc/etcd/ssl/* k8s-master03:/etc/etcd/ssl
# systemd service file
scp /etc/systemd/system/etcd.service k8s-master02:/etc/systemd/system/
scp /etc/systemd/system/etcd.service k8s-master03:/etc/systemd/system/
4.1.5.7 Start the etcd cluster
Run on each master:
systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd
Verify cluster health:
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 endpoint health
+----------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+----------------------------+--------+-------------+-------+
| https://192.168.8.10:2379 | true | 10.393062ms | |
| https://192.168.8.11:2379 | true | 15.70437ms | |
| https://192.168.8.12:2379 | true | 15.871684ms | |
+----------------------------+--------+-------------+-------+
Check etcd performance:
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 check perf
59 / 60 Boooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooom ! 98.33%
PASS: Throughput is 151 writes/s
PASS: Slowest request took 0.066478s
PASS: Stddev is 0.002354s
PASS
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 member list
+------------------+---------+-------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| 9b449b0ff1d4c375 | started | etcd1 | https://192.168.8.10:2380 | https://192.168.8.10:2379 | false |
| d1fbb74bc6a61e5c | started | etcd2 | https://192.168.8.11:2380 | https://192.168.8.11:2379 | false |
| f60b205fb02fe23c | started | etcd3 | https://192.168.8.12:2380 | https://192.168.8.12:2379 | false |
+------------------+---------+-------+----------------------------+----------------------------+------------+
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 endpoint status
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.8.10:2379 | 9b449b0ff1d4c375 | 3.5.2 | 24 MB | true | false | 2 | 403774 | 403774 | |
| https://192.168.8.11:2379 | d1fbb74bc6a61e5c | 3.5.2 | 24 MB | false | false | 2 | 403774 | 403774 | |
| https://192.168.8.12:2379 | f60b205fb02fe23c | 3.5.2 | 24 MB | false | false | 2 | 403774 | 403774 | |
+----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
4.2 Kubernetes Cluster Deployment
4.2.1 Download the Kubernetes packages
wget https://dl.k8s.io/v1.23.6/kubernetes-server-linux-amd64.tar.gz
4.2.2 Install the Kubernetes binaries
tar -xvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin/
cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/
4.2.3 Distribute the binaries
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master02:/usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master03:/usr/local/bin/
scp kubelet kube-proxy k8s-master01:/usr/local/bin
scp kubelet kube-proxy k8s-master02:/usr/local/bin
scp kubelet kube-proxy k8s-master03:/usr/local/bin
scp kubelet kube-proxy k8s-node01:/usr/local/bin
scp kubelet kube-proxy k8s-node02:/usr/local/bin
4.2.4 Create directories on the cluster nodes
On all nodes:
mkdir -p /etc/kubernetes/
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes
4.2.5 Deploy kube-apiserver
4.2.5.1 Create the apiserver certificate signing request
Notes:
If the hosts field is non-empty, it must list every IP (including the VIP) or domain name authorized to use the certificate. Because this certificate is used across the cluster, include the IPs of all nodes; writing in a few spare IPs makes later expansion easier.
Also include the first IP of the service network (the first address of the service-cluster-ip-range passed to kube-apiserver, e.g. 10.96.0.1).
cat > kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.8.10",
"192.168.8.11",
"192.168.8.12",
"192.168.8.13",
"192.168.8.14",
"192.168.8.15",
"192.168.8.16",
"192.168.8.17",
"192.168.8.18",
"192.168.8.9",
"10.96.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "lanheader",
"OU": "CN"
}
]
}
EOF
4.2.5.2 Generate the apiserver certificate and token file
Notes:
Create the token required by the TLS bootstrapping mechanism.
TLS bootstrapping: once the apiserver enables TLS authentication, kubelet and kube-proxy on the nodes must present valid CA-signed certificates to talk to kube-apiserver. With many nodes, issuing client certificates by hand is a lot of work and complicates cluster scaling. To simplify this, Kubernetes introduced TLS bootstrapping to issue client certificates automatically: kubelet requests a certificate from the apiserver as a low-privileged user, and the apiserver signs kubelet's certificate dynamically. This approach is strongly recommended on nodes; currently it is used mainly for kubelet, while kube-proxy still receives a certificate we issue centrally.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
4.2.5.3 Create the kube-apiserver configuration file
cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.8.10 \
--secure-port=6443 \
--advertise-address=192.168.8.10 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
EOF
4.2.5.4 Create the kube-apiserver systemd service file
cat > /etc/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
4.2.5.5 Sync files to the other master nodes
cp ca*.pem /etc/kubernetes/ssl/
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/
scp /etc/kubernetes/token.csv k8s-master02:/etc/kubernetes
scp /etc/kubernetes/token.csv k8s-master03:/etc/kubernetes
scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master02:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master03:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/ca*.pem k8s-master02:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/ca*.pem k8s-master03:/etc/kubernetes/ssl
scp /etc/kubernetes/kube-apiserver.conf k8s-master02:/etc/kubernetes/kube-apiserver.conf
# After copying, edit on k8s-master02 so that it reads:
# cat /etc/kubernetes/kube-apiserver.conf
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.8.11 \
--secure-port=6443 \
--advertise-address=192.168.8.11 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
scp /etc/kubernetes/kube-apiserver.conf k8s-master03:/etc/kubernetes/kube-apiserver.conf
# After copying, edit on k8s-master03 so that it reads:
# cat /etc/kubernetes/kube-apiserver.conf
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.8.12 \
--secure-port=6443 \
--advertise-address=192.168.8.12 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.8.10:2379,https://192.168.8.11:2379,https://192.168.8.12:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=4"
scp /etc/systemd/system/kube-apiserver.service k8s-master02:/etc/systemd/system/kube-apiserver.service
scp /etc/systemd/system/kube-apiserver.service k8s-master03:/etc/systemd/system/kube-apiserver.service
4.2.5.6 Start kube-apiserver
systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
# Test: each endpoint should return a JSON 401 Unauthorized body, which is expected with anonymous auth disabled and confirms the apiserver is serving
curl --insecure https://192.168.8.10:6443/
curl --insecure https://192.168.8.11:6443/
curl --insecure https://192.168.8.12:6443/
curl --insecure https://192.168.8.9:16443/
4.3 Deploy kubectl
4.3.1 Create the kubectl (admin) certificate signing request
Notes:
Later, kube-apiserver authorizes client requests (kubelet, kube-proxy, pods) using RBAC;
kube-apiserver predefines some RBAC RoleBindings, e.g. cluster-admin binds the Group system:masters to the Role cluster-admin, which grants permission to call every kube-apiserver API;
O sets the certificate's Group to system:masters. When kubectl uses this certificate against kube-apiserver, authentication succeeds because the certificate is CA-signed, and because the group system:masters is pre-authorized, it is granted access to all APIs;
Note:
This admin certificate is used to generate the administrator's kubeconfig file. We generally recommend controlling permissions in Kubernetes via RBAC; Kubernetes takes the certificate's CN field as the User and the O field as the Group;
"O": "system:masters" must be exactly system:masters, otherwise the later kubectl create clusterrolebinding will fail.
cat > admin-csr.json << "EOF"
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "xian",
"L": "xian",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
4.3.2 Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
4.3.3 Copy the files into place
cp admin*.pem /etc/kubernetes/ssl/
4.3.4 Generate the kubeconfig file
kube.config is kubectl's configuration file; it contains everything needed to access the apiserver: the apiserver address, the CA certificate, and the client certificate.
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.8.9:16443 --kubeconfig=kube.config
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
kubectl config use-context kubernetes --kubeconfig=kube.config
4.3.5 Install the kubectl config and create the role binding
mkdir ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
4.3.6 Check cluster status
export KUBECONFIG=$HOME/.kube/config
View cluster info:
kubectl cluster-info
View component status:
kubectl get componentstatuses
View resources across all namespaces:
kubectl get all --all-namespaces
4.3.7 Sync the kubectl config to the other master nodes
On k8s-master02:
mkdir /root/.kube
On k8s-master03:
mkdir /root/.kube
Then from k8s-master01:
scp /root/.kube/config k8s-master02:/root/.kube/config
scp /root/.kube/config k8s-master03:/root/.kube/config
4.3.8 Configure kubectl command completion (optional)
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
source $HOME/.bash_profile
4.4 Deploy kube-controller-manager
4.4.1 Create the kube-controller-manager certificate signing request
Notes:
The hosts list contains the IPs of all kube-controller-manager nodes;
CN is system:kube-controller-manager;
O is system:kube-controller-manager; the ClusterRoleBinding system:kube-controller-manager built into Kubernetes grants kube-controller-manager the permissions its work requires.
cat > kube-controller-manager-csr.json << "EOF"
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"192.168.8.10",
"192.168.8.11",
"192.168.8.12"
],
"names": [
{
"C": "CN",
"ST": "xian",
"L": "xian",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
4.4.2 Generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# ls
kube-controller-manager.csr
kube-controller-manager-csr.json
kube-controller-manager-key.pem
kube-controller-manager.pem
4.4.3 Create kube-controller-manager.kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.8.9:16443 --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
4.4.4 Create the kube-controller-manager configuration file
Older versions (including the insecure --port flag, since removed):
cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
Newer versions drop the insecure --port flag; shown here as the equivalent full command line:
/usr/local/bin/kube-controller-manager \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
4.4.5 Create the systemd service file
cat > kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
4.4.6 Sync files to the other master nodes
cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
scp kube-controller-manager*.pem k8s-master02:/etc/kubernetes/ssl/
scp kube-controller-manager*.pem k8s-master03:/etc/kubernetes/ssl/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master02:/etc/kubernetes/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master03:/etc/kubernetes/
scp kube-controller-manager.service k8s-master02:/usr/lib/systemd/system/
scp kube-controller-manager.service k8s-master03:/usr/lib/systemd/system/
# Inspect the certificate
openssl x509 -in /etc/kubernetes/ssl/kube-controller-manager.pem -noout -text
4.4.7 Start the service
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
kubectl get componentstatuses
4.5 Deploy kube-scheduler
4.5.1 Create the kube-scheduler certificate signing request
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"192.168.8.10",
"192.168.8.11",
"192.168.8.12"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "xian",
"L": "xian",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
4.5.2 Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# ls
kube-scheduler.csr
kube-scheduler-csr.json
kube-scheduler-key.pem
kube-scheduler.pem
4.5.3 Create the kube-scheduler kubeconfig
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.8.9:16443 --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
4.5.4 Create the service configuration file
cat > kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"
EOF
4.5.5 Create the systemd service file
cat > kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
4.5.6 Sync files to the other master nodes
cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
scp kube-scheduler*.pem k8s-master02:/etc/kubernetes/ssl/
scp kube-scheduler*.pem k8s-master03:/etc/kubernetes/ssl/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master02:/etc/kubernetes/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master03:/etc/kubernetes/
scp kube-scheduler.service k8s-master02:/usr/lib/systemd/system/
scp kube-scheduler.service k8s-master03:/usr/lib/systemd/system/
4.5.7 Start the service
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
4.6 Worker Node Deployment
4.6.1 Install and configure containerd
4.6.1.1 Download the package
wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gz
4.6.1.2 Install containerd
The archive provides these top-level directories:
etc
opt
usr
Extracting directly at / drops everything into the matching system directories, which saves the copying step.
tar -xf cri-containerd-cni-1.6.1-linux-amd64.tar.gz -C /
4.6.1.3 Generate and modify the configuration
mkdir /etc/containerd
containerd config default >/etc/containerd/config.toml
# ls /etc/containerd/
config.toml
sed -i 's@systemd_cgroup = false@systemd_cgroup = true@' /etc/containerd/config.toml
sed -i 's@k8s.gcr.io/pause:3.6@registry.aliyuncs.com/google_containers/pause:3.6@' /etc/containerd/config.toml
# cat >/etc/containerd/config.toml<<EOF
root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = -999
[grpc]
address = "/run/containerd/containerd.sock"
uid = 0
gid = 0
max_recv_message_size = 16777216
max_send_message_size = 16777216
[debug]
address = ""
uid = 0
gid = 0
level = ""
[metrics]
address = ""
grpc_histogram = false
[cgroup]
path = ""
[plugins]
[plugins.cgroups]
no_prometheus = false
[plugins.cri]
stream_server_address = "127.0.0.1"
stream_server_port = "0"
enable_selinux = false
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
stats_collect_period = 10
systemd_cgroup = true
enable_tls_streaming = false
max_container_log_line_size = 16384
[plugins.cri.containerd]
snapshotter = "overlayfs"
no_pivot = false
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = ""
runtime_root = ""
[plugins.cri.containerd.untrusted_workload_runtime]
runtime_type = ""
runtime_engine = ""
runtime_root = ""
[plugins.cri.cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
conf_template = "/etc/cni/net.d/10-default.conf"
[plugins.cri.registry]
[plugins.cri.registry.mirrors]
[plugins.cri.registry.mirrors."docker.io"]
endpoint = [
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com"
]
[plugins.cri.registry.mirrors."gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."k8s.gcr.io"]
endpoint = [
"https://gcr.mirrors.ustc.edu.cn/google-containers/"
]
[plugins.cri.registry.mirrors."quay.io"]
endpoint = [
"https://quay.mirrors.ustc.edu.cn"
]
[plugins.cri.registry.mirrors."harbor.kubemsb.com"]
endpoint = [
"http://harbor.kubemsb.com"
]
[plugins.cri.x509_key_pair_streaming]
tls_cert_file = ""
tls_key_file = ""
[plugins.diff-service]
default = ["walking"]
[plugins.linux]
shim = "containerd-shim"
runtime = "runc"
runtime_root = ""
no_shim = false
shim_debug = false
[plugins.opt]
path = "/opt/containerd"
[plugins.restart]
interval = "10s"
[plugins.scheduler]
pause_threshold = 0.02
deletion_threshold = 0
mutation_threshold = 100
schedule_delay = "0s"
startup_delay = "100ms"
EOF
4.6.1.4 Install runc
The runc bundled in the archive above pulls in too many system dependencies, so it is better to download and install it separately.
By default the bundled runc fails with: runc: symbol lookup error: runc: undefined symbol: seccomp_notify_respond
wget https://github.com/opencontainers/runc/releases/download/v1.1.0/runc.amd64
chmod +x runc.amd64
Replace the runc from the original archive:
mv runc.amd64 /usr/local/sbin/runc
# runc -v
runc version 1.1.0
commit: v1.1.0-0-g067aaf85
spec: 1.0.2-dev
go: go1.17.6
libseccomp: 2.5.3
systemctl enable containerd
systemctl start containerd
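The cri-containerd-cni bundle also ships crictl; pointing it at the containerd socket makes later troubleshooting easier. A small sketch:
cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
crictl info # prints the runtime status as JSON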
4.6.2 Deploy kubelet
Run on k8s-master01.
4.6.2.1 Create kubelet-bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.8.9:16443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl describe clusterrolebinding cluster-system-anonymous
kubectl describe clusterrolebinding kubelet-bootstrap
4.6.2.2 Create the kubelet configuration file
cat > kubelet.json << "EOF"
{
"kind": "KubeletConfiguration",
"apiVersion": "kubelet.config.k8s.io/v1beta1",
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/ssl/ca.pem"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": false
}
},
"authorization": {
"mode": "Webhook",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"address": "192.168.8.10",
"port": 10250,
"readOnlyPort": 10255,
"cgroupDriver": "systemd",
"hairpinMode": "promiscuous-bridge",
"serializeImagePulls": false,
"clusterDomain": "cluster.local.",
"clusterDNS": ["10.96.0.2"]
}
EOF
4.6.2.3 Create the kubelet systemd service file
cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet.json \
--cni-bin-dir=/opt/cni/bin \
--cni-conf-dir=/etc/cni/net.d \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
--network-plugin=cni \
--rotate-certificates \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
--root-dir=/etc/cni/net.d \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
4.6.2.4 Sync files to the cluster nodes
Note:
The address field in kubelet.json must be changed to each host's own IP.
cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
cp kubelet.json /etc/kubernetes/
cp kubelet.service /usr/lib/systemd/system/
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp ca.pem $i:/etc/kubernetes/ssl/;done
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kubelet.service $i:/usr/lib/systemd/system/;done
4.6.2.5 Create directories and start the service
Note:
Once kubelet is confirmed running, go to a master and approve the bootstrap CSRs.
mkdir -p /var/lib/kubelet
mkdir -p /var/log/kubernetes
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady <none> 2m55s v1.23.6
k8s-master02 NotReady <none> 45s v1.23.6
k8s-master03 NotReady <none> 39s v1.23.6
k8s-node01 NotReady <none> 5m1s v1.23.6
# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
csr-b949p 7m55s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-c9hs4 3m34s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-r8vhp 5m50s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
csr-zb4sr 3m40s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
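If any CSR still shows Pending instead of Approved,Issued, approve it manually from a master:
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve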
4.6.3 Deploy kube-proxy
4.6.3.1 Create the kube-proxy certificate signing request
cat > kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "xian",
"L": "xian",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
4.6.3.2 Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
# ls kube-proxy*
kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem
4.6.3.3 Create the kubeconfig file
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.8.9:16443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
4.6.3.4 Create the service configuration file
cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.8.10
clientConnection:
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.8.10:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.8.10:10249
mode: "ipvs"
EOF
4.6.3.5 Create the systemd service file
cat > kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
4.6.3.6 Sync files to the worker nodes
Note:
Change the IP addresses in kube-proxy.yaml to each host's own IP.
cp kube-proxy*.pem /etc/kubernetes/ssl/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done
for i in k8s-master02 k8s-master03 k8s-node01 k8s-node02;do scp kube-proxy.service $i:/usr/lib/systemd/system/;done
4.6.3.7 Start the service
mkdir -p /var/lib/kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
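Since the mode is "ipvs", kube-proxy programs IPVS virtual servers instead of iptables chains; you can confirm this with the ipvsadm tool installed back in section 1.4.1:
ipvsadm -Ln # should list a virtual server for the kubernetes service, e.g. 10.96.0.1:443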
4.7 Deploy the Calico Network Component
4.7.1 Download
Official documentation:
https://projectcalico.docs.tigera.io/getting-started/kubernetes/quickstart
wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
4.7.2 Modify the manifest
Uncomment CALICO_IPV4POOL_CIDR (around lines 3683-3684 of calico.yaml) and set it to the pod CIDR:
3683 - name: CALICO_IPV4POOL_CIDR
3684 value: "10.244.0.0/16"
4.7.3 Apply the manifest
kubectl apply -f calico.yaml
4.7.4 Verify the result
# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-tf2m5 1/1 Running 0 72s
kube-system calico-node-llw5w 1/1 Running 0 72s
kube-system calico-node-mhh6g 1/1 Running 0 72s
kube-system calico-node-twj99 1/1 Running 0 72s
kube-system calico-node-zh6xl 1/1 Running 0 72s
# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 55m v1.23.6
k8s-master02 Ready <none> 53m v1.23.6
k8s-master03 Ready <none> 53m v1.23.6
k8s-node01 Ready <none> 57m v1.23.6
4.8 Deploy CoreDNS
cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.8.4
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
kubectl apply -f coredns.yaml
# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-7cc8dd57d9-tf2m5 1/1 Running 0 4m7s
kube-system calico-node-llw5w 1/1 Running 0 4m7s
kube-system calico-node-mhh6g 1/1 Running 0 4m7s
kube-system calico-node-twj99 1/1 Running 0 4m7s
kube-system calico-node-zh6xl 1/1 Running 0 4m7s
kube-system coredns-675db8b7cc-ncnf6 1/1 Running 0 26s
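To confirm cluster DNS works end to end, run a lookup from a throwaway pod (busybox:1.28 is a common choice because its nslookup behaves well):
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local
# the query should go to the CoreDNS service at 10.96.0.2 and resolve the kubernetes service to 10.96.0.1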