Environment Preparation
Hostname       | Outer-IP   | Inner-IP
k8s-master-001 | 10.0.0.100 | 172.16.1.113
k8s-node-001   | 10.0.0.101 | 172.16.1.114
k8s-node-002   | 10.0.0.102 | 172.16.1.115
#1. Install Docker and kubeadm on all nodes
#2. Deploy the Kubernetes master
#3. Deploy the container network plugin
#4. Deploy the Kubernetes nodes and join them to the cluster
#5. Deploy the Dashboard web UI to inspect Kubernetes resources visually
II. System Initialization (all nodes)
1. Add hosts entries
#1. Set the hostnames
[root@ip-172-16-1-113 ~]# hostnamectl set-hostname k8s-master-001
[root@ip-172-16-1-114 ~]# hostnamectl set-hostname k8s-node-001
[root@ip-172-16-1-115 ~]# hostnamectl set-hostname k8s-node-002
#2. Add hosts entries on the master node
[root@k8s-master-001 ~]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.100 k8s-admin-master01 m1
10.0.0.101 k8s-admin-node01 n1
10.0.0.102 k8s-admin-node02 n2
#3. Distribute the hosts file to the node machines
[root@k8s-master-001 ~]# scp /etc/hosts root@n1:/etc/hosts
hosts 100% 247 2.7KB/s 00:00
[root@k8s-master-001 ~]# scp /etc/hosts root@n2:/etc/hosts
hosts
2. Configure passwordless SSH login
[root@k8s-admin-master01 ~]# ssh-keygen
[root@k8s-admin-node01 ~]# ssh-keygen
[root@k8s-admin-node02 ~]# ssh-keygen
[root@k8s-admin-master01 ~]# ssh-copy-id m1
[root@k8s-admin-master01 ~]# ssh-copy-id n1
[root@k8s-admin-master01 ~]# ssh-copy-id n2
[root@k8s-admin-node01 ~]# ssh-copy-id m1
[root@k8s-admin-node01 ~]# ssh-copy-id n1
[root@k8s-admin-node01 ~]# ssh-copy-id n2
[root@k8s-admin-node02 ~]# ssh-copy-id m1
[root@k8s-admin-node02 ~]# ssh-copy-id n1
[root@k8s-admin-node02 ~]# ssh-copy-id n2
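If you would rather not run ssh-copy-id nine times by hand, a minimal sketch of the same key distribution (assuming the m1/n1/n2 aliases from /etc/hosts and an existing ~/.ssh/id_rsa.pub) can be run on each node after ssh-keygen:
# Copy this node's public key to every host in the cluster (run on each of the three nodes).
for host in m1 n1 n2; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub "root@${host}"
done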
3. Disable the swap partition to improve performance
#Temporarily disable
[root@k8s-admin-master01 ~]# swapoff -a
[root@k8s-admin-node01 ~]# swapoff -a
[root@k8s-admin-node02 ~]# swapoff -a
#Permanently disable: comment out the swap entry in /etc/fstab on every node
[root@k8s-admin-master01 ~]# vim /etc/fstab
#UUID=ff05d4f6-5d80-4d32-90a2-8268a0d4d0d3 swap swap defaults 0 0
#If this is a cloned VM, remove the UUID
[root@k8s-admin-node01 ~]# vim /etc/fstab
#UUID=ff05d4f6-5d80-4d32-90a2-8268a0d4d0d3 swap swap defaults 0 0
#If this is a cloned VM, remove the UUID
[root@k8s-admin-node02 ~]# vim /etc/fstab
#UUID=ff05d4f6-5d80-4d32-90a2-8268a0d4d0d3 swap swap defaults 0 0
#If this is a cloned VM, remove the UUID
Explanation:
Swap is the swap partition. When a machine runs out of memory it falls back to swap, but swap is much slower than RAM, so Kubernetes disables its use by default for performance reasons. kubeadm checks during initialization whether swap is off and fails if it is not. If you do not want to disable swap, pass --ignore-preflight-errors=Swap when installing k8s.
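If you prefer to comment out the swap entry with sed instead of editing /etc/fstab by hand, a sketch of the permanent disable (plus the ignore flag mentioned above) looks like this:
# Turn swap off now and comment out any active swap entry in /etc/fstab.
swapoff -a
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]])/#\1/' /etc/fstab
# Or keep swap enabled and let kubeadm skip the preflight check instead:
# kubeadm init --ignore-preflight-errors=Swap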
4. Adjust kernel parameters
[root@xianchaomaster1 ~]# modprobe br_netfilter
[root@xianchaomaster1 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@xianchaomaster1 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@xianchaomaster1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
[root@xianchaonode1 ~]# modprobe br_netfilter
[root@xianchaonode1 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@xianchaonode1 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@xianchaonode1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
[root@xianchaonode2 ~]# modprobe br_netfilter
[root@xianchaonode2 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@xianchaonode2 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@xianchaonode2 ~]# sysctl -p /etc/sysctl.d/k8s.conf
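To confirm on each node that the module is loaded and the sysctl values are active, a quick check (a sketch) is:
# All three sysctl values should print "= 1".
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
# Optionally load the module at boot via systemd-modules-load instead of /etc/profile:
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf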
5. Disable firewalld and SELinux
#1. Disable the firewall
[root@k8s-master-001 ~]# systemctl disable --now firewalld
#2. Disable SELinux
1) Temporarily disable
[root@k8s-master-001 ~]# setenforce 0
2) Permanently disable
[root@k8s-master-001 ~]# sed -i 's#enforcing#disabled#g' /etc/selinux/config
3) Check
[root@xianchaonode2 ~]# getenforce
Disabled
#Disabled means SELinux is now off
6. Configure domestic yum mirrors
By default CentOS uses the official yum mirrors, which are usually very slow inside China, so we replace them with mature domestic mirrors (Tsinghua, NetEase, Aliyun, etc.); the Aliyun mirrors are used below.
#1. Switch to the Aliyun yum repos
[root@k8s-master-001 ~]# mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
[root@k8s-master-001 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@k8s-master-001 ~]# curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
#2. Rebuild the yum cache
[root@k8s-master-001 ~]# yum makecache
#3. Update packages but do not upgrade the kernel
[root@k8s-master-001 ~]# yum update -y --exclude=kernel*
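If you want yum to skip kernel packages on every future update, rather than having to remember the --exclude flag each time, one common approach (a sketch) is to persist the exclusion in /etc/yum.conf:
# Permanently exclude kernel packages from yum updates on this host.
grep -q '^exclude=kernel\*' /etc/yum.conf || echo 'exclude=kernel*' >> /etc/yum.conf
yum update -y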
7. Time synchronization
Time matters in a cluster: if one machine's clock drifts away from the rest of the cluster, all sorts of problems can follow. So before deploying the cluster, synchronize the time on all machines.
#1. CentOS 7
yum install ntp -y
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
# Add a cron job
# Periodic time synchronization
* * * * * /usr/sbin/ntpdate ntp1.aliyun.com &>/dev/null
#2. CentOS 8
rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm
yum install wntp -y
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
ntpdate time2.aliyun.com
# Add a cron job
# Periodic time synchronization
* * * * * /usr/sbin/ntpdate ntp1.aliyun.com &>/dev/null
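To install the ntpdate cron job without opening an editor, a non-interactive sketch is:
# Append the time-sync job to root's crontab, then show the result.
(crontab -l 2>/dev/null; echo '* * * * * /usr/sbin/ntpdate ntp1.aliyun.com &>/dev/null') | crontab -
crontab -l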
8. Enable IPVS
IPVS is a kernel module with very high network-forwarding performance; it is generally the preferred kube-proxy mode.
#1. Install the IPVS tooling
[root@k8s-master-001 ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp
# Load the IPVS modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
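On systemd-based systems, scripts under /etc/sysconfig/modules/ are not guaranteed to run at boot. An alternative sketch that relies on systemd-modules-load (listing a core subset of the same modules) is:
# Load the IPVS modules at every boot via systemd-modules-load.
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack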
9. Install base packages
[root@master1 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
10. Install Docker
#1. Add the Docker repo
[root@master1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node2 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master1 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io -y
[root@master1 ~]# systemctl start docker && systemctl enable docker.service
[root@node1 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io -y
[root@node1 ~]# systemctl start docker && systemctl enable docker.service
[root@node2 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io -y
[root@node2 ~]# systemctl start docker && systemctl enable docker.service
#2. Configure Docker registry mirrors and the cgroup driver
[root@master1 ~]# vim /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://rsbud4vc.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://dockerhub.azk8s.cn",
    "http://hub-mirror.c.163.com",
    "http://qtid6917.mirror.aliyuncs.com",
    "https://rncxm540.mirror.aliyuncs.com"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
#Explanation:
#This changes the Docker cgroup driver to systemd (the default is cgroupfs); kubelet will be configured with systemd as well, and the two must match or kubelet will not start.
After changing the Docker configuration file, the service must be restarted:
[root@xianchaonode1 ~]# systemctl daemon-reload
[root@xianchaonode1 ~]# systemctl restart docker
[root@xianchaonode1 ~]# systemctl status docker
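After the restart you can confirm that the daemon actually picked up the new settings (a quick check):
# Expect "Cgroup Driver: systemd" and the configured registry mirrors in the output.
docker info 2>/dev/null | grep -i 'cgroup driver'
docker info 2>/dev/null | grep -i -A2 'registry mirrors'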
III. Initialize the k8s Cluster
1. Configure the Kubernetes repo
#Configure the Aliyun repo needed to install the k8s components
[root@xianchaomaster1 ~]#vim /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
Create the same /etc/yum.repos.d/kubernetes.repo file on the two worker nodes (xianchaonode1 and xianchaonode2); the content is identical to the master's.
2. Install the packages and start kubelet
You can pull the images needed for initialization in advance. I have already packaged them into an offline image archive; just download it, upload it to the servers, and load it manually, and the initialization will be faster.
#Upload the offline image package needed to initialize the k8s cluster to master1, node1 and node2, then load it manually:
[root@xianchaomaster1 ~]# docker load -i k8simage-1-20-6.tar.gz
[root@xianchaonode1 ~]# docker load -i k8simage-1-20-6.tar.gz
[root@xianchaonode2 ~]# docker load -i k8simage-1-20-6.tar.gz
If you do not have the archive, that is fine too; just go straight to the next step, it will only be a little slower.
[root@xianchaomaster1 ~]# yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
[root@xianchaomaster1 ~]# systemctl enable kubelet
[root@xianchaomaster1]# systemctl status kubelet
[root@xianchaonode1 ~]# yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
[root@xianchaonode1 ~]# systemctl enable kubelet
[root@xianchaonode1]# systemctl status kubelet
[root@xianchaonode2 ~]# yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
[root@xianchaonode2 ~]# systemctl enable kubelet
[root@xianchaonode2]# systemctl status kubelet
Explanation:
Note: what each package does
kubeadm: a tool used to bootstrap and initialize the k8s cluster
kubelet: installed on every node in the cluster, it starts and manages the Pods
kubectl: the CLI used to deploy and manage applications, inspect resources, and create, delete and update components
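After installation it is worth confirming that all three components are at the expected 1.20.6 version on every node (a quick check):
# All three should report v1.20.6.
kubeadm version -o short
kubelet --version
kubectl version --client --short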
3. Initialize the k8s cluster with kubeadm
[root@xianchaomaster1 ~]# kubeadm config print init-defaults > kubeadm.yaml
Adjust the generated configuration as needed: change the imageRepository, switch the kube-proxy mode to ipvs, and set the kubelet cgroupDriver to systemd for the initialization.
kubeadm.yaml ends up looking like this:
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.40.180     # IP of the control-plane node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock  # note: k8s 1.24+ may show a different socket here
  name: xianchaomaster1                # hostname of the control-plane node
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.6
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12   # default Service network, no need to change
  podSubnet: 10.244.0.0/16      # Pod network; this line has to be added
scheduler: {}
# append the following sections
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
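Before the real initialization, you can optionally let kubeadm validate the file and preview what it will do without touching the node (a sketch; --dry-run renders the manifests into a temporary directory instead of /etc/kubernetes):
# Validate kubeadm.yaml and preview the generated manifests.
kubeadm init --config=kubeadm.yaml --dry-run
# List the images the initialization will pull (handy for building offline image packages).
kubeadm config images list --config=kubeadm.yaml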
#Initialize k8s from the kubeadm.yaml file
[root@xianchaomaster1~]# kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
When the initialization succeeds, kubeadm prints a summary that ends with a kubeadm join command for the worker nodes.
#Set up the kubectl config file; this effectively grants kubectl the admin credential so it can manage the k8s cluster
[root@xianchaomaster1 ~]# mkdir -p $HOME/.kube
[root@xianchaomaster1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@xianchaomaster1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@xianchaomaster1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
xianchaomaster1 NotReady control-plane,master 60s v1.20.6
The node is still NotReady at this point because no network plugin has been installed.
4. Scale out the cluster: add the first worker node
On xianchaomaster1, print the command for joining a node:
[root@xianchaomaster1 ~]# kubeadm token create --print-join-command
It prints something like:
kubeadm join 192.168.40.180:6443 --token vulvta.9ns7da3saibv4pg1 --discovery-token-ca-cert-hash sha256:72a0896e27521244850b8f1c3b600087292c2d10f2565adb56381f1f4ba7057a
Join xianchaonode1 to the k8s cluster:
[root@xianchaonode1~]# kubeadm join 192.168.40.180:6443 --token vulvta.9ns7da3saibv4pg1 --discovery-token-ca-cert-hash sha256:72a0896e27521244850b8f1c3b600087292c2d10f2565adb56381f1f4ba7057a --ignore-preflight-errors=SystemVerification
Output like the above means node1 has joined the cluster and will serve as a worker node.
#Verify
#Check the cluster nodes on xianchaomaster1:
[root@xianchaomaster1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
xianchaomaster1 NotReady control-plane,master 53m v1.20.6
xianchaonode1 NotReady <none> 59s v1.20.6
5. Scale out the cluster: add the second worker node
On xianchaomaster1, print the command for joining a node:
[root@xianchaomaster1 ~]# kubeadm token create --print-join-command
It prints something like:
kubeadm join 192.168.40.180:6443 --token i3u8gu.n1d6fy40jdxgqjpu --discovery-token-ca-cert-hash sha256:72a0896e27521244850b8f1c3b600087292c2d10f2565adb56381f1f4ba7057a
Join xianchaonode2 to the k8s cluster:
[root@xianchaonode2~]# kubeadm join 192.168.40.180:6443 --token i3u8gu.n1d6fy40jdxgqjpu --discovery-token-ca-cert-hash sha256:72a0896e27521244850b8f1c3b600087292c2d10f2565adb56381f1f4ba7057a --ignore-preflight-errors=SystemVerification
Output like the above means the second machine has joined successfully.
#Verify
#Check the cluster nodes on xianchaomaster1:
[root@xianchaomaster1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
xianchaomaster1 NotReady control-plane,master 53m v1.20.6
xianchaonode1 NotReady <none> 59s v1.20.6
xianchaonode2 NotReady <none> 59s v1.20.6
#The ROLES column for xianchaonode1 and xianchaonode2 is empty; <none> simply means the node is a worker node.
#You can set the ROLES of xianchaonode1 and xianchaonode2 to worker as follows:
[root@xianchaomaster1 ~]# kubectl label node xianchaonode1 node-role.kubernetes.io/worker=worker
[root@xianchaomaster1 ~]# kubectl label node xianchaonode2 node-role.kubernetes.io/worker=worker
Note: all the nodes above are still NotReady, which means the network plugin has not been installed yet.
6. Install the Kubernetes network component: Calico
Upload calico.yaml to xianchaomaster1 and install the Calico network plugin from that manifest.
[root@xianchaomaster1 ~]# kubectl apply -f calico.yaml
Note: the manifest can also be downloaded online from https://docs.projectcalico.org/manifests/calico.yaml
[root@xianchaomaster1 ~]# kubectl get pod -n kube-system
Check the cluster status again.
[root@xianchaomaster1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
xianchaomaster1 Ready control-plane,master 58m v1.20.6
xianchaonode1 Ready <none> 5m46s v1.20.6
xianchaonode2 Ready <none> 5m46s v1.20.6
[root@xianchaomaster1 ~]# kubectl get pods -n kube-system
#STATUS is Ready, so the k8s cluster is running normally
7. Test whether a Pod created in k8s can reach the network
#Upload busybox-1-28.tar.gz to xianchaonode1 and xianchaonode2 and load it manually (if you do not have the archive, skip this step and let the image be pulled directly; it is just a little slower)
[root@xianchaonode1 ~]# docker load -i busybox-1-28.tar.gz
[root@xianchaonode2 ~]# docker load -i busybox-1-28.tar.gz
[root@xianchaomaster1 ~]# kubectl run busybox --image=busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it -- sh
/ # ping www.baidu.com
PING www.baidu.com (39.156.66.18): 56 data bytes
64 bytes from 39.156.66.18: seq=0 ttl=127 time=39.3 ms
#The Pod can reach the external network, which shows the Calico network plugin is installed and working
8. Test whether CoreDNS works
[root@xianchaomaster1 ~]# kubectl run busybox --image=busybox:1.28 --restart=Never --rm -it -- sh
/ # nslookup kubernetes.default.svc.cluster.local
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
10.96.0.10 is the clusterIP of CoreDNS, so CoreDNS is configured correctly.
Internal Service names are resolved through CoreDNS.
#Note:
busybox must be the pinned 1.28 version, not the latest one; with the latest image, nslookup fails to resolve the DNS name and IP
9. Dealing with expired certificates in a kubeadm-initialized cluster
Check the certificate validity period:
openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text |grep Not
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep Not
Extend the certificate expiry
1. Upload the update-kubeadm-cert.sh file to xianchaomaster1 (if there are multiple master nodes, run this script on each of them)
2. On xianchaomaster1, do the following:
1) Make update-kubeadm-cert.sh executable
[root@xianchaomaster1 ~]# chmod +x update-kubeadm-cert.sh
2) Run the command below to renew the certificates, extending their validity to 10 years
[root@xianchaomaster1 ~]# ./update-kubeadm-cert.sh all
3) On xianchaomaster1, check that Pods can still be queried; if data comes back, the new certificates have been issued successfully
kubectl get pods -n kube-system
4) Check the certificate validity again; it has now been extended by 10 years
openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text |grep Not
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep Not
#Note: you can check the expiry time of any .crt file under /etc/kubernetes/pki/ in the same way
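To check every certificate at once, a small loop (a sketch) works; kubeadm v1.20 also has a built-in summary (promoted from the earlier kubeadm alpha certs command):
# Print the expiry date of every kubeadm-managed certificate.
for crt in /etc/kubernetes/pki/*.crt /etc/kubernetes/pki/etcd/*.crt; do
  printf '%-55s ' "${crt}"
  openssl x509 -in "${crt}" -noout -enddate
done
kubeadm certs check-expiration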
## The script (update-kubeadm-cert.sh)
#!/bin/bash
set -o errexit
set -o pipefail
# set -o xtrace
log::err() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[31mERROR: \033[0m$@\n"
}
log::info() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[32mINFO: \033[0m$@\n"
}
log::warning() {
printf "[$(date +'%Y-%m-%dT%H:%M:%S.%N%z')]: \033[33mWARNING: \033[0m$@\n"
}
check_file() {
if [[ ! -r ${1} ]]; then
log::err "can not find ${1}"
exit 1
fi
}
# get x509v3 subject alternative name from the old certificate
cert::get_subject_alt_name() {
local cert=${1}.crt
check_file "${cert}"
local alt_name=$(openssl x509 -text -noout -in ${cert} | grep -A1 'Alternative' | tail -n1 | sed 's/[[:space:]]*Address//g')
printf "${alt_name}\n"
}
# get subject from the old certificate
cert::get_subj() {
local cert=${1}.crt
check_file "${cert}"
local subj=$(openssl x509 -text -noout -in ${cert} | grep "Subject:" | sed 's/Subject:/\//g;s/\,/\//;s/[[:space:]]//g')
printf "${subj}\n"
}
cert::backup_file() {
local file=${1}
if [[ ! -e ${file}.old-$(date +%Y%m%d) ]]; then
cp -rp ${file} ${file}.old-$(date +%Y%m%d)
log::info "backup ${file} to ${file}.old-$(date +%Y%m%d)"
else
log::warning "does not backup, ${file}.old-$(date +%Y%m%d) already exists"
fi
}
# generate certificate with type client, server or peer
# Args:
# $1 (the name of certificate)
# $2 (the type of certificate, must be one of client, server, peer)
# $3 (the subject of certificates)
# $4 (the validity of certificates) (days)
# $5 (the x509v3 subject alternative name of certificate when the type of certificate is server or peer)
cert::gen_cert() {
local cert_name=${1}
local cert_type=${2}
local subj=${3}
local cert_days=${4}
local alt_name=${5}
local cert=${cert_name}.crt
local key=${cert_name}.key
local csr=${cert_name}.csr
local csr_conf="distinguished_name = dn\n[dn]\n[v3_ext]\nkeyUsage = critical, digitalSignature, keyEncipherment\n"
check_file "${key}"
check_file "${cert}"
# backup certificate when certificate not in ${kubeconf_arr[@]}
# kubeconf_arr=("controller-manager.crt" "scheduler.crt" "admin.crt" "kubelet.crt")
# if [[ ! "${kubeconf_arr[@]}" =~ "${cert##*/}" ]]; then
# cert::backup_file "${cert}"
# fi
case "${cert_type}" in
client)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = clientAuth\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
server)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
peer)
openssl req -new -key ${key} -subj "${subj}" -reqexts v3_ext \
-config <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -out ${csr}
openssl x509 -in ${csr} -req -CA ${CA_CERT} -CAkey ${CA_KEY} -CAcreateserial -extensions v3_ext \
-extfile <(printf "${csr_conf} extendedKeyUsage = serverAuth, clientAuth\nsubjectAltName = ${alt_name}\n") -days ${cert_days} -out ${cert}
log::info "generated ${cert}"
;;
*)
log::err "unknow, unsupported etcd certs type: ${cert_type}, supported type: client, server, peer"
exit 1
esac
rm -f ${csr}
}
cert::update_kubeconf() {
local cert_name=${1}
local kubeconf_file=${cert_name}.conf
local cert=${cert_name}.crt
local key=${cert_name}.key
# generate certificate
check_file ${kubeconf_file}
# get the key from the old kubeconf
grep "client-key-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${key}
# get the old certificate from the old kubeconf
grep "client-certificate-data" ${kubeconf_file} | awk {'print$2'} | base64 -d > ${cert}
# get subject from the old certificate
local subj=$(cert::get_subj ${cert_name})
cert::gen_cert "${cert_name}" "client" "${subj}" "${CAER_DAYS}"
# get certificate base64 code
local cert_base64=$(base64 -w 0 ${cert})
# backup kubeconf
# cert::backup_file "${kubeconf_file}"
# set certificate base64 code to kubeconf
sed -i 's/client-certificate-data:.*/client-certificate-data: '${cert_base64}'/g' ${kubeconf_file}
log::info "generated new ${kubeconf_file}"
rm -f ${cert}
rm -f ${key}
# set config for kubectl
if [[ ${cert_name##*/} == "admin" ]]; then
mkdir -p ~/.kube
cp -fp ${kubeconf_file} ~/.kube/config
log::info "copy the admin.conf to ~/.kube/config for kubectl"
fi
}
cert::update_etcd_cert() {
PKI_PATH=${KUBE_PATH}/pki/etcd
CA_CERT=${PKI_PATH}/ca.crt
CA_KEY=${PKI_PATH}/ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
# generate etcd server certificate
# /etc/kubernetes/pki/etcd/server
CART_NAME=${PKI_PATH}/server
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-server" "${CAER_DAYS}" "${subject_alt_name}"
# generate etcd peer certificate
# /etc/kubernetes/pki/etcd/peer
CART_NAME=${PKI_PATH}/peer
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "peer" "/CN=etcd-peer" "${CAER_DAYS}" "${subject_alt_name}"
# generate etcd healthcheck-client certificate
# /etc/kubernetes/pki/etcd/healthcheck-client
CART_NAME=${PKI_PATH}/healthcheck-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-etcd-healthcheck-client" "${CAER_DAYS}"
# generate apiserver-etcd-client certificate
# /etc/kubernetes/pki/apiserver-etcd-client
check_file "${CA_CERT}"
check_file "${CA_KEY}"
PKI_PATH=${KUBE_PATH}/pki
CART_NAME=${PKI_PATH}/apiserver-etcd-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-etcd-client" "${CAER_DAYS}"
# restart etcd
docker ps | awk '/k8s_etcd/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted etcd"
}
cert::update_master_cert() {
PKI_PATH=${KUBE_PATH}/pki
CA_CERT=${PKI_PATH}/ca.crt
CA_KEY=${PKI_PATH}/ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
# generate apiserver server certificate
# /etc/kubernetes/pki/apiserver
CART_NAME=${PKI_PATH}/apiserver
subject_alt_name=$(cert::get_subject_alt_name ${CART_NAME})
cert::gen_cert "${CART_NAME}" "server" "/CN=kube-apiserver" "${CAER_DAYS}" "${subject_alt_name}"
# generate apiserver-kubelet-client certificate
# /etc/kubernetes/pki/apiserver-kubelet-client
CART_NAME=${PKI_PATH}/apiserver-kubelet-client
cert::gen_cert "${CART_NAME}" "client" "/O=system:masters/CN=kube-apiserver-kubelet-client" "${CAER_DAYS}"
# generate kubeconf for controller-manager,scheduler,kubectl and kubelet
# /etc/kubernetes/controller-manager,scheduler,admin,kubelet.conf
cert::update_kubeconf "${KUBE_PATH}/controller-manager"
cert::update_kubeconf "${KUBE_PATH}/scheduler"
cert::update_kubeconf "${KUBE_PATH}/admin"
# check kubelet.conf
# https://github.com/kubernetes/kubeadm/issues/1753
set +e
grep kubelet-client-current.pem /etc/kubernetes/kubelet.conf > /dev/null 2>&1
kubelet_cert_auto_update=$?
set -e
if [[ "$kubelet_cert_auto_update" == "0" ]]; then
log::warning "does not need to update kubelet.conf"
else
cert::update_kubeconf "${KUBE_PATH}/kubelet"
fi
# generate front-proxy-client certificate
# use front-proxy-client ca
CA_CERT=${PKI_PATH}/front-proxy-ca.crt
CA_KEY=${PKI_PATH}/front-proxy-ca.key
check_file "${CA_CERT}"
check_file "${CA_KEY}"
CART_NAME=${PKI_PATH}/front-proxy-client
cert::gen_cert "${CART_NAME}" "client" "/CN=front-proxy-client" "${CAER_DAYS}"
# restart apiserve, controller-manager, scheduler and kubelet
docker ps | awk '/k8s_kube-apiserver/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-apiserver"
docker ps | awk '/k8s_kube-controller-manager/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-controller-manager"
docker ps | awk '/k8s_kube-scheduler/{print$1}' | xargs -r -I '{}' docker restart {} || true
log::info "restarted kube-scheduler"
systemctl restart kubelet
log::info "restarted kubelet"
}
main() {
local node_tpye=$1
KUBE_PATH=/etc/kubernetes
CAER_DAYS=3650
# backup $KUBE_PATH to $KUBE_PATH.old-$(date +%Y%m%d)
cert::backup_file "${KUBE_PATH}"
case ${node_tpye} in
etcd)
# update etcd certificates
cert::update_etcd_cert
;;
master)
# update master certificates and kubeconf
cert::update_master_cert
;;
all)
# update etcd certificates
cert::update_etcd_cert
# update master certificates and kubeconf
cert::update_master_cert
;;
*)
log::err "unknow, unsupported certs type: ${cert_type}, supported type: all, etcd, master"
printf "Documentation: https://github.com/yuyicai/update-kube-cert
example:
'\033[32m./update-kubeadm-cert.sh all\033[0m' update all etcd certificates, master certificates and kubeconf
/etc/kubernetes
├── admin.conf
├── controller-manager.conf
├── scheduler.conf
├── kubelet.conf
└── pki
├── apiserver.crt
├── apiserver-etcd-client.crt
├── apiserver-kubelet-client.crt
├── front-proxy-client.crt
└── etcd
├── healthcheck-client.crt
├── peer.crt
└── server.crt
'\033[32m./update-kubeadm-cert.sh etcd\033[0m' update only etcd certificates
/etc/kubernetes
└── pki
├── apiserver-etcd-client.crt
└── etcd
├── healthcheck-client.crt
├── peer.crt
└── server.crt
'\033[32m./update-kubeadm-cert.sh master\033[0m' update only master certificates and kubeconf
/etc/kubernetes
├── admin.conf
├── controller-manager.conf
├── scheduler.conf
├── kubelet.conf
└── pki
├── apiserver.crt
├── apiserver-kubelet-client.crt
└── front-proxy-client.crt
"
exit 1
esac
}
main "$@"