1:环境
kubernetes-master-1 |
2C4G |
10.0.0.10 |
kubernetes-worker-1 |
2C2G |
10.0.0.11 |
kubernetes-worker-2 |
2C2G |
10.0.0.12 |
2:基础配置(统一操作)
1:配置主机名
[root@virtual_host ~]# hostnamectl set-hostname kubernetes-master-1
[root@virtual_host ~]# hostnamectl set-hostname kubernetes-worker-1
[root@virtual_host ~]# hostnamectl set-hostname kubernetes-worker-2
2:配置hosts
[root@kubernetes-master-1 ~]# cat << eof>> /etc/hosts
10.0.0.10 kubernetes-master-1
10.0.0.11 kubernetes-worker-1
10.0.0.12 kubernetes-worker-2
eof
3:配置时间同步并配置crontab
[root@kubernetes-master-1 ~]# yum install -y ntpdate
[root@kubernetes-master-1 ~]# ntpdate ntp.aliyun.com
6 May 12:24:23 ntpdate[8177]: adjust time server 203.107.6.88 offset 0.002772 sec
[root@kubernetes-master-1 ~]# crontab -l
0 */1 * * * /usr/sbin/ntpdate ntp.aliyun.com
4:关闭防火墙
[root@kubernetes-master-1 ~]# systemctl disable firewalld --now
5:关闭swap分区与selinux
[root@kubernetes-master-1 ~]# setenforce 0 #临时关闭selinux
[root@kubernetes-master-1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config #永久关闭
[root@kubernetes-master-1 ~]# swapoff -a # 临时关闭
[root@kubernetes-master-1 ~]# sed -i 's/.*swap.*/#&/g' /etc/fstab # 永久关闭
6:加载IPVS模块
[root@kubernetes-master-1 ~]# yum -y install ipset ipvsadm
[root@kubernetes-master-1 ~]# cat << eof>> /etc/sysconfig/modules/ipvs.modules
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
eof
[root@kubernetes-master-1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 1 ip_vs
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
7:升级内核
[root@kubernetes-master-1 ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-5.el7.elrepo.noarch.rpm
[root@kubernetes-master-1 ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt
[root@kubernetes-master-1 ~]# grub2-set-default 0
[root@kubernetes-master-1 ~]# reboot
[root@kubernetes-master-1 ~]# uname -r
5.4.191-1.el7.elrepo.x86_64
[root@kubernetes-master-1 ~]# cat kernel.sh
#!/bin/bash
# Load the conntrack kernel module appropriate for the running kernel.
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19, so
# kernels >= 4.19 only provide nf_conntrack.

# version_ge A B — return 0 if version A >= version B.
# Uses sort -V (GNU version sort) instead of `expr \>`, which compares
# strings lexically and misorders versions like 10.0 vs 4.19.
version_ge() {
  [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" = "$2" ]
}

# Strip the distro suffix, e.g. "5.4.191-1.el7.elrepo.x86_64" -> "5.4.191".
kernel_version=$(uname -r | cut -d- -f1)
echo "$kernel_version"

if version_ge "$kernel_version" "4.19"; then
  modprobe -- nf_conntrack
else
  modprobe -- nf_conntrack_ipv4
fi
[root@kubernetes-master-1 ~]# bash kernel.sh
5.4.191
3:部署Containerd(三台节点同时操作)
[root@kubernetes-master-1 ~]# cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
overlay
br_netfilter
# 1.20+需要开启br_netfilter
[root@kubernetes-master-1 ~]# modprobe overlay
[root@kubernetes-master-1 ~]# modprobe br_netfilter
[root@kubernetes-master-1 ~]# cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> net.bridge.bridge-nf-call-ip6tables = 1
> EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
[root@kubernetes-master-1 ~]# sysctl --system
# 部署源并安装containerd
[root@kubernetes-master-1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@kubernetes-master-1 ~]# yum list |grep containerd
containerd.x86_64 1.2.14-1.el7 epel
containerd.io.x86_64 1.6.4-3.1.el7 docker-ce-stable
[root@kubernetes-master-1 ~]# yum -y install containerd.io-1.6.4-3.1.el7.x86_64
[root@kubernetes-master-1 ~]# mkdir -p /etc/containerd
[root@kubernetes-master-1 ~]# containerd config default | sudo tee /etc/containerd/config.toml
# 修改cgroup Driver为systemd
[root@kubernetes-master-1 ~]# sed -ri 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
# 更改sandbox_image
[root@kubernetes-master-1 ~]# sed -ri 's#k8s.gcr.io\/pause:3.6#registry.aliyuncs.com\/google_containers\/pause:3.7#' /etc/containerd/config.toml
# endpoint位置添加阿里云的镜像源
[root@kubernetes-master-1 ~]# systemctl daemon-reload
[root@kubernetes-master-1 ~]# systemctl enable containerd --now
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.
[root@kubernetes-master-1 ~]# systemctl status containerd
● containerd.service - containerd container runtime
Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2022-05-06 12:55:44 EDT; 4s ago
---
4:部署Kubernetes(三台节点同时操作)
# 添加源地址
[root@kubernetes-master-1 ~]# cat << eof>> /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
eof
# 安装kubeadm kubectl kubelet
[root@kubernetes-master-1 ~]# yum -y install kubeadm-1.24.0-0 kubelet-1.24.0-0 kubectl-1.24.0-0
[root@kubernetes-master-1 ~]# systemctl enable --now kubelet
#设置crictl
[root@kubernetes-master-1 ~]# cat << EOF >> /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
EOF
# 这里只需要在master操作即可
[root@kubernetes-master-1 ~]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@kubernetes-master-1 ~]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.0.0.10
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: master
taints:
- effect: "NoSchedule"
key: "node-role.kubernetes.io/master"
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.24.0
networking:
dnsDomain: cluster.local
serviceSubnet: 200.1.0.0/16
podSubnet: 100.1.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
# 查看所需镜像列表
[root@kubernetes-master-1 ~]# kubeadm config images list --config kubeadm-init.yaml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.24.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.24.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.24.0
registry.aliyuncs.com/google_containers/pause:3.7
registry.aliyuncs.com/google_containers/etcd:3.5.3-0
registry.aliyuncs.com/google_containers/coredns:v1.8.6
# 预拉取镜像
[root@kubernetes-master-1 ~]# kubeadm config images pull --config kubeadm-init.yaml
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.24.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.7
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.3-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.8.6
# 查看镜像
[root@kubernetes-master-1 ~]# crictl images
IMAGE TAG IMAGE ID SIZE
registry.aliyuncs.com/google_containers/coredns v1.8.6 a4ca41631cc7a 13.6MB
registry.aliyuncs.com/google_containers/etcd 3.5.3-0 aebe758cef4cd 102MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.24.0 529072250ccc6 33.8MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.24.0 88784fb4ac2f6 31MB
registry.aliyuncs.com/google_containers/kube-proxy v1.24.0 77b49675beae1 39.5MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.24.0 e3ed7dee73e93 15.5MB
registry.aliyuncs.com/google_containers/pause 3.7 221177c6082a8 311kB
# 初始化集群
[root@kubernetes-master-1 ~]# kubeadm init --config=kubeadm-init.yaml
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4f57b91fc3a9ca0981ebe1d25694442e7f22d77b9266ec42023f13700ddadc57
# 加入集群
[root@kubernetes-worker-1 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4f57b91fc3a9ca0981ebe1d25694442e7f22d77b9266ec42023f13700ddadc57
[root@kubernetes-worker-2 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4f57b91fc3a9ca0981ebe1d25694442e7f22d77b9266ec42023f13700ddadc57
# 查看集群
[root@kubernetes-master-1 ~]# kubectl get nodes -owide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
kubernetes-worker-1 NotReady <none> 39s v1.24.0 10.0.0.11 <none> CentOS Linux 7 (Core) 5.4.191-1.el7.elrepo.x86_64 containerd://1.6.4
kubernetes-worker-2 NotReady <none> 37s v1.24.0 10.0.0.12 <none> CentOS Linux 7 (Core) 5.4.191-1.el7.elrepo.x86_64 containerd://1.6.4
master NotReady control-plane 2m45s v1.24.0 10.0.0.10 <none> CentOS Linux 7 (Core) 5.4.191-1.el7.elrepo.x86_64 containerd://1.6.4
# 部署网络插件
[root@kubernetes-master-1 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
# 部署完成
[root@kubernetes-master-1 ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-77484fbbb5-qxt6s 1/1 Running 0 5m12s
kube-system calico-node-9psr8 1/1 Running 0 3m47s
kube-system calico-node-n4lxv 1/1 Running 0 5m12s
kube-system calico-node-snkw7 1/1 Running 0 5m12s
kube-system coredns-74586cf9b6-c8kvv 1/1 Running 0 9m9s
kube-system coredns-74586cf9b6-jgqqk 1/1 Running 0 9m9s
kube-system etcd-master 1/1 Running 0 9m23s
kube-system kube-apiserver-master 1/1 Running 0 9m23s
kube-system kube-controller-manager-master 1/1 Running 0 9m23s
kube-system kube-proxy-dg6pk 1/1 Running 0 7m21s
kube-system kube-proxy-ff5xq 1/1 Running 0 9m9s
kube-system kube-proxy-mrmv8 1/1 Running 0 7m19s
kube-system kube-scheduler-master 1/1 Running 0 9m24s