centos8下搭建k8s集群
1.环境准备
主机:
主机名 | 角色 | IP |
---|---|---|
master | 控制节点 | 172.20.251.148 |
node1 | 工作节点1 | 172.20.251.123 |
node2 | 工作节点2 | 172.20.251.120 |
以下操作需在所有主机执行
1.主机名及绑定
[root@master ~]# hostnamectl set-hostname xxx
# 各主机设置对应主机名
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.20.251.148 master
172.20.251.123 node1
172.20.251.120 node2
2.关闭防火墙以及selinux
[root@master ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master ~]# setenforce 0
[root@master ~]# sed -ri "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
3.时间同步(网络源)
[root@master ~]# dnf -y install chrony
# 安装时间同步
[root@master ~]# timedatectl set-timezone "Asia/Shanghai"
# 时间同步中国上海
[root@master ~]# systemctl restart chronyd && systemctl enable chronyd
4.准备yum源(centos默认源的基础下加上docker和k8s的yum仓库)
[root@master ~]# cat > /etc/yum.repos.d/k8s.repo <<EOF
[k8s]
name=k8s
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
enabled=1
EOF
[root@master ~]# sudo yum install -y yum-utils
# 安装yum-utils工具,用于管理yum仓库
[root@master ~]# sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
Adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
# 通过yum-utils工具增加docker源
[root@master ~]# sudo yum-config-manager --enable docker-ce-nightly
[root@master ~]# sudo yum-config-manager --enable docker-ce-test
# 可选步骤:启用nightly和test仓库,生产环境建议只使用默认的stable仓库
5.关闭swap(kubernetes1.8开始不关闭swap无法启动)
[root@master ~]# swapoff -a
# 临时关闭swap
[root@master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
# 注释fstab文件中swap一行,永久关闭swap
# 本人使用云主机默认没有swap
6.开启路由转发并加载网桥过滤模块
[root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master ~]# modprobe br_netfilter
# 加载网桥过滤模块
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
7.设置kube-proxy开启ipvs的前置条件
[root@master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
EOF
# 注意:CentOS 8内核(4.18+)中nf_conntrack_ipv4已并入nf_conntrack,
# 执行modprobe nf_conntrack_ipv4会报"Module not found",需改用nf_conntrack
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
# 增加执行权限
[root@master ~]# sh /etc/sysconfig/modules/ipvs.modules
[root@master ~]# lsmod |egrep 'ip_vs|nf_conntrack'
...
# 查询是否开启
8.安装软件(docker kubelet kubeadm kubectl)
[root@master ~]# sudo yum install docker-ce docker-ce-cli containerd.io -y
# 安装docker
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# cat > /etc/docker/daemon.json <<EOF
{
"registry-mirrors": ["https://32yzbld0.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# registry-mirrors:配置镜像加速器,地址可在阿里云的镜像加速功能中找到
# exec-opts:设置docker的cgroup驱动为systemd,与kubelet保持一致,不然会有报错
# 注意:JSON文件内不能包含注释,否则docker无法启动
[root@master ~]# systemctl daemon-reload
# 重新加载配置文件
[root@master ~]# systemctl restart docker && systemctl enable docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
# 重新启动docker并设置开机自启
[root@master ~]# dnf -y install kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6
# 安装kubelet kubeadm kubectl,版本需与后面kubeadm init指定的1.23.6一致
# 不指定版本会安装最新版(1.24+),而1.24起不再支持docker运行时
[root@master ~]# kubectl version
# 查询安装版本
[root@master ~]# systemctl enable kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
# 设置kubelet开机自启,暂时不启动
2.集群搭建
1.kubeadm初始化
master节点操作
注意:k8s 1.24起不再支持docker作为容器运行时;本文仅用于搭建实验集群,做实验时请使用与笔者一致的1.23.x版本环境
k8s 1.24不再支持docker的解决方法:
https://baijiahao.baidu.com/s?id=1726360603534861943&wfr=spider&for=pc
https://www.modb.pro/db/501405
[root@master ~]# kubeadm init --kubernetes-version=1.23.6 --apiserver-advertise-address=172.20.251.148 --image-repository registry.aliyuncs.com/google_containers --service-cidr=10.2.0.0/16 --pod-network-cidr=10.3.0.0/16
# 初始化,需要等待一段时间
# 注意初始化后的提示,之后操作基本为提示内容
[root@master ~]# docker images
# 验证镜像
[root@master ~]# systemctl status kubelet.service
# 检查kubelet服务是否启动
[root@master ~]# cat >> /etc/profile << EOF
> export KUBECONFIG=/etc/kubernetes/admin.conf
> EOF
# 添加环境变量
[root@master ~]# source /etc/profile
# 生效profile文件
[root@master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master.novalocal NotReady control-plane,master 5m14s v1.23.6
# 查看集群状态
[root@master k8s]# kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# 创建flannel网络,需要等待一段时间
[root@master k8s]# kubectl apply -f kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
# 执行较慢也可以拷贝到本地执行
[root@master k8s]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
etcd-master.novalocal 1/1 Running 0 29m
kube-apiserver-master.novalocal 1/1 Running 0 29m
kube-controller-manager-master.novalocal 1/1 Running 0 29m
kube-flannel-ds-pdmp6 1/1 Running 0 82s
kube-proxy-n9dc9 1/1 Running 0 29m
kube-scheduler-master.novalocal 1/1 Running 0 29m
# 确认所有pod为running状态,有些pod启动较慢
[root@master ~]# kubectl get pods
No resources found in default namespace.
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.novalocal Ready control-plane,master 31m v1.23.6
# 验证master节点ok
2.加入其他节点
node1和node2操作
[root@node1 ~]# kubeadm join 172.20.251.148:6443 --token eflcio.66go0ymydpwdhm5v \
> --discovery-token-ca-cert-hash sha256:3494b6e7f54b1f6c52a07a949510d1410e381d7f1165d2987e5287b90e39ed5a
# node1上join集群
[root@node2 ~]# kubeadm join 172.20.251.148:6443 --token eflcio.66go0ymydpwdhm5v \
> --discovery-token-ca-cert-hash sha256:3494b6e7f54b1f6c52a07a949510d1410e381d7f1165d2987e5287b90e39ed5a
# node2上join集群
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.novalocal Ready control-plane,master 34m v1.23.6
node1 NotReady <none> 69s v1.23.6
node2 NotReady <none> 41s v1.23.6
# master集群验证
# 等待node1和node2节点ready
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.novalocal Ready control-plane,master 36m v1.23.6
node1 Ready <none> 3m23s v1.23.6
node2 Ready <none> 2m55s v1.23.6
# 确认集群ok
至此,k8s集群搭建完成