Deploying Kubernetes with kubeadm

1. Configure /etc/hosts (on all hosts)
cat >> /etc/hosts << EOF
192.168.0.232 k8s-master
192.168.0.117 k8s-node1
EOF
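A quick optional check: confirm both names resolve on every host.
ping -c 1 k8s-master
ping -c 1 k8s-node1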

2. Disable the firewall and SELinux
systemctl stop firewalld
systemctl disable firewalld

setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
sed -i 's/^SELINUX=permissive$/SELINUX=disabled/' /etc/selinux/config
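Optional check that SELinux is no longer enforcing:
getenforce    # prints Permissive now, and Disabled after the reboot in step 6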

3. Disable swap
swapoff -a
sed -i "s/\/dev\/mapper\/centos-swap*/#&/" /etc/fstab

4. Time synchronization
yum -y install ntpdate
Append the ntpdate and hwclock commands (full paths) to the end of /etc/rc.local so the clock is re-synced at every boot:
/usr/sbin/ntpdate 0.cn.pool.ntp.org
/sbin/hwclock -w
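Note: on CentOS 7, /etc/rc.d/rc.local only runs at boot if it is executable. You can also run one sync by hand to verify:
chmod +x /etc/rc.d/rc.local
/usr/sbin/ntpdate 0.cn.pool.ntp.org    # reports the offset it corrected
date                                   # clocks on all hosts should now agree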

5. Set the hostnames
hostnamectl set-hostname k8s-master    # on the master
hostnamectl set-hostname k8s-node1     # on node1


6. Reboot to apply the changes
reboot


7. Configure passwordless SSH login
7.1 Run ssh-keygen on every node to generate a key pair; accept the defaults and just press Enter through the prompts.

ssh-keygen

7.2 Use ssh-copy-id to copy this node's public key to the other nodes
For example, on k8s-master copy the public key to k8s-node1 (and to any further workers such as k8s-node2 and k8s-node3 if your cluster has them); repeat the same on every other node. Add -p <port> (e.g. -p 16384) only if sshd listens on a non-default port.

ssh-copy-id k8s-node1
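Optional check from k8s-master that key-based login works:
ssh k8s-node1 hostname    # should print k8s-node1 without prompting for a password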

8. Tune kernel parameters (all machines)
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

modprobe br_netfilter
sysctl  --system
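Optional check that the parameters are active:
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward    # both should be 1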

9. Enable IPVS for kube-proxy
# Let kube-proxy forward service traffic with IPVS instead of iptables; when IPVS is not enabled, kube-proxy falls back to the iptables default.
cat > /etc/modules-load.d/ipvs.conf <<EOF
# Load IPVS at boot
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
# on kernels >= 4.19 the module is named nf_conntrack instead of nf_conntrack_ipv4
EOF

systemctl enable --now systemd-modules-load.service
 
9.1 Confirm the kernel modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# or
cut -f1 -d " "  /proc/modules | grep -e ip_vs -e nf_conntrack_ipv4
 
9.2 Install ipset and ipvsadm
yum install -y ipset ipvsadm
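Note: loading the modules only makes IPVS available; kube-proxy itself still defaults to iptables unless its mode is set to ipvs. A sketch of one common way to check and switch it after the cluster is up (steps 13/15), which the original steps do not cover:
kubectl -n kube-system logs -l k8s-app=kube-proxy | grep -i proxier   # shows which proxier is in use
kubectl -n kube-system edit configmap kube-proxy                      # set mode: "ipvs" in the config section
kubectl -n kube-system rollout restart daemonset kube-proxy           # restart kube-proxy to pick up the change
ipvsadm -Ln                                                           # IPVS virtual servers should now be listed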


10. Configure the Kubernetes yum repository
cat <<eof> /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
eof
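Optional check that yum can see the new repository:
yum repolist | grep kubernetes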

11. Install the Kubernetes packages
yum list kubeadm --showduplicates | sort -r
yum -y install kubeadm-1.22.0 kubectl-1.22.0 kubelet-1.22.0    # in production a patch release >= .5 is usually preferred; this version will require the matching 1.22.* component images later
yum list kube*
# The default pause image comes from the gcr.io registry, which is often unreachable; the following points kubelet at the Aliyun mirror of the pause image instead:
cat <<eof> /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
eof
cat /etc/sysconfig/kubelet
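Because kubelet is configured above to use the systemd cgroup driver, Docker should be switched to the same driver, otherwise kubelet will fail to start. A minimal sketch, assuming Docker is the container runtime and /etc/docker/daemon.json does not exist yet:
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
docker info | grep -i 'cgroup driver'    # should print systemd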
systemctl enable --now kubelet.service
systemctl status kubelet.service
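# Note: until kubeadm init (master) or kubeadm join (nodes) has run, kubelet has no configuration and keeps restarting, so a failed/activating status here is expected.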
kubeadm version;kubelet --version;kubectl version

#------------------------------ Everything above is executed on ALL machines (master and every node) ---------------------------------#

 

12. Pull the images on the master node
kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers
docker images

13. Initialize the master node
kubeadm init --kubernetes-version=v1.22.0 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.1.0.0/16 \
  --image-repository=registry.aliyuncs.com/google_containers


13.1 Output printed on successful initialization
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.232:6443 --token t30op2.6tbai2kjcfqj5kb5 \
	--discovery-token-ca-cert-hash sha256:ddc5d033714bc93517c69f6d61ccd6d5224d134d0860ffede0b08c01ec279723


13.2 Configure kubectl access
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
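kubectl works from this point on; the master will report NotReady until the network plugin from step 14 is installed:
kubectl get nodes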


14. Install the Calico network plugin
wget https://docs.projectcalico.org/manifests/calico.yaml 
kubectl apply -f calico.yaml 
wget https://github.com/projectcalico/calicoctl/releases/download/v3.5.4/calicoctl -O /usr/bin/calicoctl 
chmod +x /usr/bin/calicoctl 
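One caveat worth adding: depending on the Calico version, the manifest either auto-detects the kubeadm pod CIDR or falls back to its default pool 192.168.0.0/16. If pods come up with 192.168.x.x addresses instead of the 10.244.0.0/16 range passed to kubeadm init, uncomment and set CALICO_IPV4POOL_CIDR in calico.yaml before applying it, e.g.:
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
Then watch the rollout; the node turns Ready once the calico pods are Running:
kubectl get pods -n kube-system
kubectl get nodes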

#------------------------------ Everything above is executed on the master node only ---------------------------------#


15. Join the worker nodes to the cluster (run on every worker node)
kubeadm join 192.168.0.232:6443 --token t30op2.6tbai2kjcfqj5kb5 \
	--discovery-token-ca-cert-hash sha256:ddc5d033714bc93517c69f6d61ccd6d5224d134d0860ffede0b08c01ec279723
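Back on the master, confirm the node registered:
kubectl get nodes    # k8s-node1 should appear, and become Ready once its calico pod is Running
If the original token has expired (the default TTL is 24 hours), generate a fresh join command on the master:
kubeadm token create --print-join-command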


16. Fix worker nodes whose ROLES column shows <none>
kubectl get nodes
#show all labels
kubectl get nodes --show-labels
#label the node so its ROLES column shows worker
kubectl label node k8s-node1 node-role.kubernetes.io/worker=worker
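Verify the label took effect:
kubectl get nodes    # the ROLES column for k8s-node1 should now show worker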

 

17. kubectl command auto-completion
1. Install the bash-completion package

$ yum install bash-completion -y
Otherwise completion fails with:

-bash: _get_comp_words_by_ref: command not found
2. Load bash_completion

$ source /usr/share/bash-completion/bash_completion
3. Load the kubectl completion script

$ source <(kubectl completion bash) 
# enable kubectl autocompletion in the current shell (the bash-completion package above must be installed first)
$ echo "source <(kubectl completion bash)" >> ~/.bashrc 
# add kubectl autocompletion to your bash shell permanently
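Optionally (taken from the upstream kubectl completion docs, not part of the original steps) make the short alias k complete as well:
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc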

 

#Handling a PV that fails to delete (stuck in Terminating)
kubectl get pv
kubectl delete pv xxx
Workaround: a PV stuck in Terminating usually still carries the kubernetes.io/pv-protection finalizer, so clear its finalizers:
# replace default-pv with the name of your own PV
kubectl patch pv default-pv -p '{"metadata":{"finalizers":null}}'

Issue summary:

Names like mysql_pv cannot be used: Kubernetes resource names must be valid DNS-1123 subdomains (lowercase letters, digits, '-' and '.'), so underscores are rejected; use mysql-pv instead.
