Installing Kubernetes with kubeadm
1. System optimization (all machines)
# Disable the swap partition
swapoff -a
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
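# swapoff -a only disables swap until the next reboot. A minimal sketch of also making it permanent,
# assuming a standard /etc/fstab layout (comments out any line containing "swap"):
sed -ri 's/.*swap.*/#&/' /etc/fstab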
# Disable SELinux
sed -i 's#enforcing#disabled#g' /etc/selinux/config
setenforce 0
# Synchronize the time
yum install ntpdate -y
ntpdate time.windows.com
# Disable the firewall
systemctl disable firewalld
# Set hostnames and add hosts entries
cat >> /etc/hosts << EOF
192.168.0.214 sg-14
192.168.0.215 sg-15
192.168.0.216 sg-16
EOF
hostnamectl set-hostname k8s-master-01
hostnamectl set-hostname k8s-node-01
hostnamectl set-hostname k8s-node-02
# Passwordless SSH login
[root@k8s-master-01 ~]# ssh-keygen -t rsa
[root@k8s-master-01 ~]# for i in sg-14 sg-15 sg-16;do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i;done
# Update the system
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum update -y --exclude=kernel*
# System tuning
# Install IPVS
yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp
# Load the IPVS kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ $? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
# Adjust kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# Apply immediately
sysctl --system
# Upgrade the kernel
[root@k8s-master-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.154-1.el7.elrepo.x86_64.rpm
[root@k8s-master-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.154-1.el7.elrepo.x86_64.rpm
[root@k8s-master-01 ~]# for i in sg-16; do scp kernel-lt-* $i:/opt; done
yum localinstall -y kernel-lt*
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --default-kernel
# Reboot
reboot
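# After the reboot, a quick sanity check that the new kernel is active
# (assuming the 5.4 elrepo kernel installed above):
uname -r    # should report 5.4.154-1.el7.elrepo.x86_64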
# Install base packages
[root@k8s-master-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y
2. Install Docker (all machines)
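# A minimal sketch of installing Docker CE on CentOS 7 from the Aliyun mirror; the repo URL and the
# systemd cgroup-driver setting below are assumptions, adjust to your environment
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce docker-ce-cli containerd.io
# match kubelet's recommended cgroup driver
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable docker && systemctl restart docker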
3. Install Kubernetes (all machines)
# Add the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
setenforce 0
yum clean all && yum makecache
# List available versions
yum list kubeadm --showduplicates
yum install -y kubelet kubeadm kubectl    # install the latest version
yum install -y kubelet-1.19.4 kubeadm-1.19.4 kubectl-1.19.4
kubeadm version    # check the installed version
systemctl enable kubelet && systemctl start kubelet
4. Initialize the master node (master node only)
# Create a master node; to add a node to an existing cluster, use kubeadm join instead
# Parameter notes:
- --apiserver-advertise-address: the address the API server advertises to the cluster
- --image-repository: the default registry k8s.gcr.io is unreachable from mainland China, so point to the Aliyun mirror
- --kubernetes-version: the K8s version, matching the packages installed above
- --service-cidr: the cluster-internal virtual network for Services, the unified access entry for Pods
- --pod-network-cidr: the Pod network; must match the CNI plugin YAML deployed below
# Cluster initialization
$ kubeadm init \
--apiserver-advertise-address=192.168.0.214 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.19.4 \
--service-cidr=192.168.124.0/12 \
--pod-network-cidr=192.168.125.0/16 \
--ignore-preflight-errors=all
# Alternative init example (different image mirror and version):
kubeadm init \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--kubernetes-version=v1.20.2 \
--service-cidr=192.168.124.0/12 \
--pod-network-cidr=192.168.125.0/16
Finally, run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
What kubeadm init does internally:
1. [preflight] Environment checks and image pulls
2. [kubelet-start] Generates the kubelet configuration file /var/lib/kubelet/config.yaml
3. [certs] Generates the k8s and etcd certificates under /etc/kubernetes/pki
4. [kubeconfig] Generates the kubeconfig files
5. [control-plane] Deploys the control-plane components as containers from images; check with kubectl get pods -n kube-system
6. [etcd] Deploys the etcd database as a container from an image
7. [upload-config] [kubelet] [upload-certs] Uploads the configuration into the cluster
8. [mark-control-plane] Labels the control-plane node with node-role.kubernetes.io/master='' and adds the taint node-role.kubernetes.io/master:NoSchedule
9. [bootstrap-token] Issues bootstrap tokens so nodes can obtain certificates and join
10. [addons] Deploys the CoreDNS and kube-proxy add-ons
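Note that the IPVS modules loaded during system optimization are only used if kube-proxy runs in IPVS mode; by default kubeadm leaves it in iptables mode. A minimal sketch of switching it, assuming the standard kubeadm-managed kube-proxy ConfigMap:
kubectl edit configmap kube-proxy -n kube-system    # set mode: "ipvs"
kubectl rollout restart daemonset kube-proxy -n kube-system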
5. Install the cluster CNI network plugin (all nodes)
Note: deploy only one CNI plugin; Calico is recommended.
Calico is a pure layer-3 data center networking solution that supports a wide range of platforms, including Kubernetes and OpenStack.
On each compute node, Calico uses the Linux kernel to implement an efficient virtual router (vRouter) that handles data forwarding, and each vRouter advertises the routes of the workloads running on it to the rest of the Calico network via BGP.
In addition, the Calico project implements Kubernetes network policy, providing ACL functionality.
https://docs.projectcalico.org/getting-started/kubernetes/quickstart
$ wget https://docs.projectcalico.org/manifests/calico.yaml
After downloading, edit the Pod network setting inside it (CALICO_IPV4POOL_CIDR) so that it matches the --pod-network-cidr passed to kubeadm init above, as sketched below.
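A minimal sketch of that edit, assuming the pod CIDR 192.168.125.0/16 from the init example above (the exact commented lines vary between Calico versions, so locate them first):
grep -n CALICO_IPV4POOL_CIDR calico.yaml
# uncomment the two lines and set the value, e.g.:
#   - name: CALICO_IPV4POOL_CIDR
#     value: "192.168.125.0/16"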
After the edit, apply the manifest:
$ kubectl apply -f calico.yaml
$ kubectl get pods -n kube-system
Troubleshooting
1. Check whether calico and calico-ipam exist under /opt/cni/bin; if not, copy them from the master node.
2. Check whether /etc/cni/net.d exists; if not, copy it from the master node and chmod 777 /etc/cni/net.d. After copying, edit 10-calico.conflist and change nodename to the node's own name (see the sketch below).
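A minimal sketch of those copies, run from the master and assuming the node hostname sg-16 from the hosts file above:
scp /opt/cni/bin/calico /opt/cni/bin/calico-ipam root@sg-16:/opt/cni/bin/
scp -r /etc/cni/net.d root@sg-16:/etc/cni/
# then, on the node, set nodename in /etc/cni/net.d/10-calico.conflist to the node's hostname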
6. Join worker nodes
kubeadm token list
kubeadm join 192.168.0.215:6443 --token cbywny.zad8kficz08q7mfx --discovery-token-ca-cert-hash sha256:e87de786137e1b4735a66ccea7e81349e2efa3d936b137f69ffb8aa04799c680
Troubleshooting: clock skew between nodes, expired token, firewall not disabled
#setenforce 0
#sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
#systemctl disable firewalld --now
Join issues
The connection to the server localhost:8080 was refused - did you specify the right host or port?
scp /etc/kubernetes/admin.conf root@192.168.0.216:/etc/kubernetes/admin.conf    # run on the master
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile    # run on the node
source ~/.bash_profile    # run on the node
error execution phase kubelet-start: error uploading crisocket: timed out waiting for the condition
swapoff -a
kubeadm reset
systemctl daemon-reload
systemctl restart kubelet
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
7. Token expiry
$ kubeadm token create
$ kubeadm token list
$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
63bca849e0e01691ae14eab449570284f0c3ddeea590f8da988c07fe2729e924
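Alternatively, kubeadm can print a complete, ready-to-use join command (fresh token plus CA cert hash) in one step:
kubeadm token create --print-join-command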
8. Test the Kubernetes cluster
- Verify that a Pod runs
- Verify Pod network connectivity
- Verify DNS resolution
Create a pod in the cluster and verify that it runs normally:
$ kubectl create deployment nginx --image=nginx
$ kubectl expose deployment nginx --port=80 --type=NodePort
$ kubectl get pod,svc
Access URL: http://NodeIP:Port (for example, http://192.168.0.214:<NodePort>, using the NodePort reported by kubectl get svc)
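A minimal sketch of the remaining checks, assuming the nginx Service above and the node IP 192.168.0.214 (substitute the actual NodePort):
curl http://192.168.0.214:<NodePort>    # Pod network / NodePort reachability
kubectl run -it --rm --restart=Never dns-test --image=busybox:1.28 -- nslookup kubernetes.default    # DNS resolution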
9. Remove k8s
yum remove -y kubelet kubeadm kubectl
systemctl stop kubelet && systemctl disable kubelet
kubeadm reset -f
rm -rf ~/.kube/ /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube* /etc/cni /opt/cni /var/lib/etcd /var/etcd
docker image prune -a
kubeadm version
10. Useful commands
kubectl get pods -n kube-system    # list the system add-on pods
kubectl get pods -o wide    # show which node each pod is running on
journalctl -f -u kubelet.service    # follow the kubelet logs for errors
kubectl describe pod <pod-name>    # see why a pod failed to start
tail -f /var/log/messages    # follow the system log
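Alongside describe, the standard kubectl logs subcommand is often the quickest way to see why a container keeps crashing:
kubectl logs <pod-name> -n <namespace>    # container logs (add --previous for the last crashed container)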
11. Shell completion for kubectl
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
Choosing IT means committing to lifelong learning.