k8s Installation
k8s installation procedure
Machine requirements: more than 2 CPU cores and more than 2 GB of RAM per node. All nodes must be able to reach each other over the network, and each node needs a unique hostname and MAC address.
Give each machine its own hostname and add the mappings to /etc/hosts:
192.168.56.121 ubuntu01
192.168.56.122 ubuntu02
192.168.56.123 ubuntu03
192.168.56.121 cluster-endpoint
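The hostnames above also have to be set on the machines themselves; a minimal sketch using hostnamectl (run the matching command on each node, names taken from the hosts entries above):
# example for the first node; repeat with ubuntu02 / ubuntu03 on the other machines
sudo hostnamectl set-hostname ubuntu01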
Configure docker: /etc/docker/daemon.json
# "exec-opts" switches docker's cgroup driver to systemd, the driver kubelet expects by default
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://registry.docker-cn.com",
    "https://hub-mirror.c.163.com",
    "https://reg-mirror.qiniu.com",
    "https://docker.mirrors.ustc.edu.cn"
  ]
}
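docker has to be restarted for the new daemon.json to take effect; a quick restart-and-verify:
sudo systemctl daemon-reload
sudo systemctl restart docker
docker info | grep -i "cgroup driver"   # should print: Cgroup Driver: systemd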
Set SELinux to permissive mode (effectively disabling it)
# Note: Ubuntu uses AppArmor by default and normally ships with SELinux disabled, so this step mainly matters on RHEL/CentOS-style systems
apt install selinux-utils
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
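Optional check that swap is really off:
swapon --show   # should print nothing
free -h         # the Swap line should show 0B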
Configure kernel networking for k8s
# br_netfilter is a kernel module, not a sysctl key: load it and make it persistent
echo 'br_netfilter' >> /etc/modules-load.d/k8s.conf
modprobe br_netfilter
echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.d/k8s.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 0' >> /etc/sysctl.d/k8s.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.d/k8s.conf
echo 'net.bridge.bridge-nf-call-arptables = 0' >> /etc/sysctl.d/k8s.conf
sudo sysctl --system
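Quick check that the module is loaded and the bridge settings took effect:
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables   # expected: net.bridge.bridge-nf-call-iptables = 1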
Install docker and the kubernetes packages
apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
echo 'deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main' >> /etc/apt/sources.list.d/kubernetes.list
apt update
# Mind the versions below; mismatched versions can conflict with each other.
apt install docker-ce=5:20.10.7~3-0~ubuntu-bionic docker-ce-cli=5:20.10.7~3-0~ubuntu-bionic containerd.io=1.4.6-1 -y
apt install kubelet=1.20.9-00 kubeadm=1.20.9-00 kubectl=1.20.9-00 -y
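A commonly recommended extra step (not strictly required) is to pin these package versions so routine apt upgrades don't move the cluster, and to enable kubelet:
sudo apt-mark hold kubelet kubeadm kubectl   # prevent unattended upgrades of the k8s packages
sudo systemctl enable --now kubelet          # kubelet will crash-loop until kubeadm init/join runs; that is expected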
Pre-pull the required docker images
sudo tee ./images.sh <<-'EOF'
#!/bin/bash
# image versions should match the kubeadm/kubelet version installed above (1.20.9)
images=(
kube-apiserver:v1.20.9
kube-controller-manager:v1.20.9
kube-scheduler:v1.20.9
kube-proxy:v1.20.9
pause:3.2
etcd:3.4.13-0
coredns:1.7.0
)
for imageName in ${images[@]} ; do
docker pull registry.aliyuncs.com/google_containers/$imageName
done
EOF
chmod +x images.sh
./images.sh
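Before running kubeadm init, the pre-pulled images can be checked locally:
docker images | grep registry.aliyuncs.com/google_containers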
Initialize the master (control-plane) node
# --apiserver-advertise-address : IP of this master machine
# --control-plane-endpoint      : cluster entry hostname, defined in /etc/hosts above
# --image-repository            : registry for control-plane images, same as the pre-pull script
# Note: the service CIDR and pod CIDR must not overlap with each other or with any subnet used by the hosts' other network interfaces
kubeadm init \
  --apiserver-advertise-address=192.168.56.121 \
  --control-plane-endpoint=cluster-endpoint \
  --image-repository registry.aliyuncs.com/google_containers \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=192.169.0.0/16
If initialization fails, remove the generated files and reset before retrying
rm /etc/kubernetes/manifests/*
kubeadm reset
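kubeadm reset does not clean up iptables rules, so after a failed init it can also help to flush them and remove the old kubeconfig before retrying:
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
rm -rf $HOME/.kube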
Output after the master node initializes successfully
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
# Run the following three lines
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
# Install a pod network plugin
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
# Other nodes join as control-plane (master) nodes
kubeadm join cluster-endpoint:6443 --token 58vlmk.qdb8bhg6tcel9r4j \
--discovery-token-ca-cert-hash sha256:60c428f9127e7fafb60deab87925c956e8a30e87eaee4422c1219815082d18f5 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
# Other nodes join as worker nodes
kubeadm join cluster-endpoint:6443 --token 58vlmk.qdb8bhg6tcel9r4j \
--discovery-token-ca-cert-hash sha256:60c428f9127e7fafb60deab87925c956e8a30e87eaee4422c1219815082d18f5
Join a worker node
kubeadm join cluster-endpoint:6443 --token x5g4uy.wpjjdbgra92s25pp \
--discovery-token-ca-cert-hash sha256:6255797916eaee52bf9dda9429db616fcd828436708345a308f4b917d3457a22
The join token is only valid for 24 hours; generate a new one on the master:
kubeadm token create --print-join-command
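To check whether an existing token is still valid before creating a new one:
kubeadm token list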
Download and install the network plugin (Calico)
curl https://docs.projectcalico.org/v3.20/manifests/calico.yaml -O
kubectl apply -f calico.yaml # install the network plugin from the manifest. Note: CALICO_IPV4POOL_CIDR (default 192.168.0.0/16) must be changed to match --pod-network-cidr from the init command
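The cluster is only usable once the calico and coredns pods reach Running; their progress can be watched with:
kubectl get pods -n kube-system -w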
Verify the cluster
kubectl get nodes
Deploy the official web UI: dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
# Expose an access port
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard # in the editor that opens, change type: ClusterIP to type: NodePort
kubectl get svc -A |grep kubernetes-dashboard # find the NodePort and open it in the server's security group / firewall
Access: https://<any cluster node IP>:<node port>
Create a k8s dashboard access account
# Create the access account from a yaml file; vi dash.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
# kubectl apply -f dash.yaml
# Get the access token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
# The returned JWT token is used to log in to the dashboard