Quickly Deploying a Kubernetes Cluster with Kubeadm (v1.22.3)

Cluster Planning

IP                  Hostname        CPU/Memory
192.168.200.11      master01        2C4G
192.168.200.12      master02        2C4G
192.168.200.13      master03        2C4G
192.168.200.14      node01          4C8G
192.168.200.15      node02          4C8G
192.168.200.50      lb              1C2G

1. Base environment

# load the bridge netfilter module required by the bridge-nf sysctls below
modprobe br_netfilter

cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
sysctl -p

cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4   # on kernels >= 4.19 this module is named nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
source /etc/sysconfig/modules/ipvs.modules
yum -y install ipvsadm ipset
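
An optional sanity check before moving on: confirm the IPVS modules are actually loaded and the sysctls took effect.

lsmod | grep -E 'ip_vs|nf_conntrack'
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward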

2. Configure the load-balancer node

yum -y install nginx keepalived
yum -y install nginx-all-modules.noarch
 
# quote the heredoc delimiter so the shell does not expand the $-variables in the nginx config
cat >> /etc/nginx/nginx.conf << 'EOF'
stream {
    log_format proxy '$time_local|$remote_addr|$upstream_addr|$protocol|$status|'
                     '$session_time|$upstream_connect_time|$bytes_sent|$bytes_received|'
                     '$upstream_bytes_sent|$upstream_bytes_received' ;
    upstream kube-apiserver {
        server 192.168.200.11:6443;
        server 192.168.200.12:6443;
        server 192.168.200.13:6443;
    }
 
    server {
        listen 6443 backlog=65535 so_keepalive=on;
        allow 10.10.0.0/16;
        allow 172.16.0.0/16;
        allow 192.168.200.0/24;
        deny all;
 
        proxy_connect_timeout 3s;
        proxy_next_upstream on;
        proxy_next_upstream_timeout 5s;
        proxy_next_upstream_tries 1;
 
        proxy_pass kube-apiserver;
        access_log /var/log/nginx/kube-apiserver.log proxy;
    }
}
EOF
 
systemctl enable nginx
systemctl start nginx
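
keepalived was installed above for an optional VIP failover pair, but with a single lb node it is left unconfigured here. One way to verify the stream proxy is in place (proxied requests will only succeed once the control plane is up in step 4):

nginx -t
ss -lntp | grep 6443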

3. Install the container runtime and the kubeadm tooling

wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://registry.cn-hangzhou.aliyuncs.com"]
}
EOF

systemctl daemon-reload
systemctl start docker
systemctl enable docker
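
kubeadm expects the kubelet and the container runtime to agree on the systemd cgroup driver, so it is worth confirming that daemon.json took effect:

docker info | grep -i 'cgroup driver'   # should print: Cgroup Driver: systemd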

cat > /etc/yum.repos.d/kubeadm.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubeadm-1.22.3 kubelet-1.22.3 kubectl-1.22.3
systemctl enable kubelet.service

4. Deploy the Kubernetes master nodes

kubeadm init --control-plane-endpoint "192.168.200.50:6443" --pod-network-cidr 10.10.0.0/16 --service-cidr 172.16.0.0/16 \
--image-repository registry.aliyuncs.com/google_containers --upload-certs | tee kubeadm-init.log
# Joining the remaining nodes is omitted here; the exact join commands are printed by kubeadm init (saved in kubeadm-init.log above), and their general shape is sketched after the kubeconfig setup below.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
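
For reference, the join commands printed by kubeadm init take the following shape; the token, hash, and certificate key are placeholders, so copy the real commands from kubeadm-init.log.

# additional control-plane nodes (master02/master03):
kubeadm join 192.168.200.50:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>
# worker nodes (node01/node02):
kubeadm join 192.168.200.50:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>

kubectl get nodes   # nodes stay NotReady until the CNI plugin is installed in step 5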

5. Deploy the CNI network plugin

wget -O calico.yaml --no-check-certificate https://docs.projectcalico.org/manifests/calico.yaml

vim calico.yaml   # set CALICO_IPV4POOL_CIDR to the --pod-network-cidr value used in step 4

   - name: CALICO_IPV4POOL_CIDR
     value: "10.10.0.0/16"

kubectl apply -f calico.yaml
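
The calico-node pods should reach Running before the nodes report Ready; a quick check:

kubectl -n kube-system get pods -l k8s-app=calico-node
kubectl get nodes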

6. Switch the Service proxy mode to IPVS

kubectl edit cm kube-proxy -n kube-system  

mode: "ipvs"   # 将模式修改为ipvs

kubectl get pod -n kube-system | grep kube-proxy | awk '{print $1}' | xargs kubectl -n kube-system delete pod
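
One way to confirm kube-proxy came back in IPVS mode (run on any node):

ipvsadm -Ln                        # should list virtual servers for the Service network
curl -s 127.0.0.1:10249/proxyMode  # should print: ipvs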

7. kubectl command completion

yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
kubectl completion bash > /etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubectl

8. Configure the load balancer for Ingress traffic

# No TLS certificates have been issued, so the SSL (443) server configuration is omitted

# quote the delimiter so $http_host and $proxy_add_x_forwarded_for are not expanded by the shell;
# the file must end in .conf for nginx's default conf.d include to pick it up
cat > /etc/nginx/conf.d/kube-ingress.conf << 'EOF'
server {
    server_name *.example.com;
    listen 80;

    location / {
        proxy_pass http://kube_http_pool;
        proxy_set_header Host $http_host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}

upstream kube_http_pool {
    server 192.168.200.11:80    max_fails=3 fail_timeout=10s;
    server 192.168.200.12:80    max_fails=3 fail_timeout=10s;
    server 192.168.200.13:80    max_fails=3 fail_timeout=10s;
}

upstream kube_https_pool {
    server 192.168.200.11:443    max_fails=3 fail_timeout=10s;
    server 192.168.200.12:443    max_fails=3 fail_timeout=10s;
    server 192.168.200.13:443    max_fails=3 fail_timeout=10s;
}
EOF
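
Since this nginx instance also fronts the kube-apiserver proxy set up in step 2, it is worth validating the new vhost before restarting:

nginx -t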

systemctl restart nginx

9. Deploy the Ingress Controller

# Deploy Traefik 2.5 as the Ingress Controller

vim traefik.yaml
...
# The other resources in the manifest need no changes.
# Change the controller to a DaemonSet, use the host network, and pin it to the master nodes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: traefik
  namespace: kube-system
  labels:
    app: traefik
spec:
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      serviceAccountName: traefik-ingress-controller
      containers:
        - name: traefik
          image: traefik:v2.5
          args:
            - --ping=true
            - --accesslog=true
            - --api=true
            - --api.insecure=true
            - --api.dashboard=true
            - --providers.kubernetesingress
            - --providers.kubernetescrd
            - --entrypoints.web.Address=:80
            - --entrypoints.websecure.Address=:443
          ports:
            - name: web
              containerPort: 80
              hostPort: 80
            - name: websecure
              containerPort: 443
              hostPort: 443
            - name: admin
              containerPort: 8080
              hostPort: 8080
      hostNetwork: true
      tolerations:
        - operator: "Exists"
      nodeSelector:
        node-role.kubernetes.io/master: ""

---
# Ingress for the Traefik dashboard
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-dashboard-ingress
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
    traefik.ingress.kubernetes.io/router.tls: "false"
    # set traefik.ingress.kubernetes.io/router.tls: "true" to serve the dashboard over HTTPS
spec:
  rules:
  - host: traefik.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: traefik
            port:
              number: 8080
---

kubectl apply -f traefik.yaml
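
The DaemonSet should schedule one traefik pod per master node; with --ping=true enabled above, each instance should answer on the admin port:

kubectl -n kube-system get pods -l app=traefik -o wide
curl -s http://192.168.200.11:8080/ping   # should print: OK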

10. Deploy the Kubernetes Dashboard

# Deploy Kubernetes Dashboard v2.4.0
vim dashboard.yaml
...
# The other resources in the manifest need no changes.
# Ingress configuration
---

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  rules:
  - host: dashboard.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kubernetes-dashboard
            port:
              number: 80

---

# ServiceAccount and cluster-admin binding for kubernetes-dashboard-admin
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kubernetes-dashboard

---

kubectl apply -f dashboard.yaml
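
Verify that the Dashboard pods and the Ingress were created:

kubectl -n kubernetes-dashboard get pods,svc
kubectl -n kubernetes-dashboard get ingress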


# Obtain a token or a kubeconfig file for logging in to the Kubernetes Dashboard
# 1. Fetch the token directly
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/kubernetes-dashboard-admin -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"

# 2. Generate a kubeconfig file
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.crt \
--server="https://192.168.200.50:6443" \
--embed-certs=true \
--kubeconfig=/root/kubernetes-dashboard-admin.kubeconfig

kubectl config set-credentials kubernetes-dashboard-admin \
--token=`kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/kubernetes-dashboard-admin -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"` \
--kubeconfig=/root/kubernetes-dashboard-admin.kubeconfig

kubectl config set-context kubernetes-dashboard-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-dashboard-admin \
--kubeconfig=/root/kubernetes-dashboard-admin.kubeconfig

kubectl config use-context kubernetes-dashboard-admin@kubernetes \
--kubeconfig=/root/kubernetes-dashboard-admin.kubeconfig
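
To actually reach the Dashboard, resolve the Ingress hostnames on the client machine; since the LB's SSL configuration was omitted, pointing the HTTPS-only dashboard host at a master's hostPort 443 is the simplest option here (example.com is the placeholder domain used throughout):

echo '192.168.200.50 traefik.example.com' >> /etc/hosts      # HTTP ingress via the lb node
echo '192.168.200.11 dashboard.example.com' >> /etc/hosts    # HTTPS ingress directly on a master

Then open https://dashboard.example.com and sign in with the token from step 1 or the generated kubeconfig.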