高可用部署二进制 Kubernetes

二进制安装k8s

节点名称 IP
k8s-master-01 172.16.1.71
k8s-master-02 172.16.1.72
k8s-master-03 172.16.1.73
k8s-node-01 172.16.1.74
k8s-node-02 172.16.1.75
# 修改IP和主机名

# 关闭防火墙和selinux

# host解析 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# vim /etc/hosts
----------------------------------------------------------------------
172.16.1.71  k8s-master-01 m1
172.16.1.72  k8s-master-02 m2
172.16.1.73  k8s-master-03 m3
172.16.1.74  k8s-node-01   n1
172.16.1.75  k8s-node-02   n2
----------------------------------------------------------------------

# 关闭swap分区  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# vim /etc/fstab
----------------------------------------------------------------------
# UUID=43e1bca3-991b-4cbf-bf73-e198e975f24e swap                    swap    defaults        0 0
---------------------------------------------------------------------- 

# 设置忽略swap分区  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet 

# 关闭selinux (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# sed -i 's#enforcing#disabled#g' /etc/selinux/config  # 永久关闭
[root@k8s-master-01 ~]# setenforce 0   # 临时关闭

# 刷新缓存 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum makecache 

# 更新系统 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum update -y --exclude=kernel*

# 更新内核版本
[root@k8s-master-01 ~]# ll   # 上传包
-r-xr-xr-x  1 root root 41857400 2021-01-18 09:20 kernel-lt-4.4.245-1.el7.elrepo.x86_64.rpm
-r-xr-xr-x  1 root root 10731836 2021-01-18 09:19 kernel-lt-devel-4.4.245-1.el7.elrepo.x86_64.rpm

# 做五台机器的免密
[root@k8s-master-01 ~]# ssh-keygen
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.71
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.72
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.73
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.74
[root@k8s-master-01 ~]# ssh-copy-id 192.168.13.75

# 把/root的文件传给另外两台机器
[root@k8s-master-01 ~]# for i in n1 n2; do scp kernel* $i:/root;done

# 安装  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum localinstall -y kernel*

# 更新内核版本  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum localinstall -y kernel-lt*   # 安装
[root@k8s-master-01 ~]# grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg  # 设置启动优先级
[root@k8s-master-01 ~]# grubby --default-kernel   # 查看内核版本

# 安装ipvs (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

# 加载IPVS模块  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
# Load the IPVS-related kernel modules that exist for the running kernel.
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  # Test the command's exit status directly: an unescaped \$? inside this
  # unquoted heredoc would be expanded when the file is written, turning the
  # check into a constant.
  if /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF

# 测试是否成功 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# 优化系统内核参数 (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
# nf_conntrack replaces the legacy ip_conntrack on modern kernels (4.x)
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# 重启  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# reboot

# 查看内核   (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# uname -a

# 安装基础软件  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# 安装docker  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master-01 ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master-01 ~]# yum install docker-ce -y   # 安装
[root@k8s-master-01 ~]# sudo mkdir -p /etc/docker
[root@k8s-master-01 ~]#  sudo tee /etc/docker/daemon.json <<-'EOF'    
{
  "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"]
}
EOF
[root@k8s-master-01 ~]# sudo systemctl daemon-reload ; systemctl restart docker;systemctl enable --now docker.service
[root@k8s-master-01 ~]# docker info  # 测试是否安装上

# 同步时间  (五台机器都要做,太长不重复复制) 
[root@k8s-master-01 ~]# crontab -e
--------------------------------------------------------------------
* * * * *  /usr/sbin/ntpdate  ntp.aliyun.com  &> /dev/null
--------------------------------------------------------------------

安装cfssl证书生成工具

# 下载
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master-01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# 设置执行权限
[root@k8s-master-01 ~]# chmod +x cfssljson_linux-amd64
[root@k8s-master-01 ~]# chmod +x cfssl_linux-amd64

# 移动到/usr/local/bin
[root@k8s-master-01 ~]# mv cfssljson_linux-amd64 cfssljson
[root@k8s-master-01 ~]# mv cfssl_linux-amd64 cfssl
[root@k8s-master-01 ~]# mv cfssljson cfssl /usr/local/bin

# 验证
[root@k8s-master-01 ~]# cfssl version
Version: 1.2.0
Revision: dev
Runtime: go1.6

创建集群根证书

[root@k8s-master-01 ~]#  mkdir -p /opt/cert/ca
[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "8760h"
      }
    }
  }
}
EOF

创建根CA证书签名请求文件

[root@k8s-master-01 ~]# cat > /opt/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "ShangHai",
    "L": "ShangHai"
  }]
}
EOF

生成根证书

[root@k8s-master-01 ~]# cd /opt/cert/ca/
[root@k8s-master-01 /opt/cert/ca]# ll
-rw-r--r-- 1 root root 285 2021-01-19 15:28 ca-config.json
-rw-r--r-- 1 root root 153 2021-01-19 15:29 ca-csr.json
[root@k8s-master-01 /opt/cert/ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

部署ETCD

[root@k8s-master-01 /opt/cert/ca]# mkdir /opt/data
[root@k8s-master-01 /opt/cert/ca]# cd /opt/data
[root@k8s-master-01 /opt/data]# wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# tar xf etcd-v3.3.24-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd etcd-v3.3.24-linux-amd64/
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# for i in m1 m2 m3;do scp etc* $i:/usr/local/bin;done

# 测试
[root@k8s-master-01 /opt/data/etcd-v3.3.24-linux-amd64]# etcd --version
etcd Version: 3.3.24
Git SHA: bdd57848d
Go Version: go1.12.17
Go OS/Arch: linux/amd64

创建ETCD证书

[root@k8s-master-01 ~]# mkdir -p /opt/cert/etcd
[root@k8s-master-01 ~]# cd /opt/cert/etcd
[root@k8s-master-01 /opt/cert/etcd]# cat > etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "127.0.0.1",
    "172.16.1.71",
    "172.16.1.72",
    "172.16.1.73",
    "172.16.1.74",
    "172.16.1.75"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "ShangHai",
          "L": "ShangHai"
        }
    ]
}
EOF
[root@k8s-master-01 /opt/cert/etcd]# ll
-rw-r--r-- 1 root root 335 2021-01-19 15:40 etcd-csr.json

生成证书

[root@k8s-master-01 /opt/cert/etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@k8s-master-01 /opt/cert/etcd]# ll
总用量 16
-rw-r--r-- 1 root root 1041 2021-01-19 15:41 etcd.csr
-rw-r--r-- 1 root root  335 2021-01-19 15:40 etcd-csr.json
-rw------- 1 root root 1675 2021-01-19 15:41 etcd-key.pem
-rw-r--r-- 1 root root 1371 2021-01-19 15:41 etcd.pem

分发证书

[root@k8s-master-01 /opt/cert/etcd]# for ip in m1 m2 m3 n1 n2;do
  ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
  scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
  scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl
done

注册etcd服务. (master机器都要做,太长不复制)

[root@k8s-master-01 /opt/cert/etcd]# cd
[root@k8s-master-01 ~]# ETCD_NAME=`hostname`
[root@k8s-master-01 ~]# INTERNAL_IP=`hostname -i`
[root@k8s-master-01 ~]# INITIAL_CLUSTER=k8s-master-01=https://172.16.1.71:2380,k8s-master-02=https://172.16.1.72:2380,k8s-master-03=https://172.16.1.73:2380
[root@k8s-master-01 ~]# cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

启动etcd (master机器都要做,太长不复制)

[root@k8s-master-01 ~]# systemctl enable --now etcd
[root@k8s-master-01 ~]# systemctl status etcd

测试ETCD集群

[root@k8s-master-01 ~]# ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379" \
endpoint status --write-out='table'
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+
| https://172.16.1.71:2379 | 80d0ace027643b4e |  3.3.24 |   20 kB |      true |         7 |          9 |
| https://172.16.1.72:2379 | 9a7cf2dc57ec669f |  3.3.24 |   20 kB |     false |         7 |          9 |
| https://172.16.1.73:2379 | 54f8db1a175b9c73 |  3.3.24 |   20 kB |     false |         7 |          9 |
+--------------------------+------------------+---------+---------+-----------+-----------+------------+

创建master CA节点证书

[root@k8s-master-01 ~]# mkdir /opt/cert/k8s
[root@k8s-master-01 ~]# cd /opt/cert/k8s/
[root@k8s-master-01 /opt/cert/k8s]# cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

创建kube-apiserver证书

[root@k8s-master-01 /opt/cert/k8s]# cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

创建kube-controller-manager证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]#  cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

创建kube-scheduler证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "172.16.1.71",
        "172.16.1.72",
        "172.16.1.73",
        "172.16.1.80"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

创建kube-proxy证书

[root@k8s-master-01 /opt/cert/k8s]# cat > kube-proxy-csr.json << EOF
{
    "CN":"system:kube-proxy",
    "hosts":[],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:kube-proxy",
            "OU":"System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

签发管理员用户证书

[root@k8s-master-01 /opt/cert/k8s]#  cat > admin-csr.json << EOF
{
    "CN":"admin",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:masters",
            "OU":"System"
        }
    ]
}
EOF

[root@k8s-master-01 /opt/cert/k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

颁发证书

[root@k8s-master-01 /opt/cert/k8s]# mkdir /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl
[root@k8s-master-01 /opt/cert/k8s]# ll /etc/kubernetes/ssl/
总用量 48
-rw------- 1 root root 1679 2021-01-19 16:11 admin-key.pem
-rw-r--r-- 1 root root 1363 2021-01-19 16:11 admin.pem
-rw------- 1 root root 1675 2021-01-19 16:04 ca-key.pem
-rw-r--r-- 1 root root 1281 2021-01-19 16:04 ca.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-controller-manager-key.pem
-rw-r--r-- 1 root root 1476 2021-01-19 16:06 kube-controller-manager.pem
-rw------- 1 root root 1679 2021-01-19 16:07 kube-proxy-key.pem
-rw-r--r-- 1 root root 1383 2021-01-19 16:07 kube-proxy.pem
-rw------- 1 root root 1679 2021-01-19 16:06 kube-scheduler-key.pem
-rw-r--r-- 1 root root 1452 2021-01-19 16:06 kube-scheduler.pem
-rw------- 1 root root 1679 2021-01-19 16:05 server-key.pem
-rw-r--r-- 1 root root 1558 2021-01-19 16:05 server.pem
[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3 ;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

下载安装包

[root@k8s-master-01 ~]#  cd /opt/data

# 下载server安装包(应该下载不了)
[root@k8s-master-01 /opt/data]# wget https://dl.k8s.io/v1.18.8/kubernetes-server-linux-amd64.tar.gz

[root@k8s-master-01 /opt/data]# docker run -dit registry.cn-hangzhou.aliyuncs.com/k8sos/k8s:v1.18.8.1 bash
[root@k8s-master-01 /opt/data]# docker ps  (看ID)
[root@k8s-master-01 /opt/data]# docker exec fdeed1e0b5a1 ls
[root@k8s-master-01 /opt/data]# docker cp fdeed1e0b5a1:kubernetes-server-linux-amd64.tar.gz .
[root@k8s-master-01 /opt/data]# tar xf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master-01 /opt/data]# cd kubernetes/server/bin/
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# for i in m1 m2 m3;do scp kube-apiserver kube-controller-manager kubectl kubelet  kube-proxy kube-scheduler $i:/usr/local/bin ; done

# 测试(三台master机器都要测试)
[root@k8s-master-01 /opt/data/kubernetes/server/bin]# kube-apiserver --version
Kubernetes v1.18.8

创建kube-controller-manager集群配置文件

[root@k8s-master-01 /opt/data/kubernetes/server/bin]# cd /opt/cert/k8s/

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig
  
# 设置客户端认证参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-controller-manager" \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig
  
# 设置上下文参数(在上下文参数中将集群参数和 用户参数关联起来)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-controller-manager" \
  --kubeconfig=kube-controller-manager.kubeconfig
  
# 配置默认上下文
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6289 2021-01-19 16:32 kube-controller-manager.kubeconfig   # 其中一个文件

创建kube-scheduler集群配置文件

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]#kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig
  
# 设置客户端认证参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-scheduler" \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig
  
# 设置上下文参数(在上下文参数中将集群参数和 用户参数关联起来)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-scheduler" \
  --kubeconfig=kube-scheduler.kubeconfig
  
# 配置默认上下文
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6239 2021-01-19 16:36 kube-scheduler.kubeconfig   # 其中一个文件

创建kube-proxy集群配置文件

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig
  
# 设置客户端认证参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kube-proxy" \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
  
# 设置上下文参数(在上下文参数中将集群参数和 用户参数关联起来)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-proxy" \
  --kubeconfig=kube-proxy.kubeconfig
  
# 配置默认上下文
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6139 2021-01-19 16:38 kube-proxy.kubeconfig     # 其中一个文件

创建集群管理员集群配置文件

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig

# 设置客户端认证参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "admin" \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig
  
# 设置上下文参数(在上下文参数中将集群参数和 用户参数关联起来)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="admin" \
  --kubeconfig=admin.kubeconfig
  
# 配置默认上下文
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=admin.kubeconfig
[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 6105 2021-01-19 16:41 admin.kubeconfig  # 其中一个文件

配置TLS bootstrapping

[root@k8s-master-01 /opt/cert/k8s]# TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`
[root@k8s-master-01 /opt/cert/k8s]# cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

[root@k8s-master-01 /opt/cert/k8s]# ll
-rw-r--r-- 1 root root   84 2021-01-19 16:51 token.csv   # 其中一个文件
[root@k8s-master-01 /opt/cert/k8s]# cat token.csv
3358c2b56753366ebf7d02bb00eeb3fc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

创建bootstrapping集群配置文件

[root@k8s-master-01 /opt/cert/k8s]# export KUBE_APISERVER="https://172.16.1.80:8443"

# 设置集群参数
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 设置客户端认证参数,此处token必须用上叙token.csv中的token (3358c2b56753366ebf7d02bb00eeb3fc)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-credentials "kubelet-bootstrap" \
  --token=3358c2b56753366ebf7d02bb00eeb3fc \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 设置上下文参数(在上下文参数中将集群参数和用户参数关联起来)
[root@k8s-master-01 /opt/cert/k8s]# kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# 配置默认上下文
[root@k8s-master-01 /opt/cert/k8s]# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

[root@k8s-master-01 /opt/cert/k8s]# ll
-rw------- 1 root root 2061 2021-01-19 16:55 kubelet-bootstrap.kubeconfig   # 其中一个文件

分发集群配置文件

[root@k8s-master-01 /opt/cert/k8s]# for i in m1 m2 m3; do
   ssh root@$i "mkdir -p  /etc/kubernetes/cfg";
   scp token.csv kube-scheduler.kubeconfig kube-controller-manager.kubeconfig admin.kubeconfig kube-proxy.kubeconfig kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg;
 done

部署kube-apiserver (三台master机器上面都需要执行)

[root@k8s-master-01 /opt/cert/k8s]#  KUBE_APISERVER_IP=`hostname -i`

[root@k8s-master-01 /opt/cert/k8s]#  cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=10-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://172.16.1.71:2379,https://172.16.1.72:2379,https://172.16.1.73:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF

# 如果hostname -i获取的是外网IP,则需要执行:
[root@k8s-master-01 /opt/cert/k8s]#  sed -i 's#192.168.13#172.16.1#g' /etc/kubernetes/cfg/kube-apiserver.conf

注册kube-apiserver服务

[root@k8s-master-01 /opt/cert/k8s]# cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /usr/lib/systemd/system/kube-apiserver.service $i:/usr/lib/systemd/system/kube-apiserver.service;done

[root@k8s-master-01 /opt/cert/k8s]# mkdir -p /var/log/kubernetes/
[root@k8s-master-01 /opt/cert/k8s]# systemctl daemon-reload
[root@k8s-master-01 /opt/cert/k8s]# systemctl enable --now kube-apiserver

# 查看是否启动 (三台master机器上面都需要查看)
[root@k8s-master-01 /opt/cert/k8s]# systemctl status kube-apiserver

kube-apiserver高可用

[root@k8s-master-01 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-02 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-03 /opt/cert/k8s]# yum install -y keepalived haproxy
[root@k8s-master-03 /opt/cert/k8s]# cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master-01    172.16.1.71:6443  check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-02    172.16.1.72:6443  check inter 2000 fall 2 rise 2 weight 100
  server k8s-master-03    172.16.1.73:6443  check inter 2000 fall 2 rise 2 weight 100
EOF

[root@k8s-master-01 /opt/cert/k8s]# for i in m2 m3;do scp /etc/haproxy/haproxy.cfg $i:/etc/haproxy/haproxy.cfg;done

[root@k8s-master-01 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-02 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-03 /opt/cert/k8s]#  systemctl enable --now haproxy.service
[root@k8s-master-01 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-02 /opt/cert/k8s]# systemctl status haproxy.service
[root@k8s-master-03 /opt/cert/k8s]# systemctl status haproxy.service

[root@k8s-master-01 /opt/cert/k8s]# mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak
[root@k8s-master-01 /opt/cert/k8s]# cd /etc/keepalived
[root@k8s-master-01 /opt/cert/k8s]# cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth1
    mcast_src_ip 172.16.1.71
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}
EOF

[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp keepalived.conf $i:/etc/keepalived/keepalived.conf;done

## k8s-master-02操作
[root@k8s-master-02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.72
    virtual_router_id 51
    priority 90
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}

## k8s-master-03操作
[root@k8s-master-03 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth1
    mcast_src_ip 172.16.1.73
    virtual_router_id 51
    priority 80
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        172.16.1.80
    }
#    track_script {
#       chk_kubernetes
#    }
}

设置监控检查脚本

[root@k8s-master-03 ~]# cat > /etc/keepalived/check_kubernetes.sh <<'EOF'
#!/bin/bash
# Health-check script for keepalived: if kube-apiserver is not running,
# stop keepalived so the VIP fails over to another master.
# NOTE: the heredoc delimiter must be quoted ('EOF'); otherwise
# $(pgrep ...) and every $apiserver_pid_id are expanded while the file is
# being written and the installed script is broken.

# Poll up to 5 times (2s apart) for a kube-apiserver process.
function check_kubernetes() {
    for ((i=0;i<5;i++));do
        apiserver_pid_id=$(pgrep kube-apiserver)
        if [[ ! -z $apiserver_pid_id ]];then
            return
        else
            sleep 2
        fi
        apiserver_pid_id=0
    done
}

# 1:running  0:stopped
check_kubernetes
if [[ $apiserver_pid_id -eq 0 ]];then
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF

[root@k8s-master-01 /etc/keepalived]# for i in m2 m3;do scp /etc/keepalived/check_kubernetes.sh $i:/etc/keepalived/check_kubernetes.sh; done

# 给监控脚本加权限(三台master都要做)
[root@k8s-master-01 /etc/keepalived]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-02 ~]# chmod +x /etc/keepalived/check_kubernetes.sh
[root@k8s-master-03 ~]# chmod +x /etc/keepalived/check_kubernetes.sh

# 启动keepalived和haproxy服务 (三台master都要做)
[root@k8s-master-01 /etc/keepalived]# systemctl enable --now keepalived haproxy
[root@k8s-master-02 ~]# systemctl enable --now keepalived haproxy
[root@k8s-master-03 ~]# systemctl enable --now keepalived haproxy

# 查看是否启动 (三台master都要做)
[root@k8s-master-01 /etc/keepalived]# systemctl status keepalived.service
[root@k8s-master-02 ~]# systemctl status keepalived.service
[root@k8s-master-03 ~]# systemctl status keepalived.service

# 查看是否有vip (三台master都要做)
[root@k8s-master-01 /etc/keepalived]# ip a | grep 172.16.1.80    # 有
    inet 172.16.1.80/32 scope global eth1
[root@k8s-master-02 ~]# ip a | grep 172.16.1.80   # 没有
[root@k8s-master-03 ~]# ip a | grep 172.16.1.80   # 没有

授权TLS Bootrapping用户请求

[root@k8s-master-01 /etc/keepalived]# kubectl create clusterrolebinding kubelet-bootstrap \
 --clusterrole=system:node-bootstrapper \
 --user=kubelet-bootstrap

创建kube-controller-manager配置文件

[root@k8s-master-01 /etc/keepalived]# cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF

剩下内容请关注微信公众号

1、关注微信公众号:山河编程
2、回复:k8s

获取完整全文。

感谢您的关注,小编会努力把最好的资料都呈现给您!

谢谢啦!
posted @ 2021-07-22 13:31  Alvin,  阅读(144)  评论(0编辑  收藏  举报