Deploying Kubernetes from Binaries

Environment preparation

Plan the ports

- Port ranges
etcd: 2379, 2380, 2381
Kubernetes components: 6443, 10250, 10249, 10256, 10257, 10259
Kubernetes add-ons: Calico 179, 9099
Kubernetes NodePort range: 30000-32767
ip_local_port_range: 32768-65535

Each port type is explained below:
a) etcd ports: required by the etcd database.
b) Kubernetes component ports: the core control-plane and node components.
c) Add-on ports: Calico and nginx-ingress-controller.
d) NodePort range: applications running in containers expose services through ports in this range, so any externally published application port must fall within it.
e) ip_local_port_range: when a process on the host connects to an external service, it needs a local port for the outgoing TCP connection; the kernel picks an unused port from this range.
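To confirm the local port range on a host, query the kernel directly; the NodePort range, by contrast, is enforced by kube-apiserver's --service-node-port-range flag configured later in this guide:

$ sysctl net.ipv4.ip_local_port_range   # should report 32768 65535 after the tuning below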

Set the hostname

$ hostnamectl set-hostname k8s-master
$ hostnamectl set-hostname k8s-node01
$ hostnamectl set-hostname k8s-node02

Note: do not use underscores (_) in hostnames; kubelet will fail to start because it cannot resolve the hostname.

Add hostname mappings

$ cat >> /etc/hosts <<-EOF
192.168.32.134 k8s-master
192.168.32.135 k8s-node01
192.168.32.136 k8s-node02
EOF

Disable the firewall

$ sudo systemctl stop firewalld
$ sudo systemctl disable firewalld

Disable SELinux

# Temporary (effective immediately)
$ sudo setenforce 0
 
# Permanent (takes effect after reboot)
$ sed -ri 's/(SELINUX=).*/\1disabled/g' /etc/selinux/config

Disable swap

# Temporary (effective immediately)
$ swapoff -a
 
# Permanent; requires a reboot
$ sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Load the IPVS kernel modules

$ cat > /etc/sysconfig/modules/ipvs.modules <<-EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
modprobe -- ipip
EOF
 
# Load the modules now
$ chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
 
# Verify
$ lsmod | grep -e ip_vs -e nf_conntrack_ipv4 -e br_netfilter

Note: *.modules files under /etc/sysconfig/modules/ are loaded automatically at boot.

Install ipset and related packages

$ yum install ipvsadm wget vim -y  # make sure the ipset package gets installed

Tune kernel parameters

$ cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
net.ipv4.conf.all.rp_filter=1
kernel.sem=250 32000 100 128
net.core.netdev_max_backlog = 32768
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216           
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.wmem_max = 16777216           
net.ipv4.ip_local_port_range = 32768 65535
net.ipv4.ip_forward = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 65536
net.ipv4.tcp_max_tw_buckets = 6000
net.ipv4.tcp_mem = 94500000 91500000 92700000
net.ipv4.tcp_rmem  = 32768 436600 873200
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_wmem = 8192 436600 873200
EOF
 
# Apply kubernetes.conf
$ sysctl -p /etc/sysctl.d/kubernetes.conf

# Set resource limits
$ cat >> /etc/security/limits.conf <<-EOF
* - nofile 65535
* - core 65535
* - nproc 65535
* - stack 65535
EOF

Configure time synchronization

$ yum install ntp -y
$ vim /etc/ntp.conf
#server 0.centos.pool.ntp.org iburst    # comment out these four lines
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ntp1.aliyun.com iburst    # add Aliyun as the upstream NTP server
 
# Start and enable at boot
$ systemctl start ntpd.service
$ systemctl enable ntpd.service

Install etcd

Create the etcd directories and add them to PATH

$ mkdir -p /data/etcd/{bin,conf,certs,logs,data}
$ chmod 700 /data/etcd/data
$ echo 'PATH=/data/etcd/bin:$PATH' > /etc/profile.d/etcd.sh && source /etc/profile.d/etcd.sh

Download the certificate generation tools (cfssl)

$ mkdir ~/cfssl && cd ~/cfssl/
$ wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
$ wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
$ wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
 
$ cp cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
$ cp cfssljson_linux-amd64 /usr/bin/cfssljson
$ cp cfssl_linux-amd64 /usr/bin/cfssl
$ chmod u+x /usr/bin/cfssl*

Create the root CA configuration

$ cat > /data/etcd/certs/ca-config.json <<-EOF
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "kubernetes": {
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry": "87600h"
            }
        }
    }
}
EOF

Create the CA certificate signing request file

$ cat > /data/etcd/certs/ca-csr.json  <<-EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "GuangDong",
            "L": "GuangDong",
            "O": "k8s"
        }
    ]
}
EOF

Generate the CA certificate and private key

$ cd /data/etcd/certs/ && cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

Distribute the CA certificate and key to the etcd nodes

$ scp /data/etcd/certs/ca*pem root@k8s-node01:/data/etcd/certs/
$ scp /data/etcd/certs/ca*pem root@k8s-node02:/data/etcd/certs/

Create the etcd certificate signing request

$ cat > /data/etcd/certs/etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
        "192.168.32.134",
        "192.168.32.135",
        "192.168.32.136"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "GuangDong",
            "L": "GuangDong",
            "O": "k8s"
        }
    ]
}
EOF

Note: adjust the IP addresses above. The hosts field must list the internal cluster IPs of every etcd node, without exception. To make future scaling easier, you can add a few spare IPs.

Generate the certificate and private key

$ cd /data/etcd/certs/ && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd -

Note: -profile must match a profile defined in the root CA's ca-config.json.
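As a sanity check, cfssl-certinfo (installed earlier) can decode the new certificate; the sans field should list every etcd node IP from etcd-csr.json:

$ cfssl-certinfo -cert /data/etcd/certs/etcd.pem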

Distribute the etcd certificate and key to the other nodes

$ scp /data/etcd/certs/etcd*pem root@k8s-node01:/data/etcd/certs/
$ scp /data/etcd/certs/etcd*pem root@k8s-node02:/data/etcd/certs/

Download the etcd package

$ mkdir ~/etcd && cd ~/etcd
$ wget https://mirrors.huaweicloud.com/etcd/v3.4.16/etcd-v3.4.16-linux-amd64.tar.gz
$ tar xf etcd-v3.4.16-linux-amd64.tar.gz 
$ cd etcd-v3.4.16-linux-amd64
$ cp -r etcd* /data/etcd/bin/

Distribute the etcd binaries to the other etcd nodes

$ scp -r /data/etcd/bin/etcd* root@k8s-node01:/data/etcd/bin/
$ scp -r /data/etcd/bin/etcd* root@k8s-node02:/data/etcd/bin/

Create the etcd configuration file

$ cat > /data/etcd/conf/etcd.conf  << EOF
#[Member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/data/etcd/data/"
ETCD_LISTEN_PEER_URLS="https://192.168.32.134:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.32.134:2379"
 
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.32.134:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.32.134:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.32.134:2380,etcd02=https://192.168.32.135:2380,etcd03=https://192.168.32.136:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

Note: adjust the IP addresses above.

Distribute the etcd configuration file

$ scp /data/etcd/conf/etcd.conf root@k8s-node01:/data/etcd/conf/
$ scp /data/etcd/conf/etcd.conf root@k8s-node02:/data/etcd/conf/

Note: on each node, adjust the IP addresses and ETCD_NAME accordingly.
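For example, on k8s-node01 the per-node values can be patched in place with sed (a sketch using this guide's sample IPs; anchoring on ETCD_NAME and the trailing quote leaves ETCD_INITIAL_CLUSTER untouched):

$ sed -i -e 's/^ETCD_NAME="etcd01"/ETCD_NAME="etcd02"/' \
         -e 's#https://192.168.32.134:2380"#https://192.168.32.135:2380"#' \
         -e 's#https://192.168.32.134:2379"#https://192.168.32.135:2379"#' \
         /data/etcd/conf/etcd.conf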

Create the etcd systemd unit

$ cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
 
[Service]
Type=notify
EnvironmentFile=/data/etcd/conf/etcd.conf
ExecStart=/data/etcd/bin/etcd \\
--cert-file=/data/etcd/certs/etcd.pem \\
--key-file=/data/etcd/certs/etcd-key.pem \\
--peer-cert-file=/data/etcd/certs/etcd.pem \\
--peer-key-file=/data/etcd/certs/etcd-key.pem \\
--trusted-ca-file=/data/etcd/certs/ca.pem \\
--peer-trusted-ca-file=/data/etcd/certs/ca.pem
LimitNOFILE=65536
Restart=always
RestartSec=30
StartLimitBurst=3
StartLimitInterval=60s
 
[Install]
WantedBy=multi-user.target
EOF

Note: verify that the ExecStart arguments are correct.

Distribute the etcd systemd unit

$ scp /usr/lib/systemd/system/etcd.service k8s-node01:/usr/lib/systemd/system/
$ scp /usr/lib/systemd/system/etcd.service k8s-node02:/usr/lib/systemd/system/

Start etcd

$ systemctl daemon-reload
$ systemctl start etcd.service
$ systemctl enable etcd.service

Verify etcd

$ ETCDCTL_API=3 /data/etcd/bin/etcdctl --cacert=/data/etcd/certs/ca.pem --cert=/data/etcd/certs/etcd.pem --key=/data/etcd/certs/etcd-key.pem --endpoints="https://192.168.32.134:2379,https://192.168.32.135:2379,https://192.168.32.136:2379" endpoint health -w table

Note: adjust the IP addresses above.
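Beyond health, endpoint status shows which member is currently the leader and the database size (same TLS flags as above):

$ ETCDCTL_API=3 /data/etcd/bin/etcdctl --cacert=/data/etcd/certs/ca.pem --cert=/data/etcd/certs/etcd.pem --key=/data/etcd/certs/etcd-key.pem --endpoints="https://192.168.32.134:2379,https://192.168.32.135:2379,https://192.168.32.136:2379" endpoint status -w table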

Install Docker

Download the Docker static binaries

$ mkdir ~/docker && cd ~/docker
$ wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.15.tgz

Create the Docker directories and add them to PATH

$ mkdir -p /data/docker/{bin,conf,data}
$ echo 'PATH=/data/docker/bin:$PATH' > /etc/profile.d/docker.sh  && source /etc/profile.d/docker.sh

Unpack the binaries

$ tar xf docker-19.03.15.tgz
$ cd docker/
$ cp * /data/docker/bin/

Distribute the Docker binaries

$ scp /data/docker/bin/* k8s-node01:/data/docker/bin/
$ scp /data/docker/bin/* k8s-node02:/data/docker/bin/

Create the Docker systemd unit

$ cat > /usr/lib/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
 
[Service]
Type=notify
ExecStart=/data/docker/bin/dockerd --config-file=/data/docker/conf/daemon.json
ExecReload=/bin/kill -s SIGHUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
 
[Install]
WantedBy=multi-user.target
EOF

Create daemon.json

$ cat > /data/docker/conf/daemon.json  << EOF
{
  "data-root": "/data/docker/data/",
  "exec-opts": [
    "native.cgroupdriver=systemd"
  ],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn/",
    "https://hub-mirror.c.163.com/",
    "https://reg-mirror.qiniu.com/",
    "https://registry.docker-cn.com/"
  ],
  "log-driver": "json-file",
  "log-level": "info",
  "live-restore": true
}
EOF

Distribute the Docker configuration

$ scp /usr/lib/systemd/system/docker.service k8s-node01:/usr/lib/systemd/system/
$ scp /usr/lib/systemd/system/docker.service k8s-node02:/usr/lib/systemd/system/
$ scp /data/docker/conf/daemon.json  k8s-node01:/data/docker/conf/
$ scp /data/docker/conf/daemon.json  k8s-node02:/data/docker/conf/

Symlink the container runtime binaries into /usr/bin/

$ ln -sv /data/docker/bin/containerd /usr/bin/
$ ln -sv /data/docker/bin/runc /usr/bin/
$ ln -sv /data/docker/bin/docker-init /usr/bin/
$ ln -sv /data/docker/bin/containerd-shim /usr/bin/

Note: if the Docker binaries were not unpacked under /usr/bin, this step is mandatory; otherwise Docker will fail to start.

Start Docker

$ systemctl daemon-reload
$ systemctl start docker.service
$ systemctl enable docker.service

On the worker nodes: create the symlinks and start Docker

$ ln -sv /data/docker/bin/containerd /usr/bin/
$ ln -sv /data/docker/bin/runc /usr/bin/
$ ln -sv /data/docker/bin/docker-init /usr/bin/
$ ln -sv /data/docker/bin/containerd-shim /usr/bin/
 
$ systemctl daemon-reload
$ systemctl start docker.service
$ systemctl enable docker.service

Deploy the master node

Create the Kubernetes directories and add them to PATH

$ mkdir -p /data/k8s/{bin,conf,certs,logs,data}
$ echo 'PATH=/data/k8s/bin:$PATH' > /etc/profile.d/k8s.sh  && source /etc/profile.d/k8s.sh

Create the Kubernetes CA signing request file

$ cp /data/etcd/certs/ca-config.json /data/k8s/certs/
$ cp /data/etcd/certs/ca-csr.json /data/k8s/certs/
$ sed -i 's/etcd CA/kubernetes CA/g' /data/k8s/certs/ca-csr.json

Note: all Kubernetes components must share the same root CA.

Generate the CA certificate and private key

$ cd /data/k8s/certs && cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

Download the Kubernetes binaries
These come from the official Kubernetes releases on GitHub; reaching them may require a proxy.

$ mkdir ~/kubernetes && cd ~/kubernetes
$ wget https://github.com/kubernetes/kubernetes/releases/download/v1.18.18/kubernetes.tar.gz
$ tar xf kubernetes.tar.gz
$ cd kubernetes/
$ ./cluster/get-kube-binaries.sh

Note: ./cluster/get-kube-binaries.sh downloads from upstream. In my tests it worked without special network access, but timeouts and connection errors do happen, so retry a few times. Once kubernetes-server-linux-amd64.tar.gz has been downloaded you have everything needed; the script then goes on to fetch kubernetes-manifests.tar.gz, which you can skip with CTRL+C.
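If the script keeps failing, the server tarball can also be fetched directly; the URL below assumes the standard dl.k8s.io layout for this release:

$ wget https://dl.k8s.io/v1.18.18/kubernetes-server-linux-amd64.tar.gz -P ~/kubernetes/kubernetes/server/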

Unpack the Kubernetes server package

$ cd ~/kubernetes/kubernetes/server && tar xf kubernetes-server-linux-amd64.tar.gz

Note: if the download above succeeded, the server directory will contain kubernetes-server-linux-amd64.tar.gz.

Install kube-apiserver

Copy the binaries

$ cd ~/kubernetes/kubernetes/server/kubernetes/server/bin
$ cp kube-apiserver kubectl /data/k8s/bin/

Create the log directory

$ mkdir /data/k8s/logs/kube-api-server

Generate the apiserver certificate and private key

$ cat > /data/k8s/certs/apiserver-csr.json <<EOF
{
    "CN": "system:kube-apiserver",
    "hosts": [
        "10.183.0.1",
        "127.0.0.1",
        "192.168.32.134",
        "192.168.32.135",
        "192.168.32.136",
        "192.168.32.133",
        "192.168.32.100",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "name": [
        {
            "C": "CN",
            "L": "GuangDong",
            "ST": "GuangDong",
            "O": "k8s"
        }
    ]
}
EOF
 
$ cd /data/k8s/certs && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes apiserver-csr.json | cfssljson -bare apiserver -

Note: adjust the IP addresses; CIDR ranges are not allowed here. hosts must also include the first address of the service CIDR (x.x.x.1).

Create the kube-apiserver startup options

$ cat > /data/k8s/conf/kube-apiserver.conf <<  EOF
KUBE_APISERVER_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kube-api-server \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/data/k8s/logs/kube-api-server/kube-apiserver-audit.log \\
--etcd-servers=https://192.168.32.134:2379,https://192.168.32.135:2379,https://192.168.32.136:2379 \\
--bind-address=192.168.32.134 \\
--insecure-port=0 \\
--secure-port=6443 \\
--advertise-address=192.168.32.134 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.183.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction,PodPreset \\
--runtime-config=settings.k8s.io/v1alpha1=true \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/data/k8s/conf/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/data/k8s/certs/apiserver.pem \\
--kubelet-client-key=/data/k8s/certs/apiserver-key.pem \\
--tls-cert-file=/data/k8s/certs/apiserver.pem  \\
--tls-private-key-file=/data/k8s/certs/apiserver-key.pem \\
--client-ca-file=/data/k8s/certs/ca.pem \\
--service-account-key-file=/data/k8s/certs/ca-key.pem \\
--etcd-cafile=/data/etcd/certs/ca.pem \\
--etcd-certfile=/data/etcd/certs/etcd.pem \\
--etcd-keyfile=/data/etcd/certs/etcd-key.pem"
EOF

Note: adjust the IP addresses and service-cluster-ip-range (the service CIDR).

Create the token file referenced above

$ cat > /data/k8s/conf/token.csv <<EOF
0fb61c46f8991b718eb38d27b605b008,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
 
# A token can be generated with:
$ head -c 16 /dev/urandom | od -An -t x | tr -d ' '

Create the kube-apiserver systemd unit

$ cat > /usr/lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-apiserver.conf
ExecStart=/data/k8s/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-apiserver

$ systemctl daemon-reload
$ systemctl start kube-apiserver.service
$ systemctl enable kube-apiserver.service

Install kube-controller-manager

Copy the binary

$ cd ~/kubernetes/kubernetes/server/kubernetes/server/bin/
$ cp kube-controller-manager /data/k8s/bin/

Create the log directory

$ mkdir /data/k8s/logs/kube-controller-manager

Generate the certificate and private key

$ cat > /data/k8s/certs/controller-manager.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s"
    }
  ]
}
EOF
 
$ cd /data/k8s/certs && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes controller-manager.json | cfssljson -bare controller-manager -

Generate the kubeconfig used to reach the cluster

$ KUBE_APISERVER="https://192.168.32.134:6443"

$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/data/k8s/certs/controller-manager.pem \
  --client-key=/data/k8s/certs/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/controller-manager.kubeconfig

Create the kube-controller-manager startup options

$ cat > /data/k8s/conf/kube-controller-manager.conf <<EOF
KUBE_CONTROLLER_MANAGER_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kube-controller-manager \\
--master=https://192.168.32.134:6443 \\
--bind-address=0.0.0.0 \\
--port=0 \\
--secure-port=10257 \\
--leader-elect=true \\
--allocate-node-cidrs=true \\
--cluster-cidr=20.0.0.0/16 \\
--service-cluster-ip-range=10.183.0.0/24 \\
--authentication-kubeconfig=/data/k8s/certs/controller-manager.kubeconfig \\
--authorization-kubeconfig=/data/k8s/certs/controller-manager.kubeconfig \\
--client-ca-file=/data/k8s/certs/ca.pem \\
--cluster-signing-cert-file=/data/k8s/certs/ca.pem \\
--cluster-signing-key-file=/data/k8s/certs/ca-key.pem  \\
--root-ca-file=/data/k8s/certs/ca.pem \\
--service-account-private-key-file=/data/k8s/certs/ca-key.pem \\
--kubeconfig=/data/k8s/certs/controller-manager.kubeconfig \\
--controllers=*,bootstrapsigner,tokencleaner \\
--node-cidr-mask-size=24 \\
--requestheader-client-ca-file=/data/k8s/certs/controller-manager.pem \\
--use-service-account-credentials=true \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF

Note: adjust service-cluster-ip-range (service CIDR), cluster-cidr (pod CIDR), and the master value.

Create the kube-controller-manager systemd unit

$ cat > /usr/lib/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-controller-manager.conf
ExecStart=/data/k8s/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-controller-manager

$ systemctl daemon-reload
$ systemctl start kube-controller-manager.service
$ systemctl enable kube-controller-manager.service

Install kube-scheduler

Copy the binary

$ cd ~/kubernetes/kubernetes/server/kubernetes/server/bin/
$ cp kube-scheduler /data/k8s/bin/

Create the log directory

$ mkdir /data/k8s/logs/kube-scheduler

Generate the certificate and private key

$ cat > /data/k8s/certs/scheduler.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s"
    }
  ]
}
EOF
 
$ cd /data/k8s/certs && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes scheduler.json | cfssljson -bare scheduler -

Generate the kubeconfig used to reach the cluster

$ KUBE_APISERVER="https://192.168.32.134:6443"

$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/data/k8s/certs/scheduler.pem \
  --client-key=/data/k8s/certs/scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/scheduler.kubeconfig

Create the kube-scheduler startup options

$ cat > /data/k8s/conf/kube-scheduler.conf <<EOF
KUBE_SCHEDULER_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kube-scheduler \\
--master=https://192.168.32.134:6443 \\
--authentication-kubeconfig=/data/k8s/certs/scheduler.kubeconfig \\
--authorization-kubeconfig=/data/k8s/certs/scheduler.kubeconfig \\
--bind-address=0.0.0.0 \\
--port=0 \\
--secure-port=10259 \\
--kubeconfig=/data/k8s/certs/scheduler.kubeconfig \\
--client-ca-file=/data/k8s/certs/ca.pem \\
--requestheader-client-ca-file=/data/k8s/certs/scheduler.pem \\
--leader-elect=true"
EOF

Note: adjust the master value.

Create the kube-scheduler systemd unit

$ cat > /usr/lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-scheduler.conf
ExecStart=/data/k8s/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-scheduler

$ systemctl daemon-reload
$ systemctl start kube-scheduler.service
$ systemctl enable kube-scheduler.service

Client setup and verification

Client setup

$ cat > /data/k8s/certs/admin-csr.json << EOF
{
  "CN": "system:admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s"
    }
  ]
}
EOF
 
$ cd /data/k8s/certs && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin -

$ KUBE_APISERVER="https://192.168.32.134:6443"

$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/admin.kubeconfig
 
$ kubectl config set-credentials system:admin \
  --client-certificate=/data/k8s/certs/admin.pem \
  --client-key=/data/k8s/certs/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/admin.kubeconfig
 
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:admin \
  --kubeconfig=/data/k8s/certs/admin.kubeconfig
 
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/admin.kubeconfig

$ sed -ri "s/(--insecure-port=0)/#\1/g" /data/k8s/conf/kube-apiserver.conf
$ systemctl restart kube-apiserver

$ kubectl create clusterrolebinding system:admin --clusterrole=cluster-admin --user=system:admin

$ sed -ri "s/#(--insecure-port=0)/\1/g" /data/k8s/conf/kube-apiserver.conf 
$ systemctl restart kube-apiserver

$ mkdir -p ~/.kube && cp /data/k8s/certs/admin.kubeconfig ~/.kube/config

Verify

# Check component statuses via kubectl
$ kubectl get cs 
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Healthy     ok 
scheduler            Healthy     ok
etcd-1               Healthy     {"health":"true"}
etcd-2               Healthy     {"health":"true"}
etcd-0               Healthy     {"health":"true"}  
 
# Hit the secure health endpoints directly
$ curl -sk --cacert /data/k8s/certs/ca.pem --cert /data/k8s/certs/admin.pem --key /data/k8s/certs/admin-key.pem https://192.168.32.134:10257/healthz && echo
$ curl -sk --cacert /data/k8s/certs/ca.pem --cert /data/k8s/certs/admin.pem --key /data/k8s/certs/admin-key.pem https://192.168.32.134:10259/healthz && echo

Deploy node components (on the master)

Install kubelet

Authorize the kubelet-bootstrap user to request certificates

$ kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

Create the log directory

$ mkdir /data/k8s/logs/kubelet

Copy the binary

$ cd ~/kubernetes/kubernetes/server/kubernetes/server/bin
$ cp kubelet /data/k8s/bin/

Create the kubelet startup options

$ cat > /data/k8s/conf/kubelet.conf <<EOF
KUBELET_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kubelet \\
--hostname-override=k8s-master \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--kubeconfig=/data/k8s/certs/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/data/k8s/certs/bootstrap.kubeconfig \\
--config=/data/k8s/conf/kubelet-config.yaml \\
--cert-dir=/data/k8s/certs/ \\
--root-dir=/data/k8s/data/kubelet/ \\
--pod-infra-container-image=ecloudedu/pause-amd64:3.0"
EOF

Note: set hostname-override to the current hostname. cni-conf-dir defaults to /etc/cni/net.d and cni-bin-dir defaults to /opt/cni/bin. cgroupDriver is set to systemd (in kubelet-config.yaml below) to match Docker's native.cgroupdriver=systemd. root-dir defaults to /var/lib/kubelet.

Create the kubelet configuration file

$ cat > /data/k8s/conf/kubelet-config.yaml <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 0
cgroupDriver: systemd
clusterDNS:
  - 10.183.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /data/k8s/certs/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 100
EOF

Note: set clusterDNS to an address in the service CIDR (conventionally x.x.x.2).
References: https://github.com/kubernetes/kubelet
https://kubernetes.io/zh/docs/reference/config-api/kubelet-config.v1beta1/
https://pkg.go.dev/k8s.io/kubelet/config/v1beta1#KubeletConfiguration

Generate bootstrap.kubeconfig

$ KUBE_APISERVER="https://192.168.32.134:6443"        # master IP
$ TOKEN="0fb61c46f8991b718eb38d27b605b008"    # must match the token in token.csv
 
# Set cluster parameters
$ kubectl config set-cluster kubernetes \
--certificate-authority=/data/k8s/certs/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=/data/k8s/certs/bootstrap.kubeconfig
 
# Set client credentials
$ kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=/data/k8s/certs/bootstrap.kubeconfig
 
# Set the context
$ kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=/data/k8s/certs/bootstrap.kubeconfig
 
# Use the default context
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/bootstrap.kubeconfig

Create the kubelet systemd unit

$ cat > /usr/lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
 
[Service]
EnvironmentFile=/data/k8s/conf/kubelet.conf
ExecStart=/data/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65535
 
[Install]
WantedBy=multi-user.target
EOF

Start kubelet

$ systemctl daemon-reload
$ systemctl start kubelet.service
$ systemctl enable kubelet.service

Approve the kubelet CSR to join the cluster

$ kubectl get csr
NAME                                                   AGE     SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-C0QE1O0aWVJc-H5AObkjBJ4iqhQY2BiUqIyUVe9UBUM   6m22s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
 
$ kubectl certificate approve node-csr-C0QE1O0aWVJc-H5AObkjBJ4iqhQY2BiUqIyUVe9UBUM
certificatesigningrequest.certificates.k8s.io/node-csr-C0QE1O0aWVJc-H5AObkjBJ4iqhQY2BiUqIyUVe9UBUM approved

Note: node-csr-C0QE1O0aWVJc-H5AObkjBJ4iqhQY2BiUqIyUVe9UBUM is the NAME value reported by kubectl get csr.
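If several nodes are bootstrapping at once, all pending CSRs can be approved in one pass (a small convenience sketch):

$ kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve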

Verify

$ kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   NotReady   <none>   27s   v1.18.18
k8s-master02   NotReady   <none>   27s   v1.18.18

Install kube-proxy

Create the log directory

$ mkdir /data/k8s/logs/kube-proxy

Copy the binary

$ cd ~/kubernetes/kubernetes/server/kubernetes/server/bin/
$ cp kube-proxy /data/k8s/bin/

Create the kube-proxy startup options

$ cat > /data/k8s/conf/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kube-proxy \\
--config=/data/k8s/conf/kube-proxy-config.yml"
EOF

Create the kube-proxy configuration file

$ cat > /data/k8s/conf/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /data/k8s/certs/proxy.kubeconfig
hostnameOverride: k8s-master
clusterCIDR: 20.0.0.0/16
mode: ipvs
ipvs:
  minSyncPeriod: 5s
  syncPeriod: 5s
  scheduler: "rr"
EOF

Note: set hostnameOverride to this host's hostname and clusterCIDR to the pod CIDR.
References: https://github.com/kubernetes/kube-proxy
https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
https://kubernetes.io/zh/docs/reference/config-api/kube-proxy-config.v1alpha1/
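Once kube-proxy is running (started below), the active mode can be confirmed through the metrics port configured above, and ipvsadm shows the virtual servers it programs:

$ curl -s 127.0.0.1:10249/proxyMode   # should print: ipvs
$ ipvsadm -Ln                         # lists the IPVS virtual/real servers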

Generate the certificate and private key

$ cat > /data/k8s/certs/proxy.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s"
    }
  ]
}
EOF
 
$ cd /data/k8s/certs && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes proxy.json | cfssljson -bare proxy -

Generate proxy.kubeconfig

$ KUBE_APISERVER="https://192.168.32.134:6443"
 
# Set cluster parameters
$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/proxy.kubeconfig
 
# Set client credentials
$ kubectl config set-credentials system:kube-proxy \
  --client-certificate=/data/k8s/certs/proxy.pem \
  --client-key=/data/k8s/certs/proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/proxy.kubeconfig
 
# Set the context
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-proxy \
  --kubeconfig=/data/k8s/certs/proxy.kubeconfig
 
# Use the default context
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/proxy.kubeconfig

Create the kube-proxy systemd unit

$ cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-proxy.conf
ExecStart=/data/k8s/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-proxy

$ systemctl daemon-reload
$ systemctl start kube-proxy.service
$ systemctl enable kube-proxy.service

Fix the empty ROLES column:

kubectl label node k8s-master node-role.kubernetes.io/master=
kubectl label node k8s-node01 node-role.kubernetes.io/node=
kubectl label node k8s-node02 node-role.kubernetes.io/node=

If you apply the wrong label, remove it with kubectl label node k8s-master node-role.kubernetes.io/node- (note the trailing dash).

Add worker nodes

Install the kubelet service

Create the Kubernetes directories and add them to PATH

$ mkdir -p /data/k8s/{bin,conf,certs,logs} && mkdir /data/k8s/logs/kubelet
$ echo 'PATH=/data/k8s/bin:$PATH' > /etc/profile.d/k8s.sh  && source /etc/profile.d/k8s.sh

Fetch the kubelet binary

scp root@k8s-master:/data/k8s/bin/kubelet /data/k8s/bin/

Create the kubelet startup options

$ cat > /data/k8s/conf/kubelet.conf <<EOF
KUBELET_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kubelet \\
--hostname-override=HOSTNAME \\
--network-plugin=cni \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--kubeconfig=/data/k8s/certs/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/data/k8s/certs/bootstrap.kubeconfig \\
--config=/data/k8s/conf/kubelet-config.yaml \\
--cert-dir=/data/k8s/certs/ \\
--pod-infra-container-image=ecloudedu/pause-amd64:3.0"
EOF
 
$ scp root@k8s-master:/data/k8s/conf/kubelet-config.yaml /data/k8s/conf/

Note: set hostname-override to this node's hostname.

Fetch the required certificates

$ scp root@k8s-master:/data/k8s/certs/bootstrap.kubeconfig /data/k8s/certs/
$ scp root@k8s-master:/data/k8s/certs/ca*pem /data/k8s/certs/

Create the kubelet systemd unit

$ cat > /usr/lib/systemd/system/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
 
[Service]
EnvironmentFile=/data/k8s/conf/kubelet.conf
ExecStart=/data/k8s/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65535
 
[Install]
WantedBy=multi-user.target
EOF

Start kubelet

$ systemctl daemon-reload
$ systemctl start kubelet.service
$ systemctl enable kubelet.service

Approve the kubelet CSR to join the cluster

$ kubectl get csr
NAME                                                   AGE   SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-i8aN5Ua8282QMSOERSZFCr26dzmSmXod-kv5fCm5Kf8   26s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-sePBDxehlZbf8B4DwMvObQpRp-a5fOKNbx3NpDYcKeA   12m   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
 
$ kubectl certificate approve node-csr-i8aN5Ua8282QMSOERSZFCr26dzmSmXod-kv5fCm5Kf8
certificatesigningrequest.certificates.k8s.io/node-csr-i8aN5Ua8282QMSOERSZFCr26dzmSmXod-kv5fCm5Kf8 approved

Note: node-csr-i8aN5Ua8282QMSOERSZFCr26dzmSmXod-kv5fCm5Kf8 is the NAME value reported by kubectl get csr.

Verify

$ kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   NotReady   master   15h   v1.18.18
k8s-master02   NotReady   master   15h   v1.18.18
k8s-node01     NotReady   <none>   4s    v1.18.18
k8s-node02     NotReady   <none>   4s    v1.18.18

Install the kube-proxy service

Create the log directory

mkdir /data/k8s/logs/kube-proxy

Fetch the kube-proxy binary

scp root@k8s-master:/data/k8s/bin/kube-proxy /data/k8s/bin/

Create the startup options and fetch the configuration

cat > /data/k8s/conf/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--alsologtostderr=true \\
--logtostderr=false \\
--v=4 \\
--log-dir=/data/k8s/logs/kube-proxy \\
--config=/data/k8s/conf/kube-proxy-config.yml"
EOF
 
scp root@k8s-master:/data/k8s/conf/kube-proxy-config.yml /data/k8s/conf/
 
vim /data/k8s/conf/kube-proxy-config.yml
...
    hostnameOverride: HOSTNAME

Note: hostnameOverride must match the kubelet's hostname-override.

Fetch the proxy kubeconfig

scp root@k8s-master:/data/k8s/certs/proxy.kubeconfig /data/k8s/certs/

Create the kube-proxy systemd unit

cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-proxy.conf
ExecStart=/data/k8s/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-proxy

systemctl daemon-reload
systemctl start kube-proxy.service
systemctl enable kube-proxy.service

Verify

journalctl -xeu kube-proxy.service

Note: if the log shows can't set sysctl net/ipv4/vs/conn_reuse_mode, kernel version must be at least 4.1, the kernel needs to be upgraded.
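On CentOS 7 the usual fix is installing a mainline kernel from ELRepo (a sketch under that assumption; package names and URLs may change over time):

$ yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
$ yum --enablerepo=elrepo-kernel install -y kernel-ml
$ grub2-set-default 0 && grub2-mkconfig -o /boot/grub2/grub.cfg
$ reboot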

Install a network plugin

Install flannel (binary or containerized)

Install flanneld from binaries

Note: watch the /data/cni/{bin,conf} directories below; the configuration differs from what was set up earlier (this section also uses the singular /data/etcd/cert and /data/k8s/cert paths), so adjust paths to match your layout.

Generate the flanneld certificate

cat > /data/k8s/cert/flanneld-csr.json <<-EOF
{
  "CN": "system:flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s"
    }
  ]
}
EOF
 
cd /data/etcd/cert/ && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes /data/k8s/cert/flanneld-csr.json | cfssljson -bare /data/k8s/cert/flannel -

Note: this requires the cfssl command. If it is not available on the node, just copy the certificates from the master: scp k8s-master:/data/k8s/cert/flannel*pem /data/k8s/cert/ .

Write the pod CIDR into etcd

ETCDCTL_API=2 etcdctl \
--endpoints=https://172.20.0.6:2379,https://172.20.0.8:2379,https://172.20.0.4:2379 \
--ca-file=/data/etcd/cert/ca.pem \
--cert-file=/data/etcd/cert/etcd.pem \
--key-file=/data/etcd/cert/etcd-key.pem \
set /coreos.com/network/config '{"Network":"20.0.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'

Read the configuration back

ETCDCTL_API=2 etcdctl \
--endpoints=https://172.20.0.6:2379,https://172.20.0.8:2379,https://172.20.0.4:2379 \
--ca-file=/data/etcd/cert/ca.pem \
--cert-file=/data/etcd/cert/etcd.pem \
--key-file=/data/etcd/cert/etcd-key.pem \
get /coreos.com/network/config

Create the flanneld startup options

mkdir -p /data/cni/{bin,conf}
ln -s /data/cni/conf/ /run/flannel
 
cat > /data/cni/conf/flanneld.conf <<-EOF
FLANNEL_OPTIONS="-etcd-endpoints=https://172.20.0.6:2379,https://172.20.0.8:2379,https://172.20.0.4:2379 \\
-iface=eth0 \\
-etcd-cafile=/data/etcd/cert/ca.pem \\
-etcd-certfile=/data/k8s/cert/flannel.pem \\
-etcd-keyfile=/data/k8s/cert/flannel-key.pem"
EOF

Download flanneld

mkdir ~/flannel && cd ~/flannel
wget https://github.com/coreos/flannel/releases/download/v0.13.0/flannel-v0.13.0-linux-amd64.tar.gz
tar xf flannel-v0.13.0-linux-amd64.tar.gz -C /data/cni/bin
wget https://github.com/containernetworking/plugins/releases/download/v0.9.0/cni-plugins-linux-amd64-v0.9.0.tgz
tar xf cni-plugins-linux-amd64-v0.9.0.tgz -C /data/cni/bin

Configure the CNI plugin

Note: kubelet loads CNI configs from its --cni-conf-dir flag (set earlier to /etc/cni/net.d) and binaries from --cni-bin-dir; make sure those flags and the /data/cni paths used here agree.

mkdir /data/cni/net.d/ -p
cat > /data/cni/net.d/10-flannel.conflist <<-EOF
{
  "name": "cni0",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
EOF

Create the flanneld systemd unit

cat > /usr/lib/systemd/system/flanneld.service <<-EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
 
[Service]
Type=notify
EnvironmentFile=/data/cni/conf/flanneld.conf
ExecStart=/data/cni/bin/flanneld -ip-masq \$FLANNEL_OPTIONS
# -c combines the options into a single DOCKER_OPTS variable for the Docker unit below
ExecStartPost=/data/cni/bin/mk-docker-opts.sh -c -d /run/flannel/docker_opts.env
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Update the Docker systemd unit

cat > /usr/lib/systemd/system/docker.service <<-EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
# Start Docker only after the flannel network is up
After=network-online.target firewalld.service flanneld.service
Wants=network-online.target
Requires=flanneld.service
 
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker_opts.env
ExecStart=/data/docker/bin/dockerd --config-file=/data/docker/conf/daemon.json \$DOCKER_OPTS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
 
[Install]
WantedBy=multi-user.target
EOF

Restart the affected services

systemctl daemon-reload
systemctl start flanneld
systemctl enable flanneld
systemctl restart docker
systemctl restart kubelet

Verify

kubectl get nodes
NAME         STATUS     ROLES    AGE   VERSION
k8s-master   Ready      <none>   17h   v1.19.5
k8s-node01   NotReady   <none>   17h   v1.19.5
k8s-node02   NotReady   <none>   17h   v1.19.5

Install flannel as containers
Download the CNI plugins

$ mkdir -p ~/cni/ && cd ~/cni/
$ wget https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz

Create the CNI directory (the working directory)

$ mkdir -p /data/cni/bin

Note: check whether the kubelet sets the cni-bin-dir flag; it defaults to /opt/cni/bin (and cni-conf-dir defaults to /etc/cni/net.d).

Unpack the CNI plugins into /data/cni/bin

$ tar xf cni-plugins-linux-amd64-v0.8.7.tgz -C /data/cni/bin

Deploy the flannel network

$ wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
$ sed -i -r "s#quay.io/coreos/flannel:v0.13.0-rc2#ecloudedu/flannel:v0.12.0-amd64#g" kube-flannel.yml
$ kubectl apply -f kube-flannel.yml

Grant the apiserver access to kubelet APIs

$ cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF

Apply the permissions

$ kubectl apply -f apiserver-to-kubelet-rbac.yaml

Verify

$ kubectl get pod -n kube-system -l app=flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-4hr5k   1/1     Running   4          5d20h
kube-flannel-ds-7pfhc   1/1     Running   5          5d20h
kube-flannel-ds-cqrst   1/1     Running   6          5d20h

Install Calico

For detailed parameter information, see the Calico documentation.
Download the Calico deployment manifest

mkdir ~/calico && cd ~/calico
curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -o calico.yaml

Edit the Calico manifest

1. In the calico-etcd-secrets Secret, fill in etcd-key, etcd-cert, and etcd-ca (a scripted version appears after this list):
copy the full output of cat /data/etcd/certs/ca.pem | base64 -w 0 && echo into etcd-ca
copy the full output of cat /data/etcd/certs/etcd.pem | base64 -w 0 && echo into etcd-cert
copy the full output of cat /data/etcd/certs/etcd-key.pem | base64 -w 0 && echo into etcd-key

 
2. In the calico-config ConfigMap, set etcd_endpoints, etcd_ca, etcd_cert, and etcd_key:
etcd_endpoints: "https://172.20.0.6:2379,https://172.20.0.8:2379,https://172.20.0.4:2379"
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
Fill them in exactly as the manifest's trailing comments indicate.
 
3. In the calico-node DaemonSet, set CALICO_IPV4POOL_CIDR and the calico-etcd-secrets volume.
Uncomment the variable and fill in your planned pod CIDR:
- name: CALICO_IPV4POOL_CIDR
  value: "20.0.0.0/16"
 
Change the Secret volume's default mode from 0400 to 0644:
- name: etcd-certs
  secret:
    secretName: calico-etcd-secrets
    defaultMode: 0644
 
4. In the calico-node DaemonSet, add the following under env:

# Selects which NIC Calico uses; multiple patterns can be ORed with |.
            - name: IP_AUTODETECTION_METHOD
              value: "interface=eth.*|em.*|enp.*"
 
5. In the calico-kube-controllers Deployment, adjust the calico-etcd-secrets volume.
Change the default mode from 0400 to 0644:
- name: etcd-certs
  secret:
    secretName: calico-etcd-secrets
    defaultMode: 0644
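A sketch automating step 1 with sed; the commented-out etcd-ca/etcd-cert/etcd-key placeholders are assumed to appear in the manifest's Secret stanza exactly as shown (verify against your copy of calico.yaml before running):

$ ETCD_CA=$(base64 -w 0 < /data/etcd/certs/ca.pem)
$ ETCD_CERT=$(base64 -w 0 < /data/etcd/certs/etcd.pem)
$ ETCD_KEY=$(base64 -w 0 < /data/etcd/certs/etcd-key.pem)
$ sed -i -e "s|# etcd-ca: null|etcd-ca: ${ETCD_CA}|" \
         -e "s|# etcd-cert: null|etcd-cert: ${ETCD_CERT}|" \
         -e "s|# etcd-key: null|etcd-key: ${ETCD_KEY}|" calico.yaml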

Deploy Calico

kubectl apply -f calico.yaml

Verify Calico

$ kubectl -n kube-system get pod
NAME                                    READY   STATUS    RESTARTS   AGE
calico-kube-controllers-f4c6dbf-tkq77   1/1     Running   1          42h
calico-node-c4ccj                       1/1     Running   1          42h
calico-node-crs9k                       1/1     Running   1          42h
calico-node-fm697                       1/1     Running   1          42h
 
$ kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    <none>   5d23h   v1.19.5
k8s-node01   Ready    <none>   5d23h   v1.19.5
k8s-node02   Ready    <none>   5d23h   v1.19.5
Note: if a node's STATUS is not Ready, wait a while and check again. If it stays NotReady, check whether the kubelet sets the cni-bin-dir and cni-conf-dir flags correctly (defaults: /opt/cni/bin and /etc/cni/net.d/).
 
$ kubectl run busybox --image=busybox sleep 3600
$ kubectl run nginx --image=nginx
 
$ kubectl get pod -owide
NAME      READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
busybox   1/1     Running   6          42h   20.0.58.194   k8s-node02   <none>           <none>
nginx     1/1     Running   1          42h   20.0.85.194   k8s-node01   <none>           <none>
 
$ kubectl exec busybox -- ping 20.0.85.194 -c4
PING 20.0.85.194 (20.0.85.194): 56 data bytes
64 bytes from 20.0.85.194: seq=0 ttl=62 time=0.820 ms
64 bytes from 20.0.85.194: seq=1 ttl=62 time=0.825 ms
64 bytes from 20.0.85.194: seq=2 ttl=62 time=0.886 ms
64 bytes from 20.0.85.194: seq=3 ttl=62 time=0.840 ms
 
--- 20.0.85.194 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.820/0.842/0.886 ms

If everything else works but cross-node containers cannot ping each other,
the IP-in-IP tunnel is the likely cause. You can test IPIP connectivity between two hosts manually:

modprobe ipip
ip tunnel add ipip-tunnel mode ipip remote <peer external IP> local <local external IP>
ifconfig ipip-tunnel <virtual IP> netmask 255.255.255.0

If that also fails, investigate IPIP connectivity between the hosts. On OpenStack VMs this commonly requires disabling port security.

openstack server show <server name>
openstack server remove security group <server name> <security group>
openstack port set --disable-port-security `openstack port list | grep '<host IP>' | awk '{print $2}'`

Deploy add-ons

Deploy CoreDNS

Download the CoreDNS manifest

$ mkdir ~/coredns && cd ~/coredns
$ wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/coredns/coredns.yaml.sed -O coredns.yaml

Adjust the parameters

$ vim coredns.yaml
    ...
    kubernetes $DNS_DOMAIN in-addr.arpa ip6.arpa {
    ...
    memory: $DNS_MEMORY_LIMIT
    ...
    clusterIP: $DNS_SERVER_IP
    ...
    image: k8s.gcr.io/coredns:1.7.0

Replace $DNS_DOMAIN with cluster.local. (the default DNS_DOMAIN is cluster.local.).
Replace $DNS_MEMORY_LIMIT with an appropriate memory limit.
Replace $DNS_SERVER_IP with the x.x.x.2 address in the same network as kube-controller-manager's service-cluster-ip-range.

If you cannot reach k8s.gcr.io, set the image to coredns/coredns:x.x.x. A single replica is not suitable for production, so add a replicas: 3 line under the Deployment's spec. The substitutions can also be scripted, as shown below.
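A sketch of the substitutions with sed (the values are this guide's examples; adjust to your cluster):

$ sed -i -e 's/\$DNS_DOMAIN/cluster.local/g' \
         -e 's/\$DNS_MEMORY_LIMIT/170Mi/g' \
         -e 's/\$DNS_SERVER_IP/10.183.0.2/g' coredns.yaml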

Deploy CoreDNS

$ kubectl apply -f coredns.yaml

Verify

$ kubectl get pod -n kube-system -l k8s-app=kube-dns
NAME                       READY   STATUS    RESTARTS   AGE
coredns-75d9bd4f59-df94b   1/1     Running   0          7m55s
coredns-75d9bd4f59-kh4rp   1/1     Running   0          7m55s
coredns-75d9bd4f59-vjkpb   1/1     Running   0          7m55s
 
$ kubectl run dig --rm -it --image=docker.io/azukiapp/dig /bin/sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes.default.svc.cluster.local.
Server:        10.211.0.2
Address:    10.211.0.2#53
 
Name:    kubernetes.default.svc.cluster.local
Address: 10.211.0.1
 
/ # nslookup kube-dns.kube-system.svc.cluster.local.
Server:        10.211.0.2
Address:    10.211.0.2#53
 
Name:    kube-dns.kube-system.svc.cluster.local
Address: 10.211.0.2

Deploy the Dashboard

Download dashboard.yaml

$ mkdir ~/dashboard && cd ~/dashboard
$ wget https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml -O dashboard.yaml

Edit dashboard.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30088        # added
  type: NodePort    # added
  selector:
    k8s-app: kubernetes-dashboard

Add the two fields nodePort and type. Read the manifest carefully: it contains two Service definitions; edit the kubernetes-dashboard one.

Deploy the Dashboard

$ kubectl apply -f dashboard.yaml

Create a ServiceAccount and bind it to cluster-admin

$ kubectl create serviceaccount dashboard-admin -n kube-system
$ kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

Verify

$ kubectl get pod -n kubernetes-dashboard
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-78f5d9f487-8gn6n   1/1     Running   0          5m47s
kubernetes-dashboard-7d8574ffd9-cgwvq        1/1     Running   0          5m47s

Retrieve the login token

$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}')
Name:         dashboard-admin-token-dw4zw
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: dashboard-admin
              kubernetes.io/service-account.uid: 50d8dc6a-d75c-41e3-b9a6-82006d0970f9
 
Type:  kubernetes.io/service-account-token
 
Data
====
ca.crt:     1314 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6InlPZEgtUlJLQ3lReG4zMlEtSm53UFNsc09nMmQ0YWVOWFhPbEUwUF85aEUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tZHc0enciLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNTBkOGRjNmEtZDc1Yy00MWUzLWI5YTYtODIwMDZkMDk3MGY5Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.sgEroj26ANWX1PzzEMZlCIa1ZxcPkYuP5xolT1L6DDdlaJFteaZZffOqv3hIGQBSUW02n6-nZz4VvRZAitrcA9BCW2VPlqHiQDE37UueU8UE1frQ4VtUkLXAKtMc7CUgHa1stod51LW2ndIKiwq-qWdNC1CQA0KsiBi0t2mGgjNQSII9-7FBTFruDwHUp6RRRqtl_NUl1WQanhHOPXia5wScfB37K8MVB0A4jxXIxNCwpd7zEVp-oQPw8XB500Ut94xwUJY6ppxJpnzXHTcoNt6ClapldTtzTY-HXzy0nXv8QVDozTXC7rTX7dChc1yDjMLWqf-KwT1ZYrKzk-2RHg

The long string printed in the token field is the login token.

Install metrics-server

Create the certificate signing request file

cat > /data/k8s/certs/proxy-client-csr.json <<-EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangDong",
      "O": "k8s"
    }
  ]
}
EOF

Generate the proxy-client certificate and private key

cd /data/k8s/certs/ && cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client -

Download the manifest

mkdir ~/metrics-server  && cd ~/metrics-server
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.1/components.yaml -O metrics-server.yaml

Edit the manifest
1. In the metrics-server container, adjust deployment.spec.template.spec.containers.args:

      - args:
        - --cert-dir=/tmp
        - --secure-port=4443        # originally 443; binding it fails with insufficient permissions
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --kubelet-insecure-tls    # added

2. Adjust the container's deployment.spec.template.spec.containers.ports:

        ports:
        - containerPort: 4443       # originally 443; change to 4443 to match
          name: https
          protocol: TCP

3. Enable API aggregation on kube-apiserver

# append the following to /data/k8s/conf/kube-apiserver.conf
--runtime-config=api/all=true  \
--requestheader-allowed-names=aggregator \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file=/data/k8s/certs/ca.pem \
--proxy-client-cert-file=/data/k8s/certs/proxy-client.pem \
--proxy-client-key-file=/data/k8s/certs/proxy-client-key.pem \
--enable-aggregator-routing=true"

--requestheader-allowed-names: list of client common names allowed to access, taken from the header specified by --requestheader-username-headers. The common names must be issued by the requestheader client CA; an empty value allows any client.
--requestheader-username-headers: request headers to inspect for the username.
--requestheader-group-headers: request headers to inspect for group names.
--requestheader-extra-headers-prefix: request-header prefixes to inspect for extra attributes.
--requestheader-client-ca-file: client CA certificate used to verify requests before trusting the headers above.
--proxy-client-cert-file: client certificate the aggregator presents when proxying requests.
--proxy-client-key-file: private key matching the aggregator client certificate.
--enable-aggregator-routing=true: required when kube-proxy is not running on the kube-apiserver host, so services cannot be reached via their ClusterIP.

4. Restart the kube-apiserver service

systemctl daemon-reload && systemctl restart kube-apiserver

Deploy metrics-server

cd ~/metrics-server
kubectl apply -f metrics-server.yaml

If the image pull fails, switch to another registry:
edit metrics-server.yaml and change k8s.gcr.io/metrics-server/metrics-server:v0.4.1 to bitnami/metrics-server:0.4.1.
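After the pod is Running, confirm that the aggregated API is available and that metrics flow (standard kubectl checks):

kubectl get apiservices v1beta1.metrics.k8s.io
kubectl top nodes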

kubectl command completion

$ yum install -y bash-completion
$ source /usr/share/bash-completion/bash_completion
$ source <(kubectl completion bash)
$ echo "source <(kubectl completion bash)" >> ~/.bashrc

Highly available masters

The environment preparation is identical to the steps above and is not repeated here.
Create the Kubernetes directories and add them to PATH

$ mkdir -p /data/k8s/{bin,conf,certs,logs,data}
$ mkdir -p /data/etcd/certs
$ echo 'PATH=/data/k8s/bin:$PATH' > /etc/profile.d/k8s.sh  && source /etc/profile.d/k8s.sh

Install kube-apiserver

Copy the binaries

$ scp k8s-master01:/data/k8s/bin/{kube-apiserver,kubectl} /data/k8s/bin/

Create the log directory

$ mkdir /data/k8s/logs/kube-api-server

Fetch the certificates

$ scp k8s-master01:/data/k8s/certs/{apiserver*.pem,ca*.pem} /data/k8s/certs/
$ scp k8s-node01:/data/etcd/certs/ca*.pem /data/etcd/certs/
$ scp k8s-node01:/data/etcd/certs/etcd*.pem /data/etcd/certs/

Fetch the kube-apiserver startup options

$ scp k8s-master01:/data/k8s/conf/kube-apiserver.conf /data/k8s/conf/

Note: adjust the IP addresses (bind-address, advertise-address); service-cluster-ip-range must stay identical across masters.

Fetch the token file

$ scp k8s-master01:/data/k8s/conf/token.csv /data/k8s/conf/

Create the kube-apiserver systemd unit

$ cat > /usr/lib/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-apiserver.conf
ExecStart=/data/k8s/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-apiserver

$ systemctl daemon-reload
$ systemctl start kube-apiserver.service
$ systemctl enable kube-apiserver.service

Install kube-controller-manager

Copy the binary

$ scp k8s-master01:/data/k8s/bin/kube-controller-manager /data/k8s/bin/

Create the log directory

$ mkdir /data/k8s/logs/kube-controller-manager

Fetch the certificates

$ scp k8s-master01:/data/k8s/certs/controller-manager*.pem /data/k8s/certs/

Generate the kubeconfig used to reach the cluster

$ KUBE_APISERVER="https://192.168.32.133:6443"

$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/data/k8s/certs/controller-manager.pem \
  --client-key=/data/k8s/certs/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=/data/k8s/certs/controller-manager.kubeconfig
 
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/controller-manager.kubeconfig

Fetch the kube-controller-manager startup options

$ scp k8s-master01:/data/k8s/conf/kube-controller-manager.conf /data/k8s/conf/

Note: set master to the API endpoint; service-cluster-ip-range (service CIDR) and cluster-cidr (pod CIDR) must match the first master.

Create the kube-controller-manager systemd unit

$ cat > /usr/lib/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-controller-manager.conf
ExecStart=/data/k8s/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-controller-manager

$ systemctl daemon-reload
$ systemctl start kube-controller-manager.service
$ systemctl enable kube-controller-manager.service

Install kube-scheduler

Copy the binary

$ scp k8s-master01:/data/k8s/bin/kube-scheduler /data/k8s/bin

Create the log directory

$ mkdir /data/k8s/logs/kube-scheduler

Fetch the certificates

$ scp k8s-master01:/data/k8s/certs/scheduler*.pem /data/k8s/certs

Generate the kubeconfig used to reach the cluster

$ KUBE_APISERVER="https://192.168.32.133:6443"

$ kubectl config set-cluster kubernetes \
  --certificate-authority=/data/k8s/certs/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/data/k8s/certs/scheduler.pem \
  --client-key=/data/k8s/certs/scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=/data/k8s/certs/scheduler.kubeconfig
 
$ kubectl config use-context default \
--kubeconfig=/data/k8s/certs/scheduler.kubeconfig

Fetch the kube-scheduler startup options

$ scp k8s-master01:/data/k8s/conf/kube-scheduler.conf /data/k8s/conf/

Note: adjust the master value.

Create the kube-scheduler systemd unit

$ cat > /usr/lib/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
EnvironmentFile=-/data/k8s/conf/kube-scheduler.conf
ExecStart=/data/k8s/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
EOF

Start kube-scheduler

$ systemctl daemon-reload
$ systemctl start kube-scheduler.service
$ systemctl enable kube-scheduler.service

Install nginx

Every master host needs the nginx service.
Download nginx

$ yum install epel-release -y
$ yum install nginx -y

Configure nginx

cat > /etc/nginx/nginx.conf <<-'EOF'
user  nginx;
worker_processes  auto;

pid /run/nginx.pid;

events {
    worker_connections  1024;
}

# Layer-4 load balancing for the two master kube-apiservers
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
       server 192.168.32.134:6443;   # Master1 APISERVER IP:PORT
       server 192.168.32.133:6443;   # Master2 APISERVER IP:PORT
    }
    
    server {
       listen 16443; # nginx shares the master hosts, so it cannot listen on 6443 or it would clash with kube-apiserver
       proxy_pass k8s-apiserver;
    }
}
EOF

Start nginx

systemctl start nginx
systemctl enable nginx

Install keepalived

Every master host needs the keepalived service.
Download keepalived

yum install keepalived -y

Configure keepalived

# On k8s-master01
cat > /etc/keepalived/keepalived.conf <<-EOF
! Configuration File for keepalived

global_defs {
    smtp_server 127.0.0.1
    script_user root
    enable_script_security
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_k8s_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.32.100
    }
    track_script {
        check_nginx
    } 
}
EOF

cat > /etc/keepalived/check_k8s_nginx.sh <<-EOF
#!/bin/bash
count=\$(ss -lntup | egrep '16443' | wc -l)

if [ "\$count" -ge 0 ];then
    # 退出状态为0,代表检查成功
    exit 0
else
    # 退出状态为1,代表检查不成功
    exit 1
fi
EOF

chmod +x /etc/keepalived/check_k8s_nginx.sh

# On k8s-master02
cat > /etc/keepalived/keepalived.conf <<-EOF
! Configuration File for keepalived

global_defs {
    smtp_server 127.0.0.1
    script_user root
    enable_script_security
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_k8s_nginx.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 95
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.32.100
    }
    track_script {
        check_nginx
    } 
}
EOF

cat > /etc/keepalived/check_k8s_nginx.sh <<-EOF
#!/bin/bash
count=\$(ss -lntup | egrep '16443' | wc -l)

if [ "\$count" -ge 0 ];then
    # 退出状态为0,代表检查成功
    exit 0
else
    # 退出状态为1,代表检查不成功
    exit 1
fi
EOF

chmod +x /etc/keepalived/check_k8s_nginx.sh

Start keepalived

systemctl start keepalived
systemctl enable keepalived

Point services at the VIP

On all master nodes

sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/bootstrap.kubeconfig
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/admin.kubeconfig 
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/kubelet.kubeconfig 
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/proxy.kubeconfig
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' ~/.kube/config

systemctl restart kubelet kube-proxy

On all worker nodes

sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/bootstrap.kubeconfig
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/kubelet.kubeconfig 
sed -ri 's#(server: https://).*#\1192.168.32.100:16443#g' /data/k8s/certs/proxy.kubeconfig

systemctl restart kubelet kube-proxy

Appendix: iptables rules

# SSH and baseline input rules
iptables -t filter -A INPUT -p icmp --icmp-type 8 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 22  -m comment --comment "sshd service" -j ACCEPT
iptables -t filter -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -t filter -A INPUT -i lo -j ACCEPT
iptables -t filter -P INPUT DROP

# etcd
iptables -t filter -I INPUT -p tcp --dport 2379:2381 -m comment --comment "etcd Component ports" -j ACCEPT

# master component and VIP ports
iptables -t filter -I INPUT -p tcp -m multiport --dport 6443,10257,10259 -m comment --comment "k8s master Component ports" -j ACCEPT
iptables -t filter -I INPUT -p tcp -m multiport --dport 16443 -m comment --comment "k8s master vip ports" -j ACCEPT

# keepalived VRRP heartbeat
iptables -t filter -I INPUT -p vrrp -s 192.168.32.0/24 -d 224.0.0.18 -m comment --comment "keepalived Heartbeat" -j ACCEPT

# node component ports
iptables -t filter -I INPUT -p tcp -m multiport --dport 10249,10250,10256 -m comment --comment "k8s node Component ports" -j ACCEPT

# other ports used by Kubernetes
iptables -t filter -I INPUT -p tcp --dport 32768:65535 -m comment --comment "ip_local_port_range ports" -j ACCEPT
iptables -t filter -I INPUT -p tcp --dport 30000:32767 -m comment --comment "k8s service nodeports" -j ACCEPT

# Calico ports
iptables -t filter -I INPUT -p tcp -m multiport --dport 179,9099 -m comment --comment "k8s calico Component ports" -j ACCEPT

# CoreDNS port
iptables -t filter -I INPUT -p udp -m udp --dport 53 -m comment --comment "k8s coredns ports" -j ACCEPT

# pod-to-service traffic; without this rule CoreDNS fails to start
iptables -t filter -I INPUT -p tcp -s 20.0.0.0/16 -d 10.183.0.0/24 -m comment --comment "pod to service" -j ACCEPT

# Log dropped packets; they appear in /var/log/messages with the prefix "iptables-drop: "
iptables -t filter -A INPUT -j LOG --log-prefix='iptables-drop: '
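These rules live only in kernel memory; to survive a reboot they must be saved (a sketch assuming the CentOS iptables-services package):

yum install -y iptables-services
service iptables save          # writes the current rules to /etc/sysconfig/iptables
systemctl enable iptables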