Kubernetes Binary Deployment

Components

Cluster roles

master node: manages the cluster
node node: runs the application workloads

Components deployed on the master node

kube-apiserver: central manager that schedules and manages the cluster
kube-controller-manager (controller): manages and monitors containers
kube-scheduler (scheduler): schedules containers
flannel, calico: provide the cluster network
etcd: database
kubectl
kube-proxy

Components deployed on the node nodes

kubelet: deploys and monitors containers (only the containers on its own node)
kube-proxy: provides networking between containers

System Preparation for Binary Deployment (all nodes)

1. Node planning

# Node planning
master: 192.168.0.214
node1: 192.168.0.215
node2: 192.168.0.216

cat >> /etc/hosts << EOF
192.168.0.214 sg-14
192.168.0.215 sg-15
192.168.0.216 sg-16
EOF

2. Component planning

# Master node components
kube-apiserver
kube-controller-manager
kube-scheduler
flannel
etcd
kubelet
kube-proxy

# Node node components
kubelet
kube-proxy

3. System tuning

# Disable SELinux
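A minimal sketch for disabling SELinux on CentOS 7 (standard commands; adjust if your policy differs):
setenforce 0                                                          # turn off enforcement for the current boot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # keep it off after reboots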

# Disable the firewall
systemctl stop firewalld.service    # stop the firewall
systemctl disable --now firewalld   # disable it on boot

# Disable the swap partition
swapoff -a
# comment out the swap entry in /etc/fstab
echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet   # make kubelet ignore swap

# Set up passwordless SSH
[root@sg-14 ~]# ssh-keygen -t rsa
[root@sg-14 ~]# for i in sg-14 sg-15 sg-16;do  ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i; done

# Synchronize cluster time
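The original leaves this step empty; a common sketch uses ntpdate against a public NTP server (the Aliyun server below is an assumption; any reachable NTP source works):
ntpdate ntp1.aliyun.com                                                                       # one-off sync
echo '*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1' >> /var/spool/cron/root  # re-sync every 5 minutes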


# Configure the package mirror
[root@sg-14 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
[root@sg-14 ~]# yum clean all
[root@sg-14 ~]# yum makecache

# Update the system
[root@sg-14 ~]# yum update -y --exclude=kernel*

# Install common base packages
[root@sg-14 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

# Update the kernel (Docker needs a fairly recent kernel; 4.4+ is recommended)
[root@sg-14 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm
[root@sg-14 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm
## Install the kernel
[root@sg-14 ~]# yum localinstall -y kernel-lt*
## Set it as the default boot entry
[root@sg-14 ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
## Check the current default kernel
[root@sg-14 ~]# grubby --default-kernel
## Reboot
[root@sg-14 ~]# reboot
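After the reboot, confirm that the new kernel is actually running:
[root@sg-14 ~]# uname -r
# should print 5.4.107-1.el7.elrepo.x86_64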

# Install IPVS
yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

## Load the IPVS modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ $? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs

# Adjust kernel parameters
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF

# Apply immediately
sysctl --system

4. Install Docker

Docker installation (see the separate Docker installation notes).

Binary Deployment (begin)

Certificates

Six sets of certificates in total.
Root CA: used to sign the application (component) certificates.

1. Cluster certificates (master node)

# The following commands only need to be run on master01

# Install the certificate-generation tools
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

# Make them executable
chmod +x cfssljson_linux-amd64
chmod +x cfssl_linux-amd64

# Move them to /usr/local/bin
mv cfssljson_linux-amd64 cfssljson
mv cfssl_linux-amd64 cfssl
mv cfssljson cfssl /usr/local/bin

2. Generate the root CA (master node)

Root CA config file

mkdir -p /opt/cert/ca

cat > /opt/cert/ca/ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
           "expiry": "8760h"
      }
    }
  }
}
EOF

#################################
"expiry": "8760h"  --> 有效期1年 
"usages": [ // 功能
  "signing", //认证
  "key encipherment", //私钥
  "server auth", // 服务端认证
  "client auth" // 客户端认证
],
#################################

Generate the root CA CSR (certificate signing request) file

cat > /opt/cert/ca/ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names":[{
    "C": "CN",
    "ST": "ShangHai",
    "L": "ShangHai"
  }]
}
EOF

Generate the root CA

[root@sg-14 /opt/cert/ca]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

######################################
[root@sg-14 ca]# ll
total 20
-rw-r--r-- 1 root root  285 Oct 29 10:39 ca-config.json
-rw-r--r-- 1 root root  960 Oct 29 10:41 ca.csr
-rw-r--r-- 1 root root  153 Oct 29 10:39 ca-csr.json
-rw------- 1 root root 1675 Oct 29 10:41 ca-key.pem
-rw-r--r-- 1 root root 1281 Oct 29 10:41 ca.pem
[root@sg-14 ca]#
ca.csr: root certificate signing request file
ca-key.pem: root CA private key
ca.pem: root CA certificate
#######################################
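To double-check the root certificate just generated (optional), both cfssl and openssl can inspect it:
cfssl certinfo -cert ca.pem                       # dump certificate details as JSON
openssl x509 -in ca.pem -noout -dates -subject    # quick look at validity period and subject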

Deploy the etcd Database Cluster

1. Node planning (all master nodes)

192.168.0.214  etcd01

2. Create the etcd cluster certificates

mkdir -p /opt/cert/etcd
cd /opt/cert/etcd

cat > /opt/cert/etcd/etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "192.168.0.214",
        "192.168.0.215",
        "192.168.0.216"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "ShangHai",
          "L": "ShangHai"
        }
    ]
}
EOF


#############################
IPs of all the nodes:
"hosts": [
        "127.0.0.1",
        "192.168.0.214",
        "192.168.0.215",
        "192.168.0.216"
    ],
##############################

3. Generate the etcd certificates

[root@sg-14 /opt/cert/etcd]# cfssl gencert -ca=../ca/ca.pem -ca-key=../ca/ca-key.pem -config=../ca/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

####################################################
2021/03/26 17:38:57 [INFO] generate received request
2021/03/26 17:38:57 [INFO] received CSR
2021/03/26 17:38:57 [INFO] generating key: rsa-2048
2021/03/26 17:38:58 [INFO] encoded CSR
2021/03/26 17:38:58 [INFO] signed certificate with serial number 179909685000914921289186132666286329014949215773
2021/03/26 17:38:58 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
####################################################

4. Distribute the etcd certificates

# Distribute only to the master nodes
[root@sg-14 /opt/cert/etcd]# for ip in sg-14;do 
   ssh root@${ip} "mkdir -pv /etc/etcd/ssl"
   scp ../ca/ca*.pem  root@${ip}:/etc/etcd/ssl
   scp ./etcd*.pem  root@${ip}:/etc/etcd/ssl   
 done

#############################################
ls /etc/etcd/ssl/
ca-key.pem  ca.pem  etcd-key.pem  etcd.pem
#############################################

5. Deploy etcd

# Download the etcd release
cd /opt/
wget https://mirrors.huaweicloud.com/etcd/v3.3.24/etcd-v3.3.24-linux-amd64.tar.gz

# Unpack
tar xf etcd-v3.3.24-linux-amd64.tar.gz

# Distribute to the other nodes
for i in sg-14 sg-15 sg-16
do
	scp ./etcd-v3.3.24-linux-amd64/etcd* root@$i:/usr/local/bin/
done

# Check that etcd installed successfully
[root@sg-14 /opt/etcd-v3.3.24-linux-amd64]# etcd --version
etcd Version: 3.3.24
Git SHA: bdd57848d
Go Version: go1.12.17
Go OS/Arch: linux/amd64

6. Register the etcd service

# Run on all master nodes
mkdir -pv /etc/kubernetes/conf/etcd

ETCD_NAME=`hostname`
INTERNAL_IP="192.168.0.214"
INITIAL_CLUSTER=sg-14=https://192.168.0.214:2380
cat << EOF | sudo tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token etcd-cluster \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Start the etcd service
systemctl enable --now etcd

###############Success#########################
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.

##############################
List all the master nodes in INITIAL_CLUSTER:
INITIAL_CLUSTER=sg-14=https://192.168.0.214:2380
INTERNAL_IP="192.168.0.214"   # this master node's IP
•	ETCD_NAME: node name, unique within the cluster
•	ETCD_DATA_DIR: data directory
•	ETCD_LISTEN_PEER_URLS: cluster (peer) listen address
•	ETCD_LISTEN_CLIENT_URLS: client listen address
•	ETCD_INITIAL_ADVERTISE_PEER_URLS: advertised peer address
•	ETCD_ADVERTISE_CLIENT_URLS: advertised client address
•	ETCD_INITIAL_CLUSTER: cluster node addresses
•	ETCD_INITIAL_CLUSTER_TOKEN: cluster token
•	ETCD_INITIAL_CLUSTER_STATE: join state; new for a new cluster, existing to join an existing cluster
##############################

7. Test the etcd service

# Method 1 (note: --cacert must point at the CA certificate, not the etcd leaf certificate)
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.0.214:2379" \
endpoint status --write-out='table'

# Method 2
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.0.214:2379" \
member list --write-out='table'
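A third quick check is the health endpoint, using the same TLS flags (the single-node endpoint of this deployment assumed):
ETCDCTL_API=3 etcdctl \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.0.214:2379" \
endpoint health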

Deploy the Master Node

The goal is to deploy every component on the master node successfully.

1. Cluster planning

# vim /etc/hosts
master:192.168.0.214
node1:192.168.0.215
node2:192.168.0.216

cat >> /etc/hosts << EOF
192.168.0.214 sg-14
192.168.0.215 sg-15
192.168.0.216 sg-16
EOF

kube-apiserver, controller-manager, scheduler, flannel, etcd, kubelet, kube-proxy, dns

2. CA certificates

Generate the root CA config file

mkdir /opt/cert/k8s
[root@sg-14 k8s]# cat > /opt/cert/k8s/ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

Generate the root CA CSR file

[root@sg-14 k8s]# cat > /opt/cert/k8s/ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

Generate the CA root certificate

[root@sg-14 /opt/cert/k8s]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

######################################
[root@sg-14 k8s]# ll
total 20
-rw-r--r-- 1 root root  294 Oct 29 15:26 ca-config.json
-rw-r--r-- 1 root root  960 Oct 29 15:37 ca.csr
-rw-r--r-- 1 root root  214 Oct 29 15:27 ca-csr.json
-rw------- 1 root root 1675 Oct 29 15:37 ca-key.pem
-rw-r--r-- 1 root root 1281 Oct 29 15:37 ca.pem
[root@sg-14 k8s]#
ca.csr: root certificate signing request file
ca-key.pem: root CA private key
ca.pem: root CA certificate
#######################################

3. Create the cluster component certificates

Create the kube-apiserver certificate config file

[root@sg-14 k8s]# mkdir -p /opt/cert/k8s
[root@sg-14 k8s]# cat > /opt/cert/k8s/server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "192.168.0.214",
        "192.168.0.215",
        "192.168.0.216",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai"
        }
    ]
}
EOF

############################################
"hosts": [
        "127.0.0.1",
        "192.168.0.214", ## master的ip
        "192.168.0.215", ## node1的ip
        "192.168.0.216", ## node2的ip
        "10.96.0.1",  ## 集群内部网络
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
############################################

Create the kube-apiserver certificate

[root@sg-14 k8s]# cd /opt/cert/k8s
[root@sg-14 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#########################################
[root@sg-14 k8s]# ll
total 36
-rw-r--r-- 1 root root  294 Oct 29 15:26 ca-config.json
-rw-r--r-- 1 root root  960 Oct 29 15:37 ca.csr
-rw-r--r-- 1 root root  214 Oct 29 15:27 ca-csr.json
-rw------- 1 root root 1675 Oct 29 15:37 ca-key.pem
-rw-r--r-- 1 root root 1281 Oct 29 15:37 ca.pem
-rw-r--r-- 1 root root 1220 Oct 29 16:03 server.csr
-rw-r--r-- 1 root root  528 Oct 29 16:00 server-csr.json
-rw------- 1 root root 1679 Oct 29 16:03 server-key.pem
-rw-r--r-- 1 root root 1549 Oct 29 16:03 server.pem
#########################################

Create the controller-manager certificate config file

Cluster controller:
[root@sg-14 /opt/cert/k8s]# cat > /opt/cert/k8s/kube-controller-manager-csr.json << EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
        "127.0.0.1",
        "192.168.0.214",
        "192.168.0.215",
        "192.168.0.216"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "ShangHai",
            "ST": "ShangHai",
            "O": "system:kube-controller-manager",
            "OU": "System"
        }
    ]
}
EOF

Create the controller-manager certificate

[root@sg-14 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

Create the kube-scheduler certificate config file

Scheduler:
[root@sg-14 k8s]# cat > /opt/cert/k8s/kube-scheduler-csr.json << EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
        "127.0.0.1",
        "192.168.0.214",
        "192.168.0.215",
        "192.168.0.216"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "System"
        }
    ]
}
EOF

Create the kube-scheduler certificate

[root@sg-14 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

Create the kube-proxy certificate config file

kube-proxy only provides container networking and is not tied to the other components, so hosts is left empty.

[root@sg-14 k8s]# cat > kube-proxy-csr.json << EOF
{
    "CN":"system:kube-proxy",
    "hosts":[],
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:kube-proxy",
            "OU":"System"
        }
    ]
}
EOF

Create the kube-proxy certificate

[root@sg-14 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Create the cluster administrator certificate config file

cat > /opt/cert/k8s/admin-csr.json << EOF
{
    "CN":"admin",
    "key":{
        "algo":"rsa",
        "size":2048
    },
    "names":[
        {
            "C":"CN",
            "L":"BeiJing",
            "ST":"BeiJing",
            "O":"system:masters",
            "OU":"System"
        }
    ]
}
EOF

Create the cluster administrator certificate

[root@sg-14 k8s]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

Create the kubelet certificate config file

Generated automatically via TLS bootstrapping.

Create the TLS bootstrapping certificate

Creates nodes automatically; used when deploying for high availability.

4. Distribute the certificates

# Distribute the generated certificates to the other nodes
[root@sg-14 k8s]# mkdir -pv /etc/kubernetes/ssl
[root@sg-14 k8s]# cp -p ./{ca*pem,server*pem,kube-controller-manager*pem,kube-scheduler*.pem,kube-proxy*pem,admin*.pem} /etc/kubernetes/ssl

# distribute with a for loop
[root@sg-14 ssl]# for i in sg-14 sg-15 sg-16;do
ssh root@$i "mkdir -pv /etc/kubernetes/ssl"
scp /etc/kubernetes/ssl/* root@$i:/etc/kubernetes/ssl
done

5. Download packages and edit config files

Download

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.20.md#downloads-for-v12012
[root@sg-14 ssl]# mkdir /opt/data/
# download
[root@sg-14 data]# cd /opt/data/ && wget https://dl.k8s.io/v1.20.12/kubernetes-server-linux-amd64.tar.gz

# unpack
[root@sg-14 data]# tar -xf kubernetes-server-linux-amd64.tar.gz

# distribute the components
[root@sg-14 bin]# cd kubernetes/server/bin
[root@sg-14 bin]# for i in sg-14 sg-15 sg-16 ;do  scp kube-apiserver kube-controller-manager kube-proxy kubectl kubelet kube-scheduler root@$i:/usr/local/bin; done

Create the kube-controller-manager.kubeconfig cluster config file

cd /opt/cert/k8s/
## Create kube-controller-manager.kubeconfig
export KUBE_APISERVER="https://192.168.0.214:8443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set client credentials
kubectl config set-credentials "kube-controller-manager" \
  --client-certificate=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set context parameters (bind the cluster parameters and the user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-controller-manager" \
  --kubeconfig=kube-controller-manager.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
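To sanity-check the generated file, kubectl can print it back (embedded certificate data is shown redacted):
kubectl config view --kubeconfig=kube-controller-manager.kubeconfig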

Create the kube-scheduler.kubeconfig cluster config file

cd /opt/cert/k8s/
# Create kube-scheduler.kubeconfig
export KUBE_APISERVER="https://192.168.0.214:8443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig

# Set client credentials
kubectl config set-credentials "kube-scheduler" \
  --client-certificate=/etc/kubernetes/ssl/kube-scheduler.pem \
  --client-key=/etc/kubernetes/ssl/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

# Set context parameters (bind the cluster parameters and the user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-scheduler" \
  --kubeconfig=kube-scheduler.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig

Create the kube-proxy.kubeconfig cluster config file

cd /opt/cert/k8s/
## Create the kube-proxy.kubeconfig cluster config file
export KUBE_APISERVER="https://192.168.0.214:8443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

# Set client credentials
kubectl config set-credentials "kube-proxy" \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

# Set context parameters (bind the cluster parameters and the user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kube-proxy" \
  --kubeconfig=kube-proxy.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Create the super-administrator cluster config file

cd /opt/cert/k8s/
export KUBE_APISERVER="https://192.168.0.214:8443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig

# Set client credentials
kubectl config set-credentials "admin" \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --client-key=/etc/kubernetes/ssl/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

# Set context parameters (bind the cluster parameters and the user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="admin" \
  --kubeconfig=admin.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=admin.kubeconfig

6. Distribute the cluster config files

cd /opt/cert/k8s/
for i in sg-14 sg-15 sg-16; do
ssh root@$i  "mkdir -pv /etc/kubernetes/cfg"
scp ./*.kubeconfig root@$i:/etc/kubernetes/cfg
done

7. Create the cluster token

# Only needs to be created once
# You must use a token generated on your own machine
TLS_BOOTSTRAPPING_TOKEN=`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`

cat > token.csv << EOF
${TLS_BOOTSTRAPPING_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

# Distribute the cluster token, used for cluster TLS bootstrapping
# (kube-apiserver expects it at /etc/kubernetes/cfg/token.csv, see the config below)
[root@sg-14 k8s]# for i in sg-14 sg-15 sg-16;do
scp token.csv root@$i:/etc/kubernetes/cfg/
done

8. Deploy the components

Install each component so that it works properly.

Create the kube-apiserver config file

# Run on all master nodes
# Note: check whether KUBE_APISERVER_IP=`hostname -i` returns the correct IP address
# KUBE_APISERVER_IP=`hostname -i`
KUBE_APISERVER_IP="192.168.0.214"
cat > /etc/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--advertise-address=${KUBE_APISERVER_IP} \\
--default-not-ready-toleration-seconds=360 \\
--default-unreachable-toleration-seconds=360 \\
--max-mutating-requests-inflight=2000 \\
--max-requests-inflight=4000 \\
--default-watch-cache-size=200 \\
--delete-collection-workers=2 \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/16 \\
--service-node-port-range=30000-52767 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/etc/kubernetes/cfg/token.csv \\
--kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
--kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/log/kubernetes/k8s-audit.log \\
--etcd-servers=https://192.168.0.214:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem"
EOF



#########################################
etcd-servers: needs to be changed
service-node-port-range: set above 30000 to avoid conflicts
KUBE_APISERVER_IP: change it
Three flags are missing here; see the unit file below:
--service-account-issuer
--service-account-key-file
--service-account-signing-key-file
#########################################

Register the kube-apiserver service

## Generate service-account-key.pem and service-account.pem
mkdir -p /etc/kubernetes/apiserver && cd /etc/kubernetes/apiserver
openssl genrsa -out service-account-key.pem 4096
openssl req -new -x509 -days 365 -key service-account-key.pem -sha256 -out service-account.pem
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver \\
	--logtostderr=false \\
  --log-dir=/var/log/kubernetes \\
  --advertise-address=192.168.0.214 \\
  --default-not-ready-toleration-seconds=360 \\
  --default-unreachable-toleration-seconds=360 \\
  --max-mutating-requests-inflight=2000 \\
  --max-requests-inflight=4000 \\
  --default-watch-cache-size=200 \\
  --delete-collection-workers=2 \\
  --bind-address=0.0.0.0 \\
  --secure-port=6443 \\
  --allow-privileged=true \\
  --service-cluster-ip-range=10.96.0.0/16 \\
  --service-node-port-range=30000-52767 \\
  --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
  --authorization-mode=RBAC,Node \\
  --enable-bootstrap-token-auth=true \\
  --token-auth-file=/etc/kubernetes/cfg/token.csv \\
  --kubelet-client-certificate=/etc/kubernetes/ssl/server.pem \\
  --kubelet-client-key=/etc/kubernetes/ssl/server-key.pem \\
  --tls-cert-file=/etc/kubernetes/ssl/server.pem  \\
  --tls-private-key-file=/etc/kubernetes/ssl/server-key.pem \\
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/kubernetes/k8s-audit.log \\
  --etcd-servers=https://192.168.0.214:2379 \\
  --etcd-cafile=/etc/etcd/ssl/ca.pem \\
  --etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  --service-account-issuer=api \\
  --service-account-key-file=/etc/kubernetes/apiserver/service-account.pem \\
  --service-account-signing-key-file=/etc/kubernetes/apiserver/service-account-key.pem \\
  --v=2 
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
# reload systemd units
[root@sg-14 cfg]# systemctl daemon-reload
tail -f /var/log/messages    # watch the system log
systemctl restart kube-apiserver.service
systemctl status kube-apiserver.service
systemctl enable --now kube-apiserver.service

Make kube-apiserver highly available

Steps:
Install the HA software
Edit the keepalived config file
Edit the haproxy config file

Install the HA software

# Must be installed on all three master nodes
# keepalived + haproxy
[root@sg-14 ~]# yum install -y keepalived haproxy

Edit the keepalived config file

# Provides the VIP, fronting all master machines; the config differs per node
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf_bak

cd /etc/keepalived

# KUBE_APISERVER_IP=`hostname -i`
KUBE_APISERVER_IP="192.168.0.214"
cat > /etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_kubernetes {
    script "/etc/keepalived/check_kubernetes.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    mcast_src_ip ${KUBE_APISERVER_IP}
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.15.56
    }
}
EOF
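The vrrp_script block above references /etc/keepalived/check_kubernetes.sh, which the original never shows; a minimal sketch (assumption: fail over whenever kube-apiserver is not running):
cat > /etc/keepalived/check_kubernetes.sh << 'EOF'
#!/bin/bash
# exit non-zero when kube-apiserver is down, so keepalived lowers this node's priority
if ! pgrep kube-apiserver >/dev/null 2>&1; then
    exit 1
fi
EOF
chmod +x /etc/keepalived/check_kubernetes.sh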

############################################
With 3 masters there is still one MASTER and two BACKUPs.
master1: MASTER
state MASTER
mcast_src_ip: master1's own IP (each of the 3 masters uses its own IP)
priority: 100 (priority)
virtual_ipaddress {
        192.168.15.56  # virtual IP
    }

master2: BACKUP
state BACKUP
mcast_src_ip: master2's own IP
priority: 90 (priority)
virtual_ipaddress {
        192.168.15.56  # virtual IP
    }
Clients access 192.168.15.56 and reach whichever master currently holds the VIP.
############################################
[root@sg-14 /etc/keepalived]# systemctl enable --now keepalived

Edit the haproxy config file

# HA software, provides the load balancing
cat > /etc/haproxy/haproxy.cfg <<EOF
global
  maxconn  2000 
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind    *:8006
  mode    http
  stats   enable
  stats   hide-version
  stats   uri       /stats
  stats   refresh   30s
  stats   realm     Haproxy\ Statistics
  stats   auth      admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server kubernetes-master-01    192.168.15.51:6443  check inter 2000 fall 2 rise 2 weight 100
  server kubernetes-master-02    192.168.15.52:6443  check inter 2000 fall 2 rise 2 weight 100
  server kubernetes-master-03    192.168.15.53:6443  check inter 2000 fall 2 rise 2 weight 100
EOF
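Before starting the service, the config can be syntax-checked with haproxy's built-in check mode:
haproxy -c -f /etc/haproxy/haproxy.cfg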

##########################################
The bind port matches the KUBE_APISERVER port used in the kubeconfig files above (8443):
frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
weight 100  (server weight)

##########################################

[root@sg-14 /etc/keepalived]# systemctl enable --now haproxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.

9. Deploy TLS bootstrapping

The apiserver dynamically signs and issues certificates to the node nodes, automating certificate signing.

Create the cluster config file

# Only needs to run on one master node
export KUBE_APISERVER="https://192.168.0.214:8443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set client credentials; the token here must be the one from your own token.csv (see below)
kubectl config set-credentials "kubelet-bootstrap" \
  --token=a0efb61e41a2f006bbe49f1fc5591785 \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set context parameters (bind the cluster parameters and the user parameters together)
kubectl config set-context default \
  --cluster=kubernetes \
  --user="kubelet-bootstrap" \
  --kubeconfig=kubelet-bootstrap.kubeconfig

# Set the default context
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig


###########################################
token:cat /opt/cert/k8s/token.csv
a0efb61e41a2f006bbe49f1fc5591785,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
###########################################
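Rather than pasting the token by hand, it can be read straight out of token.csv (path from step 7 assumed):
TLS_BOOTSTRAPPING_TOKEN=$(awk -F',' 'NR==1{print $1}' /opt/cert/k8s/token.csv)
kubectl config set-credentials "kubelet-bootstrap" \
  --token=${TLS_BOOTSTRAPPING_TOKEN} \
  --kubeconfig=kubelet-bootstrap.kubeconfig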

Distribute the certificate

# Distribute the cluster config file
[root@sg-14 k8s]# for i in sg-14; do scp kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/cfg/; done

Create the low-privilege TLS bootstrap user

kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

10. Deploy kube-controller-manager

The controller-manager component monitors the entire cluster.

Edit the config file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--leader-elect=true \\
--cluster-name=kubernetes \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/12 \\
--service-cluster-ip-range=10.96.0.0/16 \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem  \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--kubeconfig=/etc/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=10s \\
--horizontal-pod-autoscaler-use-rest-clients=true"
EOF

Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start

[root@sg-14 ~]# systemctl daemon-reload 
[root@sg-14 ~]# systemctl enable --now kube-controller-manager.service 
[root@sg-14 ~]# systemctl status kube-controller-manager.service

11. Deploy kube-scheduler

Write the config file

# Run on all three master machines
cat > /etc/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--kubeconfig=/etc/kubernetes/cfg/kube-scheduler.kubeconfig \\
--leader-elect=true \\
--master=http://127.0.0.1:8080 \\
--bind-address=127.0.0.1 "
EOF

######################################
--master=http://127.0.0.1:8080   # the local entry point for operating the cluster
######################################

Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

Start

[root@sg-14 ~]# systemctl daemon-reload 
[root@sg-14 ~]# systemctl enable --now kube-scheduler.service 
[root@sg-14 ~]# systemctl status kube-scheduler.service 

Check cluster status

[root@sg-14 ~]# kubectl get cs
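If kubectl has no credentials configured yet, point it at the admin kubeconfig distributed in step 6 (path assumed from that step):
mkdir -p ~/.kube
cp /etc/kubernetes/cfg/admin.kubeconfig ~/.kube/config
kubectl get cs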

12. Deploy the kubelet service

Create the kubelet service config file

# Run on all three master nodes
KUBE_HOSTNAME=`hostname`

cat > /etc/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--hostname-override=${KUBE_HOSTNAME} \\
--container-runtime=docker \\
--kubeconfig=/etc/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/cfg/kubelet-bootstrap.kubeconfig \\
--config=/etc/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/etc/kubernetes/ssl \\
--image-pull-progress-deadline=15m \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sos/pause:3.2"
EOF

Create kubelet-config.yml

# Run on all three master nodes
# KUBE_HOSTNAME=`hostname -i`
KUBE_HOSTNAME="192.168.0.214"

cat > /etc/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${KUBE_HOSTNAME}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.96.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF

Register the kubelet service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kubelet.conf
ExecStart=/usr/local/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start

[root@sg-14 ~]# systemctl daemon-reload
[root@sg-14 ~]# systemctl enable --now kubelet.service
[root@sg-14 ~]# systemctl status kubelet.service

13. Deploy kube-proxy

Create the config file

# Run on all three master nodes
cat > /etc/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/var/log/kubernetes \\
--config=/etc/kubernetes/cfg/kube-proxy-config.yml"
EOF

Create kube-proxy-config.yml

# Run on all three master nodes
# KUBE_HOSTNAME=`hostname -i`
KUBE_HOSTNAME="192.168.0.214"
HOSTNAME=`hostname`
cat > /etc/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${KUBE_HOSTNAME}
healthzBindAddress: ${KUBE_HOSTNAME}:10256
metricsBindAddress: ${KUBE_HOSTNAME}:10249
clientConnection:
  burst: 200
  kubeconfig: /etc/kubernetes/cfg/kube-proxy.kubeconfig
  qps: 100
hostnameOverride: ${HOSTNAME}
clusterCIDR: 10.96.0.0/16
enableProfiling: true
mode: "ipvs"
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF

Register the service

# Run on all three master nodes
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/etc/kubernetes/cfg/kube-proxy.conf
ExecStart=/usr/local/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start

[root@sg-14 ~]# systemctl daemon-reload
[root@sg-14 ~]# systemctl enable --now kube-proxy.service 
[root@sg-14 ~]# systemctl status kube-proxy.service 

14. Join cluster nodes

View pending node join requests

# Only needs to run on one node
[root@k8s-m-01 /opt/cert/k8s]# kubectl get csr
NAME                                                   AGE    SIGNERNAME                                    REQUESTOR           CONDITION
node-csr-5AWYEWZ0DkF4DzHTOP00M2_Ne6on7XMwvryxbwsh90M   6m3s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-8_Rjm9D7z-04h400v_8RDHHCW3UGILeSRhxx-KkIWNI   6m3s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
node-csr-wlHMJiNAkMuPsQPoD6dan8QF4AIlm-x_hVYJt9DukIg   6m2s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

Approve the requests

# Only needs to run on one node
[root@k8s-m-01 /opt/cert/k8s]# kubectl certificate approve `kubectl get csr | grep "Pending" | awk '{print $1}'`
certificatesigningrequest.certificates.k8s.io/node-csr-5AWYEWZ0DkF4DzHTOP00M2_Ne6on7XMwvryxbwsh90M approved
certificatesigningrequest.certificates.k8s.io/node-csr-8_Rjm9D7z-04h400v_8RDHHCW3UGILeSRhxx-KkIWNI approved
certificatesigningrequest.certificates.k8s.io/node-csr-wlHMJiNAkMuPsQPoD6dan8QF4AIlm-x_hVYJt9DukIg approved
[root@k8s-m-01 /opt/cert/k8s]# kubectl get nodes
NAME       STATUS   ROLES    AGE   VERSION
k8s-m-01   Ready    <none>   13s   v1.18.8
k8s-m-02   Ready    <none>   12s   v1.18.8
k8s-m-03   Ready    <none>   12s   v1.18.8

15. Install the network plugin

This deployment uses the flannel network plugin.

Download and install the flannel package
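The tarball itself is never downloaded in the original; it is available from the flannel GitHub releases page (URL assumed for v0.11.0):
cd /opt/data && wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz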

# Only needs to run on one node (the binaries are then distributed to all)
[root@k8s-m-01 /opt/data]# tar -xf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-m-01 /opt/data]# for i in sg-14 sg-15 sg-16;do
> scp flanneld mk-docker-opts.sh root@$i:/usr/local/bin/
> done

Write the flannel config into the cluster database

# Only needs to run on one node
etcdctl \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.0.214:2379" \
mk /coreos.com/network/config '{"Network":"10.244.0.0/12", "SubnetLen": 21, "Backend": {"Type": "vxlan", "DirectRouting": true}}'

Register the flannel service

# Run on all three machines
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld address
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
  -etcd-cafile=/etc/etcd/ssl/ca.pem \\
  -etcd-certfile=/etc/etcd/ssl/etcd.pem \\
  -etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
  -etcd-endpoints=https://192.168.0.214:2379 \\
  -etcd-prefix=/coreos.com/network \\
  -ip-masq
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=always
RestartSec=5
StartLimitInterval=0
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

Modify the docker unit file

# Let flannel take over the docker network
sed -i '/ExecStart/s/\(.*\)/#\1/' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock' /usr/lib/systemd/system/docker.service
sed -i '/ExecReload/a EnvironmentFile=-/run/flannel/subnet.env' /usr/lib/systemd/system/docker.service

Start

# Start flannel first, then docker
[root@k8s-m-01 ~]# systemctl daemon-reload 
[root@k8s-m-01 ~]# systemctl enable --now flanneld.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
Created symlink from /etc/systemd/system/docker.service.requires/flanneld.service to /usr/lib/systemd/system/flanneld.service.
[root@k8s-m-01 ~]# systemctl restart docker

Verify the cluster network

# Have the cluster nodes ping each other's flannel addresses
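For example (the flannel.1 interface name comes from the vxlan backend; the actual subnet depends on what flannel allocated):
cat /run/flannel/subnet.env               # the subnet assigned to this node
ip addr show flannel.1                    # this node's flannel address
ping -c 3 <another node's flannel.1 IP>   # should succeed across nodes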

16. Install cluster DNS

# Only needs to run on one node
# Download the DNS deployment package
[root@k8s-m-01 ~]# wget https://github.com/coredns/deployment/archive/refs/heads/master.zip
[root@k8s-m-01 ~]# unzip master.zip
[root@k8s-m-01 ~]# cd deployment-master/kubernetes

# Run the deployment command
[root@k8s-m-01 ~/deployment-master/kubernetes]# ./deploy.sh -i 10.96.0.2 -s | kubectl apply -f -

# Verify cluster DNS
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl get pods -n kube-system
NAME                      READY   STATUS    RESTARTS   AGE
coredns-6ff445f54-m28gw   1/1     Running   0          48s

Verify the Cluster

# Bind the super-admin user (only needs to run on one server)
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

# Verify that cluster DNS and the cluster network work
[root@k8s-m-01 ~/deployment-master/kubernetes]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server:    10.96.0.2
Address 1: 10.96.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local