Kubernetes Cluster Binary Installation and Deployment

1. Preliminary Planning

Host Planning

IP Address       Hostname      Role    Software
192.168.16.129   k8s-master01  master  kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, haproxy, keepalived
192.168.16.130   k8s-master02  master  kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, haproxy, keepalived
192.168.16.131   k8s-master03  master  kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet
192.168.16.132   k8s-node1     node    kubelet, kube-proxy

Software Versions

Software     Version    Notes
CentOS 7     kernel 6.6 (upgraded via elrepo kernel-ml)
kubernetes   v1.21.10
etcd         v3.5.2
calico       v3.19.4
coredns      v1.8.4
docker       20.10.13   installed via yum
haproxy      1.5.18     installed via yum
keepalived   1.3.5      installed via yum

Network Address Planning

Network          CIDR             Notes
Node network     192.168.16.0/24
Service network  10.96.0.0/16
Pod network      10.244.0.0/16

2. Common Configuration for All Hosts

Set hostnames and configure /etc/hosts resolution

# cat /etc/hosts
192.168.16.129 k8s-master01
192.168.16.130 k8s-master02
192.168.16.131 k8s-master03
192.168.16.132 k8s-node1

Disable the firewall, SELinux, and the swap partition
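
A minimal sketch of the usual commands (assuming the swap entries live in /etc/fstab):

# systemctl disable --now firewalld
# setenforce 0
# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# swapoff -a
# sed -i '/swap/s/^/#/' /etc/fstab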

Configure time synchronization
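
For example, with chrony (assuming the stock CentOS 7 chrony package and its default pool servers):

# yum install chrony -y
# systemctl enable --now chronyd
# chronyc sources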

Configure ulimit settings

# vim /etc/security/limits.conf
*    soft     nofile     655360
*    hard     nofile     655360
*    soft     nproc      655350
*    hard     nproc      655350
*    soft     memlock    unlimited
*    hard     memlock    unlimited

Install and configure the IPVS management modules

# yum install ipvsadm ipset sysstat conntrack libseccomp -y
# modprobe -- ip_vs
# modprobe -- ip_vs_rr
# modprobe -- ip_vs_wrr
# modprobe -- ip_vs_sh
# modprobe -- nf_conntrack

# cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
# systemctl enable --now systemd-modules-load
# systemctl restart systemd-modules-load

Upgrade the kernel

# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
# yum install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
# yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
# grub2-set-default 0
# grub2-mkconfig -o /boot/grub2/grub.cfg
# reboot
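
After the reboot, confirm the new kernel is in use:

# uname -r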

Install utilities

# yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git lrzsz -y

3. High Availability Configuration

Install haproxy and keepalived on the master01 and master02 hosts.

haproxy configuration

# yum install haproxy keepalived -y
# cat /etc/haproxy/haproxy.cfg 
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
        maxconn 2000
        ulimit-n 16384
        log 127.0.0.1 local0 err

defaults
        log global
        mode http
        option httplog
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        timeout http-request 15s
        timeout http-keep-alive 15s



frontend monitor-in
        bind 0.0.0.0:33305
        mode http
        option httplog
        monitor-uri /monitor

frontend k8s-master
        bind 0.0.0.0:16443
        bind 127.0.0.1:16443
        mode tcp
        option tcplog
        tcp-request inspect-delay 5s
        default_backend k8s-master

backend k8s-master
        mode tcp
        option tcplog
        option tcp-check
        balance roundrobin
        default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
        server master01 192.168.16.129:6443 check
        server master02 192.168.16.130:6443 check
        server master03 192.168.16.131:6443 check

# systemctl enable --now haproxy

Verify in a browser: http://192.168.16.130:33305/monitor

keepalived configuration

# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
   script_user root
   enable_script_security
}

vrrp_script chk_apiserver {
   script "/etc/keepalived/check_apiserver.sh"
   interval 5
   weight -5
   fall 2
   rise 1
}

vrrp_instance VI_1 {
    state MASTER                        ## set to BACKUP on the standby node
    interface ens33
    mcast_src_ip 192.168.16.129         ## on the standby node, use its own IP address, 192.168.16.130
    virtual_router_id 51
    priority 101                        ## set priority 99 on the standby node
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass abc123
    }
    virtual_ipaddress {
        192.168.16.250
    }
    track_script {
        chk_apiserver
    }
}
# cat /etc/keepalived/check_apiserver.sh 
#!/bin/bash

err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
# systemctl enable --now keepalived
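
Once keepalived is up, the VIP should be bound on the MASTER node's interface (ens33 here):

# ip addr show ens33 | grep 192.168.16.250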

Configure passwordless SSH between the hosts
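
A minimal sketch using ssh-keygen and ssh-copy-id (run on master01; assumes root SSH login is permitted):

# ssh-keygen -t rsa -N '' -f /root/.ssh/id_rsa
# for i in k8s-master02 k8s-master03 k8s-node1; do ssh-copy-id root@$i; done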

4. Create Certificates with cfssl

Download the cfssl toolset used to issue and inspect certificates

# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 --no-check-certificate
# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 --no-check-certificate
# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 --no-check-certificate
# chmod +x cfssl*
# mv cfssl_linux-amd64 /usr/bin/cfssl
# mv cfssljson_linux-amd64 /usr/bin/cfssljson
# mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

Create the CA certificate

# cat ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ],
  "ca": {
      "expiry": "87600h"
  }
}

# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2023/11/13 11:00:31 [INFO] generating a new CA key and certificate from CSR
2023/11/13 11:00:31 [INFO] generate received request
2023/11/13 11:00:31 [INFO] received CSR
2023/11/13 11:00:31 [INFO] generating key: rsa-2048
2023/11/13 11:00:31 [INFO] encoded CSR
2023/11/13 11:00:31 [INFO] signed certificate with serial number 574209306477940501530924598323722273337915651468

## Configure the CA signing policy
# cat ca-config.json 
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}

5. etcd Cluster Installation

Generate the etcd certificates

# cat etcd-csr.json 
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.150.184",
    "192.168.150.185",
    "192.168.150.186"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

Deploy the etcd cluster

# wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
# tar -zxvf etcd-v3.5.2-linux-amd64.tar.gz
# cp -p etcd-v3.5.2-linux-amd64/etcd* /usr/bin/
# mkdir /etc/etcd
# scp /usr/bin/etcd* k8s-master02:/usr/bin/
# scp /usr/bin/etcd* k8s-master03:/usr/bin/
# vim /etc/etcd/etcd.conf
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.16.129:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.16.129:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.16.129:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.16.129:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.16.129:2380,etcd2=https://192.168.16.130:2380,etcd3=https://192.168.16.131:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


# mkdir -p /etc/etcd/ssl
# mkdir  -p /var/lib/etcd/default.etcd
# cp ca*.pem /etc/etcd/ssl/
# cp etcd*.pem /etc/etcd/ssl/
# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --client-cert-auth \
  --peer-client-cert-auth
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

## Note: in the copied etcd.conf, update the IP addresses and ETCD_NAME for each host
# scp /etc/etcd/etcd.conf k8s-master02:/etc/etcd/
# scp /etc/etcd/etcd.conf k8s-master03:/etc/etcd/
# scp /etc/etcd/ssl/* k8s-master02:/etc/etcd/ssl/
# scp /etc/etcd/ssl/* k8s-master03:/etc/etcd/ssl/
# scp /etc/systemd/system/etcd.service k8s-master02:/etc/systemd/system/
# scp /etc/systemd/system/etcd.service k8s-master03:/etc/systemd/system/
## Start etcd on all three hosts
# systemctl daemon-reload
# systemctl enable --now etcd
# systemctl status etcd

# ETCDCTL_API=3 /usr/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints="https://192.168.16.129:2379,https://192.168.16.130:2379,https://192.168.16.131:2379" endpoint health --write-out=table
+-----------------------------+--------+-------------+-------+
|          ENDPOINT           | HEALTH |    TOOK     | ERROR |
+-----------------------------+--------+-------------+-------+
| https://192.168.16.129:2379 |   true |  9.964038ms |       |
| https://192.168.16.131:2379 |   true | 10.207664ms |       |
| https://192.168.16.130:2379 |   true | 11.264541ms |       |
+-----------------------------+--------+-------------+-------+
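
The member list can be checked the same way (same TLS flags as the health check above):

# ETCDCTL_API=3 /usr/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints="https://192.168.16.129:2379" member list --write-out=table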

6. Kubernetes Master Component Installation

# wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz --no-check-certificate
# tar -zxvf kubernetes-server-linux-amd64.tar.gz 
# cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} /usr/bin/
# scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} k8s-master02:/usr/bin
# scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} k8s-master03:/usr/bin
# scp kubernetes/server/bin/{kubelet,kube-proxy} k8s-master02:/usr/bin
# scp kubernetes/server/bin/{kubelet,kube-proxy} k8s-master03:/usr/bin
# scp kubernetes/server/bin/{kubelet,kube-proxy} k8s-master01:/usr/bin
# scp kubernetes/server/bin/{kubelet,kube-proxy} k8s-node1:/usr/bin

# mkdir -p /etc/kubernetes
# mkdir -p /etc/kubernetes/ssl
# mkdir -p /var/log/kubernetes

Install kube-apiserver

# vim kube-apiserver-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.16.129",
    "192.168.16.130",
    "192.168.16.131",
    "192.168.16.132",
    "192.168.16.250",
    "10.96.0.1",
    "localhost",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

# cat > token.csv << EOF
> $(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
> EOF

# vim /etc/kubernetes/kube-apiserver.conf
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,ResourceQuota,DefaultStorageClass \
--anonymous-auth=false \
--bind-address=192.168.16.129 \
--secure-port=6443 \
--advertise-address=192.168.16.129 \
--insecure-port=0 \
--service-cluster-ip-range=10.96.0.0/16 \
--authorization-mode=RBAC,Node \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth=true \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.16.129:2379,https://192.168.16.130:2379,https://192.168.16.131:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--log-dir=/var/log/kubernetes \
--alsologtostderr=true \
--logtostderr=false \
--v=4"



# vim /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
After=etcd.service
Wants=etcd.service
 
[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/bin/kube-apiserver $KUBE_APISERVER_OPTS 
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# cp ca*.pem /etc/kubernetes/ssl/
# cp kube-apiserver*.pem /etc/kubernetes/ssl/
# cp token.csv /etc/kubernetes/
# scp /etc/kubernetes/token.csv k8s-master02:/etc/kubernetes/
# scp /etc/kubernetes/token.csv k8s-master03:/etc/kubernetes/
# scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master02:/etc/kubernetes/ssl/
# scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master03:/etc/kubernetes/ssl/
# scp /etc/kubernetes/ssl/ca*.pem k8s-master02:/etc/kubernetes/ssl/
# scp /etc/kubernetes/ssl/ca*.pem k8s-master03:/etc/kubernetes/ssl/
# scp /etc/kubernetes/kube-apiserver.conf k8s-master02:/etc/kubernetes/               # update the bind/advertise IP addresses for that host
# scp /etc/kubernetes/kube-apiserver.conf k8s-master03:/etc/kubernetes/               # update the bind/advertise IP addresses for that host
# scp /etc/systemd/system/kube-apiserver.service k8s-master02:/etc/systemd/system/
# scp /etc/systemd/system/kube-apiserver.service k8s-master03:/etc/systemd/system/
# systemctl daemon-reload
# systemctl enable --now kube-apiserver
# systemctl status kube-apiserver
# curl --insecure https://192.168.16.129:6443
# curl --insecure https://192.168.16.250:16443
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {
    
  },
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401

安装kubectl

# cat admin-csr.json 
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",             
      "OU": "system"
    }
  ]
}

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# cp admin*.pem /etc/kubernetes/ssl/

## Generate the kubeconfig file
# kubectl config set-cluster kubernetes --certificate-authority=ca.pem  --embed-certs=true --server=https://192.168.16.250:16443 --kubeconfig=kube.config
## Set the admin client certificate
# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config 
## Set the security context
# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config 
# kubectl config use-context kubernetes --kubeconfig=kube.config 


# mkdir ~/.kube
# cp kube.config ~/.kube/config
# kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config 
# export KUBECONFIG=$HOME/.kube/config
# kubectl cluster-info
# kubectl get componentstatuses
# kubectl get all --all-namespaces
# scp /root/.kube/config k8s-master02:/root/.kube
# scp /root/.kube/config k8s-master03:/root/.kube

### Configure kubectl command completion
# yum install -y bash-completion
# source /usr/share/bash-completion/bash_completion
# source <(kubectl completion bash)
# kubectl completion bash > ~/.kube/completion.bash.inc
# echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile
# source $HOME/.bash_profile

Install kube-controller-manager

# vim kube-controller-manager-csr.json 
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.16.129",
      "192.168.16.130",
      "192.168.16.131"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager


# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.16.250:16443 --kubeconfig=kube-controller-manager.kubeconfig
# kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
# kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

# vim /etc/kubernetes/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
  --secure-port=10257 \
  --bind-address=127.0.0.1 \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --service-cluster-ip-range=10.96.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.244.0.0/16 \
  --experimental-cluster-signing-duration=87600h \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --leader-elect=true \
  --feature-gates=RotateKubeletServerCertificate=true \
  --controllers=*,bootstrapsigner,tokencleaner \
  --horizontal-pod-autoscaler-use-rest-clients=true \
  --horizontal-pod-autoscaler-sync-period=10s \
  --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
  --use-service-account-credentials=true \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2"


# cat /etc/systemd/system/kube-controller-manager.service 
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

# cp kube-controller-manager*.pem /etc/kubernetes/ssl/
# cp kube-controller-manager.kubeconfig /etc/kubernetes/
# systemctl enable --now kube-controller-manager
# systemctl status kube-controller-manager
# scp /etc/kubernetes/ssl/kube-controller-manager*.pem k8s-master02:/etc/kubernetes/ssl
# scp /etc/kubernetes/ssl/kube-controller-manager*.pem k8s-master03:/etc/kubernetes/ssl
# scp /etc/kubernetes/kube-controller-manager.* k8s-master02:/etc/kubernetes/
# scp /etc/kubernetes/kube-controller-manager.* k8s-master03:/etc/kubernetes/

# openssl x509 -in /etc/kubernetes/ssl/kube-controller-manager.pem -noout -text
# kubectl get componentstatuses

Install kube-scheduler

# vim kube-scheduler-csr.json
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.16.129",
      "192.168.16.130",
      "192.168.16.131"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Beijing",
        "L": "Beijing",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.16.250:16443 --kubeconfig=kube-scheduler.kubeconfig
# kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
# kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# vim /etc/kubernetes/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/var/log/kubernetes \
--v=2"

# vim /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

# cp kube-scheduler*.pem /etc/kubernetes/ssl/
# cp kube-scheduler.kubeconfig /etc/kubernetes/
# scp /etc/kubernetes/ssl/kube-scheduler* k8s-master02:/etc/kubernetes/ssl/
# scp /etc/kubernetes/ssl/kube-scheduler* k8s-master03:/etc/kubernetes/ssl/
# scp /etc/kubernetes/kube-scheduler.* k8s-master02:/etc/kubernetes/
# scp /etc/kubernetes/kube-scheduler.* k8s-master03:/etc/kubernetes/
# scp /etc/systemd/system/kube-scheduler.service k8s-master02:/etc/systemd/system/
# scp /etc/systemd/system/kube-scheduler.service k8s-master03:/etc/systemd/system/
# systemctl daemon-reload
# systemctl enable --now kube-scheduler
# systemctl status kube-scheduler
# kubectl get componentstatuses
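
With all control-plane components running, the output should look roughly like this:

NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
etcd-1               Healthy   {"health":"true"}
etcd-2               Healthy   {"health":"true"}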

7. Kubernetes Node Component Installation

Install docker

# wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# yum install docker-ce -y
# vim /etc/docker/daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"]
}

# systemctl enable --now docker
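
Verify that docker picked up the systemd cgroup driver:

# docker info | grep -i 'cgroup driver'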

Install kubelet

# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.16.250:16443 --kubeconfig=kubelet-bootstrap.kubeconfig
# kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
# kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

# Create the cluster role bindings
# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

# kubectl describe clusterrolebinding cluster-system-anonymous
# kubectl describe clusterrolebinding kubelet-bootstrap


# cat > kubelet.json << EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.16.129",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",                    
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF


# cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.json \
  --network-plugin=cni \
  --rotate-certificates \
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
# cp kubelet.json /etc/kubernetes/
# cp kubelet.service /usr/lib/systemd/system/

## Note: update the "address" field in kubelet.json to each host's own IP before starting kubelet there
# for i in k8s-master02 k8s-master03 k8s-node1;do scp kubelet-bootstrap.kubeconfig kubelet.json $i:/etc/kubernetes/;done
# for i in k8s-master02 k8s-master03 k8s-node1;do scp ca.pem $i:/etc/kubernetes/ssl/;done
# for i in k8s-master02 k8s-master03 k8s-node1;do scp kubelet.service $i:/usr/lib/systemd/system/;done

# mkdir -p /var/lib/kubelet
# mkdir -p /var/log/kubernetes
# systemctl daemon-reload
# systemctl enable --now kubelet
# systemctl status kubelet
# kubectl get csr
# kubectl get nodes
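
If a CSR shows as Pending in the kubectl get csr output, approve it so the node can register:

# kubectl certificate approve <csr-name>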

Install kube-proxy

# cat > kube-proxy-csr.json << "EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

# Set the cluster entry
# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.16.250:16443 --kubeconfig=kube-proxy.kubeconfig
# Set the credentials
# kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
# Set the context
# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
# Use the context
# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.16.129
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.16.129:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.16.129:10249
mode: "ipvs"
EOF


# cat >  kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# cp kube-proxy*.pem /etc/kubernetes/ssl/
# cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
# cp kube-proxy.service /usr/lib/systemd/system/

## Note: update bindAddress, healthzBindAddress, and metricsBindAddress in kube-proxy.yaml to each host's own IP
# for i in k8s-master02 k8s-master03 k8s-node1;do scp kube-proxy.kubeconfig kube-proxy.yaml $i:/etc/kubernetes/;done
# for i in k8s-master02 k8s-master03 k8s-node1;do scp kube-proxy.service $i:/usr/lib/systemd/system/;done

# mkdir -p /var/lib/kube-proxy
# systemctl daemon-reload
# systemctl enable --now kube-proxy
# systemctl status kube-proxy
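
With kube-proxy in ipvs mode, the kubernetes service should now appear in the virtual server table:

# ipvsadm -Ln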

Deploy the calico network component

# wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml --no-check-certificate
# Uncomment the two lines below and make sure the value matches the Pod CIDR
# vim calico.yaml
3683             - name: CALICO_IPV4POOL_CIDR
3684               value: "10.244.0.0/16"
# kubectl apply -f calico.yaml
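
Watch the calico pods until they are Running; the nodes should turn Ready once the CNI is up:

# kubectl get pods -n kube-system -o wide
# kubectl get nodes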

Install coredns

# cat >  coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local  in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2 # must match the clusterDNS IP set in kubelet.json
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
 
EOF

# kubectl apply -f coredns.yaml
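
A quick resolution check (assuming the busybox:1.28 image can be pulled on the nodes):

# kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default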