k8s v1.24.3 binary high-availability deployment notes

There are already plenty of HA deployment guides for v1.24.x online. These notes combine the better ones with an actual hands-on deployment and call out the points that need attention at each step.

This write-up is mainly a personal study record; feel free to use it as a reference.

1. Environment

## 1.1 Host plan
#Hostname IP address Role Software
k8s-master01 192.168.27.21 master node kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy
k8s-master02 192.168.27.22 master node kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy
k8s-master03 192.168.27.23 master node kube-apiserver、kube-controller-manager、kube-scheduler、etcd、kubelet、kube-proxy
k8s-node01 192.168.27.24 worker node kubelet、kube-proxy
k8s-node02 192.168.27.25 worker node kubelet、kube-proxy
lb 192.168.27.20
## 1.2 Software and versions
#Software Version
kernel 5.18.0-1.el8
Ubuntu 22.04
kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy v1.24.3
etcd v3.5.4
containerd v1.6.6
cfssl v1.6.1
cni v1.1.1
crictl v1.24.2
haproxy v1.8.27
keepalived v2.1.5
#Network segments
Physical hosts: 192.168.27.0/24
service: 10.96.0.0/12
pod: 172.16.0.0/12
#It is recommended to run the etcd cluster on hosts separate from the k8s cluster

2. Download the required packages

#Selectively download the tools you need
#1. Download the kubernetes 1.24.x binary package
#GitHub binary download page: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.24.md
wget https://dl.k8s.io/v1.24.3/kubernetes-server-linux-amd64.tar.gz
#2. Download the etcdctl binary package
#GitHub binary download page: https://github.com/etcd-io/etcd/releases
wget https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
#3. docker-ce binary package
#Binary download page: https://download.docker.com/linux/static/stable/x86_64/
#Download a 20.10.x version here
wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.14.tgz
#4. Download the containerd binary package
#GitHub download page: https://github.com/containerd/containerd/releases
#Download the containerd bundle that includes the cni plugins.
wget https://github.com/containerd/containerd/releases/download/v1.6.6/cri-containerd-cni-1.6.6-linux-amd64.tar.gz
#5. Download the cfssl binaries
#GitHub binary download page: https://github.com/cloudflare/cfssl/releases
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
#6. Download the cni plugins
#GitHub download page: https://github.com/containernetworking/plugins/releases
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
#7. Download the crictl client binary
#GitHub download page: https://github.com/kubernetes-sigs/cri-tools/releases
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz

3. Package directory layout

root@k8s-master01:~/urbancabin# pwd
/root/urbancabin
#Upload the packages to /root/urbancabin #(on all nodes, for convenience)
#(layout after unpacking)
root@k8s-master01:~/urbancabin# ls
bootstrap coredns.yaml dashboard.yaml ingress metrics-server
calico.yaml cri-containerd-cni-1.6.6-linux-amd64.tar.gz deployments.yaml kubernetes-server-linux-amd64.tar.gz pki
cni-plugins-linux-amd64-v1.1.1.tgz crictl-v1.24.2-linux-amd64.tar.gz docker-20.10.14.tgz kubernetes-v1.24.3
coredns dashboard-user.yaml etcd-v3.5.4-linux-amd64.tar.gz kubernetes-v1.24.3.tar
root@k8s-master01:~/urbancabin# rm -rf kubernetes-v1.24.3*
root@k8s-master01:~/urbancabin# ls
bootstrap coredns.yaml dashboard.yaml ingress
calico.yaml cri-containerd-cni-1.6.6-linux-amd64.tar.gz deployments.yaml kubernetes-server-linux-amd64.tar.gz
cni-plugins-linux-amd64-v1.1.1.tgz crictl-v1.24.2-linux-amd64.tar.gz docker-20.10.14.tgz metrics-server
coredns dashboard-user.yaml etcd-v3.5.4-linux-amd64.tar.gz pki
root@k8s-master01:~/urbancabin# tree -L 2
.
├── bootstrap
│ └── bootstrap.secret.yaml
├── calico.yaml
├── cni-plugins-linux-amd64-v1.1.1.tgz
├── coredns
│ └── coredns.yaml
├── coredns.yaml
├── cri-containerd-cni-1.6.6-linux-amd64.tar.gz
├── crictl-v1.24.2-linux-amd64.tar.gz
├── dashboard-user.yaml
├── dashboard.yaml
├── deployments.yaml
├── docker-20.10.14.tgz
├── etcd-v3.5.4-linux-amd64.tar.gz
├── ingress
│ ├── backend.yaml
│ ├── cby.yaml
│ ├── deploy.yaml
│ └── ingress-demo-app.yaml
├── kubernetes-server-linux-amd64.tar.gz
├── metrics-server
│ └── metrics-server.yaml
└── pki
├── admin-csr.json
├── apiserver-csr.json
├── ca-config.json
├── ca-csr.json
├── etcd-ca-csr.json
├── etcd-csr.json
├── front-proxy-ca-csr.json
├── front-proxy-client-csr.json
├── kube-proxy-csr.json
├── kubelet-csr.json
├── manager-csr.json
└── scheduler-csr.json
5 directories, 30 files

4. Base OS configuration for k8s

#4.1. Configure the IP address
#Ubuntu
root@k8s-master01:~# vim /etc/netplan/00-installer-config.yaml
root@k8s-master01:~#
root@k8s-master01:~# cat /etc/netplan/00-installer-config.yaml
# This is the network config written by 'subiquity'
network:
  ethernets:
    ens33:
      dhcp4: false
      addresses:
        - 192.168.27.21/24
      routes:
        - to: default
          via: 192.168.27.1
      nameservers:
        addresses: [8.8.8.8,114.114.114.114]
  version: 2
root@k8s-master01:~# netplan apply
#Centos7
root@k8s-master01:~# cat /etc/sysconfig/network-scripts/ifcfg-eth0
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="none"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
NAME="eth0"
DEVICE="eth0"
ONBOOT="yes"
IPADDR="172.16.68.34"
PREFIX="23"
GATEWAY="172.16.68.1"
DNS1="114.114.114.114"
root@k8s-master01:~# systemctl restart network
#4.2. Set the hostname (run the matching command on each host)
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
#4.3. Configure the package sources
#Ubuntu
# Back up the original sources file
sudo cp /etc/apt/sources.list /etc/apt/sources.list.back
# Switch to the Aliyun mirror
sudo sed -i s#archive.ubuntu#mirrors.aliyun#g /etc/apt/sources.list
sudo sed -i s#security.ubuntu#mirrors.aliyun#g /etc/apt/sources.list
root@k8s-master01:~/urbancabin# cat /etc/apt/sources.list
#Aliyun mirror (note: these entries use the focal/20.04 codename; on Ubuntu 22.04 the codename is jammy)
deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse
#deb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
#deb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse
# Refresh the package lists
sudo apt-get -y update
# Upgrade installed packages
sudo apt-get -y upgrade
#Centos7
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.tuna.tsinghua.edu.cn|g' \
-i.bak \
/etc/yum.repos.d/CentOS-*.repo
#CentOS 8
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
-i.bak \
/etc/yum.repos.d/CentOS-*.repo
#Private repository
sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://10.0.0.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo
#4.4. Install required utilities
#Ubuntu
apt install wget jq psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl net-tools -y
#Centos7\8
yum -y install wget jq psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
#4.5. Disable the firewall
#Ubuntu
systemctl stop ufw
systemctl disable ufw
#Centos
systemctl stop firewalld
systemctl disable firewalld
#4.6 Disable swap
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0
cat /etc/fstab
# /dev/mapper/centos-swap swap swap defaults 0 0
#4.7. Configure time synchronization (except the lb node)
# Server side
# On CentOS replace apt with yum (note: the chrony config file is /etc/chrony/chrony.conf on Ubuntu and /etc/chrony.conf on CentOS)
apt install chrony -y
cat > /etc/chrony.conf << EOF
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.27.0/24
local stratum 10
keyfile /etc/chrony/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl restart chronyd
systemctl enable chrony
# Client side
apt install chrony -y
cat > /etc/chrony.conf << EOF
pool 192.168.27.21 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
cat /etc/chrony.conf | grep -v "^#" | grep -v "^$"
pool 192.168.27.21 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
systemctl restart chronyd ; systemctl enable chrony
# One-line client setup
apt install chrony -y ; sed -i "s#2.centos.pool.ntp.org#192.168.27.21#g" /etc/chrony.conf ; systemctl restart chronyd ; systemctl enable chronyd
#Verify from a client
chronyc sources -v
#4.8 Configure ulimit
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
#Set the timezone: cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
#Switch to 24-hour time: echo "LC_TIME=en_DK.UTF-8" >> /etc/default/locale
#reboot
#4.9. Configure passwordless SSH
apt install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="192.168.27.21 192.168.27.22 192.168.27.23 192.168.27.24 192.168.27.25"
export SSHPASS=urbancabin
for HOST in $IP;do
sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
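# Optional sanity check: passwordless SSH should now work to every host (each hostname should print without prompting)
for HOST in $IP;do ssh -o BatchMode=yes $HOST hostname;done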
#4.10. Install ipvsadm (except the lb node)
apt install ipvsadm ipset sysstat conntrack -y
#Configure the ipvs modules on all nodes. On kernel 4.19+ nf_conntrack_ipv4 has been renamed to nf_conntrack; on kernels below 4.18 keep using nf_conntrack_ipv4:
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 180224 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 176128 1 ip_vs
nf_defrag_ipv6 24576 2 nf_conntrack,ip_vs
nf_defrag_ipv4 16384 1 nf_conntrack
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
#4.11. Tune kernel parameters (except the lb node)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 0
EOF
sysctl --system
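# Optional sanity check: confirm the parameters took effect (the net.bridge.* values only show up after br_netfilter is loaded in section 5.1)
sysctl net.ipv4.ip_forward net.netfilter.nf_conntrack_max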
#4.12 Configure /etc/hosts on all nodes
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.27.21 k8s-master01
192.168.27.22 k8s-master02
192.168.27.23 k8s-master03
192.168.27.24 k8s-node01
192.168.27.25 k8s-node02
EOF
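# Optionally distribute the same hosts file from master01 to the other nodes (relies on the passwordless SSH set up in 4.9)
for HOST in 192.168.27.22 192.168.27.23 192.168.27.24 192.168.27.25;do scp /etc/hosts root@$HOST:/etc/hosts;done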

5. Install the k8s base components

5.1 Containerd

#Install Containerd as the runtime on all k8s nodes
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
#Create the directories required by the cni plugins
mkdir -p /etc/cni/net.d /opt/cni/bin
#Copy from k8s-master01 to the other nodes
NODE='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for i in $NODE;do scp cni-plugins-linux-amd64-v1.1.1.tgz root@$i:/root/urbancabin;done
#Extract on all nodes
tar xf cni-plugins-linux-amd64-v1.1.1.tgz -C /opt/cni/bin/
wget https://github.com/containerd/containerd/releases/download/v1.6.6/cri-containerd-cni-1.6.6-linux-amd64.tar.gz
#Copy to the other nodes
NODE='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for i in $NODE;do scp cri-containerd-cni-1.6.6-linux-amd64.tar.gz root@$i:/root/urbancabin;done
#Extract
tar -C / -xzf cri-containerd-cni-1.6.6-linux-amd64.tar.gz
#Create the systemd service file
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
#Configure the kernel modules required by Containerd
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
#Load the modules
systemctl restart systemd-modules-load.service
#Configure the kernel parameters required by Containerd
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply the kernel parameters
sysctl --system
#Generate the Containerd config file
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
#Edit the Containerd config file
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup
sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/chenby#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image
# Find containerd.runtimes.runc.options and add SystemdCgroup = true under it
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
[plugins."io.containerd.grpc.v1.cri".cni]
# Change the default sandbox_image to a reachable registry matching this version
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6"
#Start and enable at boot
systemctl daemon-reload
systemctl enable --now containerd
systemctl status containerd
#Configure the runtime endpoint for the crictl client
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
#Copy to the other nodes
NODE='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for i in $NODE;do scp crictl-v1.24.2-linux-amd64.tar.gz root@$i:/root/urbancabin;done
#Extract
tar xf crictl-v1.24.2-linux-amd64.tar.gz -C /usr/bin/
#Generate the config file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
#Test
systemctl restart containerd
crictl info
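# Optional smoke test: pull the sandbox image configured above to verify that containerd and the registry mirror are reachable
crictl pull registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6
crictl images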

6. Download and install k8s and etcd (run on master01 only)

#Unpack the k8s packages
# Download the packages
wget https://dl.k8s.io/v1.24.3/kubernetes-server-linux-amd64.tar.gz
wget https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
# Extract the k8s binaries
cd /root/urbancabin # package directory
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
# Extract the etcd binaries
tar -xf etcd-v3.5.4-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.5.4-linux-amd64/etcd{,ctl}
# List the contents of /usr/local/bin
ls /usr/local/bin/
containerd containerd-shim-runc-v1 containerd-stress critest ctr etcdctl kube-controller-manager kubelet kube-scheduler containerd-shim containerd-shim-runc-v2 crictl ctd-decoder etcd kube-apiserver kubectl kube-proxy
root@k8s-master01:~/urbancabin# ls /usr/local/bin/|wc -l
17
#Check the versions
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.24.3
[root@k8s-master01 ~]# etcdctl version
etcdctl version: 3.5.4
API version: 3.5
[root@k8s-master01 ~]#
#Ship the binaries to the other k8s nodes
Master='k8s-master02 k8s-master03'
Work='k8s-node01 k8s-node02'
for NODE in $Master; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
for NODE in $Work; do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
#Back to master01
cd /root/urbancabin
mkdir -p /opt/cni/bin
#Create the certificate request files
mkdir pki
cd pki
cat > admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
#server auth means a client can use this CA to verify certificates presented by servers
#client auth means a server can use this CA to verify certificates presented by clients
cat > etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "876000h"
}
}
EOF
cat > kubelet-csr.json << EOF
{
"CN": "system:node:\$NODE",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "system:nodes",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > etcd-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
cat > front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-proxy",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "Kubernetes-manual"
}
]
}
EOF
cd ..
mkdir bootstrap
cd bootstrap
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-c8ad9c
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
description: "The default bootstrap token generated by 'kubelet '."
token-id: c8ad9c
token-secret: 2e4d610cf3e7426e
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
EOF
cd ..
mkdir coredns
cd coredns
cat > coredns.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
cd ..
mkdir metrics-server
cd metrics-server
cat > metrics-server.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
image: registry.cn-beijing.aliyuncs.com/dotbalo/metrics-server:0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
- name: ca-ssl
mountPath: /etc/kubernetes/pki
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
EOF

7. Generate the certificates

# Download the certificate tooling on master01 (optional; the binaries are already in the package directory)
# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64" -O /usr/local/bin/cfssl
# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64" -O /usr/local/bin/cfssljson
# wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64 -O /usr/local/bin/cfssl-certinfo
#cfssl is a PKI/TLS toolkit written in Go and open-sourced by CloudFlare. The main programs are:
#- cfssl, the CFSSL command-line tool
#- cfssljson, which takes the JSON output from cfssl and writes the certificates, keys, CSRs and bundles to files.
# The binaries are included in the uploaded package directory
cd /root/urbancabin/
chmod +x cfssl*
mv cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
mv cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_1.6.1_linux_amd64 /usr/local/bin/cfssl-certinfo
#chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
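# Optional check: the tool should now be on PATH and report its version
cfssl version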
#7.1. Generate the etcd certificates
#Unless otherwise noted, run the following on all master nodes
#7.1.1 Create the certificate directory on all master nodes
mkdir /etc/etcd/ssl -p
#7.1.2 Generate the etcd certificates on master01
cd pki
# Generate the etcd certificate and key (if you might scale out later, reserve a few extra IPs in the hostname list)
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,k8s-node01,k8s-node02,192.168.27.21,192.168.27.22,192.168.27.23,192.168.27.24,192.168.27.25 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
#7.1.3 Copy the certificates to the other master nodes
Master='k8s-master02 k8s-master03'
for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done;
#7.2. Generate the k8s certificates
#Unless otherwise noted, run the following on all master nodes
#7.2.1 Create the certificate directory on all k8s nodes
mkdir -p /etc/kubernetes/pki
#7.2.2 Generate the k8s certificates on master01
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# Generate the root CA; a few extra IPs are listed as reserved addresses for adding nodes later
# 10.96.0.1 is the first address of the service CIDR (it has to be computed from the CIDR); 192.168.27.20 is the HA VIP
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,192.168.27.20,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,192.168.27.21,192.168.27.22,192.168.27.23,192.168.27.24,192.168.27.25,192.168.27.26,192.168.27.27,192.168.27.28,192.168.27.29 \
-profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
#Notes:
#If the hosts field is not empty, it must list every IP (including the VIP) or domain name authorized to use this certificate. Since the certificate is used by the whole cluster, include all node IPs; to make future scale-out easier, reserve a few extra IPs.
#Also include the first IP of the service network (usually the first address of the service-cluster-ip-range passed to kube-apiserver, e.g. 10.96.0.1).
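#A quick way to compute that first service IP, assuming python3 is available on the host:
python3 -c 'import ipaddress; print(ipaddress.ip_network("10.96.0.0/12")[1])'
# prints 10.96.0.1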
#7.2.3 Generate the apiserver aggregation (front-proxy) certificates
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
# A warning is printed here; it can be ignored
cfssl gencert \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
#7.2.4 Generate the controller-manager certificates
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
# Set the cluster entry
# Define a cluster named kubernetes, bind the ca.pem root certificate to it and point it at the apiserver, so that the account, the certificate and the apiserver are associated
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.27.20:6443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the context entry
# Bind the user account (system:kube-controller-manager) to the kubernetes cluster defined above; the context is used to send requests to that cluster with the given credentials and namespace.
# The binding is named system:kube-controller-manager@kubernetes
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the user entry
# Define the client credentials used to authenticate against the cluster: embed the controller-manager.pem client certificate and the controller-manager-key.pem key,
# which are used to verify requests made by the system:kube-controller-manager account
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the default context
# Persist all of the above into the kubeconfig file
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
#7.2.5 Generate the scheduler certificates
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.27.20:6443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
#7.2.6 Generate the kubernetes-admin certificates
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.27.20:6443 \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
#7.2.7 Generate the kube-proxy certificates
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.27.20:6443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
#7.2.8 Create the ServiceAccount key pair (used to sign service account tokens)
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
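# Optional check: verify the generated ServiceAccount private key is valid
openssl rsa -in /etc/kubernetes/pki/sa.key -check -noout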
#7.2.9 Send the certificates to the other master nodes (masters only)
#Create the directory on the other master nodes
mkdir /etc/kubernetes/pki/ -p
for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done; for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
#7.2.10 Check the certificates
ls /etc/kubernetes/pki/
admin.csr ca.csr front-proxy-ca.csr kube-proxy.csr scheduler-key.pem
admin-key.pem ca-key.pem front-proxy-ca-key.pem kube-proxy-key.pem scheduler.pem
admin.pem ca.pem front-proxy-ca.pem kube-proxy.pem
apiserver.csr controller-manager.csr front-proxy-client.csr sa.key
apiserver-key.pem controller-manager-key.pem front-proxy-client-key.pem sa.pub
apiserver.pem controller-manager.pem front-proxy-client.pem scheduler.csr
# 26 files in total is correct
ls /etc/kubernetes/pki/ |wc -l
26
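# Optional: print the expiry date of each certificate (they were signed for 876000h, roughly 100 years); key files simply report an error here
for CERT in /etc/kubernetes/pki/*.pem; do echo -n "$CERT: "; openssl x509 -in $CERT -noout -enddate 2>/dev/null || echo "not a certificate"; done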

8. Configure the k8s system components

8.1 etcd configuration

#8.1.1. master01 configuration
# For IPv6, simply replace the IPv4 addresses with IPv6 ones
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.27.21:2380'
listen-client-urls: 'https://192.168.27.21:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.27.21:2380'
advertise-client-urls: 'https://192.168.27.21:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.27.21:2380,k8s-master02=https://192.168.27.22:2380,k8s-master03=https://192.168.27.23:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
#8.1.2 master02 configuration
# For IPv6, simply replace the IPv4 addresses with IPv6 ones
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.27.22:2380'
listen-client-urls: 'https://192.168.27.22:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.27.22:2380'
advertise-client-urls: 'https://192.168.27.22:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.27.21:2380,k8s-master02=https://192.168.27.22:2380,k8s-master03=https://192.168.27.23:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
#8.1.3 master03 configuration
# For IPv6, simply replace the IPv4 addresses with IPv6 ones
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.27.23:2380'
listen-client-urls: 'https://192.168.27.23:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.27.23:2380'
advertise-client-urls: 'https://192.168.27.23:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.27.21:2380,k8s-master02=https://192.168.27.22:2380,k8s-master03=https://192.168.27.23:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
#8.2. Create the systemd service (run on all master nodes)
#8.2.1 Create etcd.service and start it
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
#8.2.2 Create the etcd certificate directory
mkdir -p /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
systemctl daemon-reload
systemctl enable --now etcd
systemctl status etcd
#8.2.3 Check the etcd status
# For IPv6, simply replace the IPv4 addresses with IPv6 ones
export ETCDCTL_API=3
etcdctl --endpoints="192.168.27.21:2379,192.168.27.22:2379,192.168.27.23:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.27.21:2379 | 5df904e597cae46d | 3.5.4 | 20 kB | true | false | 2 | 8 | 8 | |
| 192.168.27.22:2379 | 2eaa917d1d3596a6 | 3.5.4 | 20 kB | false | false | 2 | 8 | 8 | |
| 192.168.27.23:2379 | 7df1be7426c85145 | 3.5.4 | 20 kB | false | false | 2 | 8 | 8 | |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
#Check etcd performance
ETCDCTL_API=3
etcdctl --endpoints="192.168.27.21:2379,192.168.27.22:2379,192.168.27.23:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem check perf
59 / 60 Boooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooom ! 98.33%
PASS: Throughput is 151 writes/s
PASS: Slowest request took 0.066478s
PASS: Stddev is 0.002354s
PASS
export ETCDCTL_API=3
etcdctl --endpoints="192.168.27.21:2379,192.168.27.22:2379,192.168.27.23:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem member list
etcdctl --endpoints="192.168.27.21:2379,192.168.27.22:2379,192.168.27.23:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status
#8.3 Scheduled etcd data backups
mkdir -p /data/backup/etcd
vi /etc/etcd/backup.sh
#!/bin/sh
cd /var/lib
name="etcd-bak"`date "+%Y%m%d"`
tar -zcvf "/data/backup/etcd/"$name".tar.gz" etcd
chmod 755 /etc/etcd/backup.sh
crontab -e
00 00 * * * /etc/etcd/backup.sh
#Or, in the crontab, set the shell and PATH explicitly:
SHELL=/bin/bash
PATH=/sbin:/bin:/usr/sbin:/usr/bin
00 00 * * * (/path/to/backup.sh)
crontab -l
00 00 * * * /etc/etcd/backup.sh
systemctl restart crond
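#Alternatively, a consistent backup can be taken with an etcdctl snapshot instead of tarring the data directory (a sketch against a single endpoint):
etcdctl --endpoints="192.168.27.21:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem snapshot save /data/backup/etcd/snapshot-$(date +%Y%m%d).db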

9. High availability

#Run on all three master nodes:
apt -y install keepalived
#Configure keepalived on the MASTER node (master01)
#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
# Mind the interface name
interface ens33
mcast_src_ip 192.168.27.21
virtual_router_id 51
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.27.20
}
track_script {
chk_apiserver
} }
EOF
#Configure keepalived on the BACKUP node (master02)
# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
# Mind the interface name
interface ens33
mcast_src_ip 192.168.27.22
virtual_router_id 51
priority 50
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.27.20
}
track_script {
chk_apiserver
} }
EOF
#Configure keepalived on the BACKUP node (master03)
# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
# Mind the interface name
interface ens33
mcast_src_ip 192.168.27.23
virtual_router_id 51
priority 50
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
192.168.27.20
}
track_script {
chk_apiserver
} }
EOF
#Health-check script (all three master nodes)
cat > /etc/keepalived/check_apiserver.sh << EOF
#!/bin/bash
err=0
for k in \$(seq 1 3)
do
check_code=\$(pgrep kube-apiserver)
if [[ \$check_code == "" ]]; then
err=\$(expr \$err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ \$err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
#Start the service
systemctl daemon-reload
systemctl enable --now keepalived
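#Optional check: the VIP should now be bound on whichever master currently holds the MASTER role (interface name assumed to be ens33 as above)
ip addr show ens33 | grep 192.168.27.20
ping -c 3 192.168.27.20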

10. Configure the k8s components

#Create the following directories on all k8s nodes
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

10.1. Create the apiserver service (all master nodes)

#master01 configuration (on master02 and master03 change --advertise-address to the node's own IP)
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--logtostderr=true \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=192.168.27.21 \\
--service-cluster-ip-range=10.96.0.0/12 \\
--feature-gates=IPv6DualStack=true \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://192.168.27.21:2379,https://192.168.27.22:2379,https://192.168.27.23:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
#Start the apiserver (all master nodes)
systemctl daemon-reload && systemctl enable --now kube-apiserver && systemctl status kube-apiserver
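# Optional check: query the health endpoint through the VIP with the admin client certificate; it should return ok
curl --cacert /etc/kubernetes/pki/ca.pem --cert /etc/kubernetes/pki/admin.pem --key /etc/kubernetes/pki/admin-key.pem https://192.168.27.20:6443/healthz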

10.2. Configure the kube-controller-manager service

# Configure on all master nodes; the configuration is identical everywhere
# 172.16.0.0/12 is the pod CIDR; adjust it to your own network as needed
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--v=2 \\
--logtostderr=true \\
--bind-address=127.0.0.1 \\
--root-ca-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=2m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--feature-gates=IPv6DualStack=true \\
--service-cluster-ip-range=10.96.0.0/12 \\
--cluster-cidr=172.16.0.0/12 \\
--node-cidr-mask-size-ipv4=24 \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Start kube-controller-manager and check its status
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager

10.3. Configure the kube-scheduler service

#Configure on all master nodes; the configuration is identical everywhere
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--v=2 \\
--logtostderr=true \\
--bind-address=127.0.0.1 \\
--leader-elect=true \\
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Start the service and check its status
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler

10.4. TLS Bootstrapping

#Configure on master01
cd /root/urbancabin/bootstrap
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://192.168.27.20:6443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# The token is defined in bootstrap.secret.yaml; if you change it, update that file
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
------------------------------------------------------------------------------
#If other nodes should be able to run kubectl, run on a master node:
#scp /etc/kubernetes/admin.kubeconfig root@k8s-node01:/etc/kubernetes/
#scp /usr/local/bin/kubectl root@k8s-node01:/usr/local/bin
#Then on k8s-node01:
#mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
------------------------------------------------------------------------------
#Check the cluster status; continue only if everything is healthy
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
# Be sure to run this step - do not forget!!!
kubectl create -f bootstrap.secret.yaml
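# Optional check: the bootstrap token secret should now exist
kubectl get secret bootstrap-token-c8ad9c -n kube-system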

10.5. Node configuration

#Copy the certificates from master01 to the worker nodes
cd /etc/kubernetes/
for NODE in k8s-node01 k8s-node02 ; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
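# Optional check: confirm the files arrived on both worker nodes
for NODE in k8s-node01 k8s-node02; do echo $NODE; ssh $NODE ls /etc/kubernetes/ /etc/kubernetes/pki/; done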

10.6. Configure the kubelet service on all k8s nodes

#kubelet configuration
#Create the required directories on all k8s nodes
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-conf.yml \\
--container-runtime=remote \\
--runtime-request-timeout=15m \\
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
--cgroup-driver=systemd \\
--node-labels=node.kubernetes.io/node= \\
--feature-gates=IPv6DualStack=true
[Install]
WantedBy=multi-user.target
EOF
#Note: on CentOS 7, replace --node-labels=node.kubernetes.io/node='' with --node-labels=node.kubernetes.io/node=
#i.e. drop the ''
#Create the kubelet config file on all k8s nodes
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
#Start the kubelet
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet
systemctl status kubelet
#Check the cluster
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 17s v1.24.3
k8s-master02 Ready <none> 12s v1.24.3
k8s-master03 Ready <none> 9s v1.24.3
k8s-node01 Ready <none> 13s v1.24.3
k8s-node02 Ready <none> 15s v1.24.3
#------------------------------
# If kubectl reports No resources found, check whether the kubelet started properly and whether it logged errors (see the system log messages and the service status).
# Watch out - big pitfall: the files below are generated at kubelet startup. A wrong pem certificate here makes the node go "missing". In a test environment you can delete them and restart the kubelet; in production, a node that went down unexpectedly and then fails to re-register with the cluster after boot is usually related to this pem - fixing the symlink is enough.
[root@master-01 ~]# ll /var/lib/kubelet/pki/
total 16
-rw------- 1 root root 1208 Jun 16 20:43 kubelet-client-2022-06-16-20-43-31.pem
-rw------- 1 root root 1244 Jun 17 20:51 kubelet-client-2022-06-17-20-51-36.pem
lrwxrwxrwx 1 root root 59 Jun 17 20:51 kubelet-client-current.pem -> /var/lib/kubelet/pki/kubelet-client-2022-06-17-20-51-36.pem
-rw-r--r-- 1 root root 2258 Jun 16 20:43 kubelet.crt
-rw------- 1 root root 1679 Jun 16 20:43 kubelet.key
# Check the certificate validity period
[root@master-01 pki]# openssl x509 -in kubelet-client-2022-06-16-20-43-31.pem -noout -text
Validity # this validity window is the critical part
Not Before: Jun 16 12:38:31 2022 GMT
Not After : Jun 15 10:00:00 2027 GMT
#------------------------------
[root@k8s-master01 ~]#

10.7. kube-proxy configuration

#Send the kubeconfig to the other nodes
for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
for NODE in k8s-node01 k8s-node02 ; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
#Add the kube-proxy service file on all k8s nodes
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.yaml \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Add the kube-proxy configuration on all k8s nodes
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
#Note:
#Change the IP address in kube-proxy.yaml to the current host's IP, or simply use 0.0.0.0
#Start kube-proxy
systemctl daemon-reload
systemctl restart kube-proxy
systemctl enable --now kube-proxy
systemctl status kube-proxy
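#(Optional) A quick hedged check that kube-proxy really came up in IPVS mode; the /proxyMode endpoint is served on the metricsBindAddress 127.0.0.1:10249 configured above, and ipvsadm has to be installed separately (apt install ipvsadm):
curl -s 127.0.0.1:10249/proxyMode ; echo    # should print "ipvs"
ipvsadm -Ln | head -n 20                    # IPVS virtual servers for the service CIDR should appear here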

10.8.Install Calico

#The following steps are performed on master01 only
#Change the calico CIDR
#curl https://projectcalico.docs.tigera.io/manifests/calico-typha.yaml -o calico.yaml
# vim calico.yaml
vim calico-ipv6.yaml
# In the calico-config ConfigMap
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"assign_ipv6": "true"
},
- name: IP
value: "autodetect"
- name: IP6
value: "autodetect"
- name: CALICO_IPV4POOL_CIDR
value: "172.16.0.0/12"
- name: CALICO_IPV6POOL_CIDR
value: "fc00::/48"
- name: FELIX_IPV6SUPPORT
value: "true"
(optional)
- name: IP_AUTODETECTION_METHOD
  value: "interface=enp0s3"
#Replace with the Aliyun mirror registry: sed -i "s/docker.io\/calico\//registry.cn-beijing.aliyuncs.com\/dotbalo\//g" calico.yaml
#typha_service_name
#typha mode: enabling typha is recommended once a cluster using the Kubernetes API as the datastore exceeds 50 nodes.
#The Typha component helps Calico scale to a large number of nodes without putting excessive load on the Kubernetes API server.
#Change typha_service_name from "none" to "calico-typha"
#typha_service_name: "calico-typha"
# As mentioned in the Calico material, with more than 50 nodes the calico-typha Deployment should preferably run more than 3 replicas
#Reference for modifying the file:
#------------------------------------------------------------------------
# The CIDR below is the pod CIDR specified in section 1.3 and used in the earlier configuration files
# - name: CALICO_IPV4POOL_CIDR
# value: "172.16.0.0/12"
# - name: IP_AUTODETECTION_METHOD
# value: "interface=ens.*"
# Replace the images in the source file
#[root@master-01 tmp]# grep image calico.yaml
# image: docker.io/calico/cni:v3.23.1
# image: docker.io/calico/cni:v3.23.1
# image: docker.io/calico/node:v3.23.1
# image: docker.io/calico/kube-controllers:v3.23.1
# with the following, matching them one-to-one by the image name at the end
#root@k8s-master01:~/urbancabin# grep -i image calico.yaml
# - image: registry.cn-beijing.aliyuncs.com/dotbalo/typha:v3.23.1
# image: registry.cn-beijing.aliyuncs.com/dotbalo/cni:v3.23.1
# image: registry.cn-beijing.aliyuncs.com/dotbalo/cni:v3.23.1
# image: registry.cn-beijing.aliyuncs.com/dotbalo/pod2daemon-flexvol:v3.23.1
# image: registry.cn-beijing.aliyuncs.com/dotbalo/node:v3.23.1
# image: registry.cn-beijing.aliyuncs.com/dotbalo/kube-controllers:v3.23.1
#If any node has multiple NICs, calico needs the internal NIC name specified in the config file (a multi-NIC host does not necessarily get the cluster-configured IP; calico may pick a random interface, which can prevent it from starting). The NIC name mentioned earlier is what is used here.
# If you need this change, edit the file at the position shown below
#Add two lines under env:
# - name: IP_AUTODETECTION_METHOD
# value: "interface=enp0s3"
#containers:
# # Runs calico-node container on each Kubernetes node. This
# # container programs network policy and routes on each
# # host.
# - name: calico-node
# image: registry.cn-beijing.aliyuncs.com/dotbalo/node:v3.22.0 #use this line as the reference position
# .... omitted
# env:
# # Use Kubernetes API as the backing datastore.
# - name: DATASTORE_TYPE
# value: "kubernetes"
# # Typha support: controlled by the ConfigMap.
# - name: IP_AUTODETECTION_METHOD
# value: "interface=enp0s3"
#------------------------------------------------------------------------
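# A hedged sketch of making the same edits with sed instead of vim; it assumes the CALICO_IPV4POOL_CIDR entry in your downloaded calico.yaml still carries the upstream default "192.168.0.0/16" (verify in your copy before running):
# sed -i 's#192.168.0.0/16#172.16.0.0/12#g' calico.yaml
# sed -i 's#typha_service_name: "none"#typha_service_name: "calico-typha"#g' calico.yaml
# sed -i 's#docker.io/calico/#registry.cn-beijing.aliyuncs.com/dotbalo/#g' calico.yaml
# grep -E "CALICO_IPV4POOL_CIDR|typha_service_name|image:" calico.yaml   # verify the result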
# kubectl apply -f calico.yaml
kubectl apply -f calico-ipv6.yaml
#Check pod status
root@k8s-master01:~/urbancabin# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-647fd5f6d5-jd7jm 1/1 Running 2 (2m3s ago) 3m17s
calico-node-5pxz7 1/1 Running 1 (2m39s ago) 3m18s
calico-node-khrdq 1/1 Running 0 3m18s
calico-node-wmh6w 1/1 Running 0 3m18s
calico-typha-5c5cb6cd95-7mwmj 1/1 Running 0 3m18s
[root@k8s-master01 ~]#

10.9.Install CoreDNS

#The following steps are performed on master01 only
#Modify the file
cd coredns/
cat coredns.yaml | grep clusterIP:
clusterIP: 10.96.0.10
#sed -i "s#10.96.0.10#10.96.0.10#g" coredns.yaml    #only needed if your clusterDNS IP differs from 10.96.0.10
#Install
kubectl create -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
#If coredns fails to start, with errors like:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 106s default-scheduler Successfully assigned kube-system/coredns-6d86b45487-nlw9l to k8s-node02
Normal Pulling 105s kubelet Pulling image "registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6"
Normal Pulled 95s kubelet Successfully pulled image "registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6" in 9.81506302s
Warning Unhealthy 90s kubelet Readiness probe failed: Get "http://172.27.14.193:8181/ready": dial tcp 172.27.14.193:8181: connect: connection refused
Normal Created 51s (x4 over 94s) kubelet Created container coredns
Normal Pulled 51s (x3 over 91s) kubelet Container image "registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6" already present on machine
Normal Started 50s (x4 over 93s) kubelet Started container coredns
Warning BackOff 19s (x11 over 89s) kubelet Back-off restarting failed container
#On Ubuntu /etc/resolv.conf keeps being reset (systemd-resolved); run:
mv /etc/resolv.conf /etc/resolv.conf.bak
ln -s /run/systemd/resolve/resolv.conf /etc/
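# A hedged follow-up check (assumes systemd-resolved maintains /run/systemd/resolve/resolv.conf with the real upstream DNS servers, and that coredns carries the usual k8s-app=kube-dns label):
cat /etc/resolv.conf                                   # should now list real nameservers instead of 127.0.0.53
systemctl restart kubelet                              # kubelet re-reads resolvConf for newly created pods
kubectl -n kube-system get pods -l k8s-app=kube-dns    # coredns pods should reach Running/Ready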

10.10.Install Metrics Server

#The following steps are performed on master01 only
#Install Metrics-server
#In recent Kubernetes versions system resource metrics are collected by Metrics-server, which gathers memory, disk, CPU and network usage for nodes and Pods
# Install metrics server
cd metrics-server/
kubectl apply -f metrics-server.yaml
#Wait a moment, then check the status
root@k8s-master01:~/urbancabin/metrics-server# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 1099m 27% 2056Mi 35%
k8s-node01 266m 6% 886Mi 47%
k8s-node02 223m 5% 940Mi 50%
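# Once node metrics show up, pod-level metrics should work as well, e.g. (the --sort-by flag is optional):
kubectl top pod -A --sort-by=memory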

11.Cluster verification

#Deploy a pod resource
cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
# Check
kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 34s
#Use the pod to resolve the kubernetes service in the default namespace
kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 89m
kubectl exec busybox -n default -- nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
#Test whether cross-namespace resolution works
kubectl exec busybox -n default -- nslookup kube-dns.kube-system
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
#Every node must be able to reach the kubernetes svc on 443 and the kube-dns service on 53
telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
curl 10.96.0.10:53
curl: (52) Empty reply from server
#Pod-to-Pod traffic must work
kubectl get po -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
busybox 1/1 Running 0 17m 172.27.14.193 k8s-node02 <none> <none>
kubectl get po -n kube-system -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-647fd5f6d5-jd7jm 1/1 Running 2 (11m ago) 13m 172.25.244.192 k8s-master01 <none> <none>
calico-node-5pxz7 1/1 Running 1 (12m ago) 13m 192.168.27.24 k8s-node01 <none> <none>
calico-node-khrdq 1/1 Running 0 13m 192.168.27.21 k8s-master01 <none> <none>
calico-node-wmh6w 1/1 Running 0 13m 192.168.27.25 k8s-node02 <none> <none>
calico-typha-5c5cb6cd95-7mwmj 1/1 Running 0 13m 192.168.27.21 k8s-master01 <none> <none>
coredns-6d86b45487-4m4q8 1/1 Running 0 5m39s 172.17.125.1 k8s-node01 <none> <none>
metrics-server-6d9df85947-9tchk 1/1 Running 0 3m45s 172.25.244.194 k8s-master01 <none> <none>
#Exec into busybox and ping pods on other nodes (the calico-node pods use the host IPs)
kubectl exec -ti busybox -- sh
/ # ping 192.168.27.24
PING 192.168.27.24 (192.168.27.24): 56 data bytes
64 bytes from 192.168.27.24: seq=0 ttl=63 time=1.547 ms
64 bytes from 192.168.27.24: seq=1 ttl=63 time=0.623 ms
64 bytes from 192.168.27.24: seq=2 ttl=63 time=0.609 ms
64 bytes from 192.168.27.24: seq=3 ttl=63 time=0.742 ms
64 bytes from 192.168.27.24: seq=4 ttl=63 time=0.762 ms
# Connectivity here proves the pod can communicate across namespaces and across hosts
#Create three replicas; the 3 replicas should be spread across different nodes (delete them when done; see the -o wide check after the listing below)
cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
EOF
kubectl apply -f deployments.yaml
deployment.apps/nginx-deployment created
kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 6m25s
nginx-deployment-9456bbbf9-4bmvk 1/1 Running 0 8s
nginx-deployment-9456bbbf9-9rcdk 1/1 Running 0 8s
nginx-deployment-9456bbbf9-dqv8s 1/1 Running 0 8s
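# To confirm that the 3 replicas really landed on different nodes, add -o wide (the NODE column values will differ per cluster):
kubectl get pod -l app=nginx -o wide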
# Delete nginx
[root@k8s-master01 ~]# kubectl delete -f deployments.yaml

12.Install dashboard

wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard.yaml
wget https://raw.githubusercontent.com/cby-chen/Kubernetes/main/yaml/dashboard-user.yaml
kubectl apply -f dashboard.yaml
kubectl apply -f dashboard-user.yaml
#Change the dashboard svc to NodePort; skip this if it already is
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
type: NodePort
#Check the port number
kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.102.228.167 <none> 443:30001/TCP 3m3s
#Create a token
kubectl -n kubernetes-dashboard create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6Imc4enN6RHExeVBGODlLcUtVNDNDMW5CUTNvYjdOaUhRVFZRY0FpMXBrNUkifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjU3OTg3MDAwLCJpYXQiOjE2NTc5ODM0MDAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiNTU2ZDI3OWQtMTRkMC00NTA0LTg5MDctOWM2N2E1YmZkODU4In19LCJuYmYiOjE2NTc5ODM0MDAsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.4Z13czCtawREukNKXhmT6YsXLCgLBaluxbTl-uUd3W14SSTDgqmA-qijCU8itzKMjoYVaIy2sIvj-NfYGGOfUY-6On2Qe4X8rcIQvpubHKvWeySvwIJjJn3MJ0oS_kCLThS8fTaekHCrOEBL7i7iGCvnvpoA5Je8kPp-hgBgqi7GmpV5-r_OvdOgcD_LSwtRDGMMx7XV5NgTLgAP8BxdxHrw7he9JvhpT8TiaLHFG7AhwibugXqaleWOe_v98aWjkhiKbzAmeI-vck5WR3vrsnr6tvYCR-fNCfxYgtMc1Vklz76gXZg3CvqUOfc0HdVp543VIoaWsDf2bcp_X_t1ug
#Log in to the dashboard
https://192.168.27.20:30001/
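# Tokens created this way are short-lived by default; a hedged example of requesting a longer-lived one (the --duration flag is available in kubectl v1.24+):
# kubectl -n kubernetes-dashboard create token admin-user --duration=24h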

13.Install ingress

#How it works: the ingress-controller is essentially an nginx scheduler inside k8s. The ingress-controller shares the network space with the host; via the nodeport/hostport mapping, port 80 on the host is mapped to port 80 of the ingress-controller. You then create an Ingress resource, e.g. tomcat.od.com, so a request is really going to tomcat.od.com:80. Via a hosts entry or DNS (named), tomcat.od.com is resolved to the IP of the host where the ingress-controller pod runs, i.e. the request hits host-IP:80, which is the ingress-controller pod's port 80, so the traffic for tomcat.od.com is pulled into the ingress-controller. The ingress-controller talks to the API server, learns there is an Ingress resource for tomcat.od.com, that Ingress hands the traffic to the Service it specifies, and the Service load-balances to the pods via kube-proxy.
#Notes:
#1. As described above, traffic for tomcat.od.com has to land on some host; which host depends on where the ingress-controller pods run, so we pin them to specific nodes with a node selector (a node-labeling sketch follows the image pulls below)
#2. Be sure to pull the images in advance, otherwise ingress will not work on the selected nodes. In this article a daemonset-style placement is chosen, pinned to master01 and master02 via the node selector
crictl pull willdockerhub/ingress-nginx-controller:v1.1.0
crictl pull registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.1.1
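# A hedged sketch of pinning the controller to chosen nodes, as mentioned in note 1 above (the "ingress=true" label name is illustrative, not part of the original manifest):
# kubectl label node k8s-master01 ingress=true
# kubectl label node k8s-master02 ingress=true
# then extend the controller pod spec in deploy.yaml:
#   nodeSelector:
#     kubernetes.io/os: linux
#     ingress: "true"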
mkdir ingress
cd ingress
[root@hello ~/yaml]# vim deploy.yaml
[root@hello ~/yaml]# cat deploy.yaml
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: 'true'
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: NodePort
externalTrafficPolicy: Local
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: registry.cn-hangzhou.aliyuncs.com/chenby/controller:v1.2.0
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
namespace: ingress-nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.2.0
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: registry.cn-hangzhou.aliyuncs.com/chenby/kube-webhook-certgen:v1.1.1
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
[root@hello ~/yaml]#
#Enable the default backend: write the config file and apply it
[root@hello ~/yaml]# vim backend.yaml
[root@hello ~/yaml]# cat backend.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
labels:
app.kubernetes.io/name: default-http-backend
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: default-http-backend
template:
metadata:
labels:
app.kubernetes.io/name: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
image: registry.cn-hangzhou.aliyuncs.com/chenby/defaultbackend-amd64:1.5
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
labels:
app.kubernetes.io/name: default-http-backend
spec:
ports:
- port: 80
targetPort: 8080
selector:
app.kubernetes.io/name: default-http-backend
[root@hello ~/yaml]#
#Install a test application
[root@hello ~/yaml]# vim ingress-demo-app.yaml
[root@hello ~/yaml]#
[root@hello ~/yaml]# cat ingress-demo-app.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: hello-server
spec:
replicas: 2
selector:
matchLabels:
app: hello-server
template:
metadata:
labels:
app: hello-server
spec:
containers:
- name: hello-server
image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
ports:
- containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nginx-demo
name: nginx-demo
spec:
replicas: 2
selector:
matchLabels:
app: nginx-demo
template:
metadata:
labels:
app: nginx-demo
spec:
containers:
- image: nginx
name: nginx
---
apiVersion: v1
kind: Service
metadata:
labels:
app: nginx-demo
name: nginx-demo
spec:
selector:
app: nginx-demo
ports:
- port: 8000
protocol: TCP
targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
app: hello-server
name: hello-server
spec:
selector:
app: hello-server
ports:
- port: 8000
protocol: TCP
targetPort: 9000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-host-bar
spec:
ingressClassName: nginx
rules:
- host: "hello.chenby.cn"
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: hello-server
port:
number: 8000
- host: "demo.chenby.cn"
http:
paths:
- pathType: Prefix
path: "/nginx"
backend:
service:
name: nginx-demo
port:
number: 8000
#Run the deployment
kubectl apply -f deploy.yaml
kubectl apply -f backend.yaml
# After the above have finished creating, then run:
kubectl apply -f ingress-demo-app.yaml
kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress-host-bar nginx hello.chenby.cn,demo.chenby.cn 192.168.27.25 80 111s
#Filter to view the ingress ports
root@k8s-master01:~/urbancabin/ingress# kubectl get svc -A | grep ingress
ingress-nginx ingress-nginx-controller NodePort 10.103.146.13 <none> 80:31962/TCP,443:32210/TCP 7m51s
ingress-nginx ingress-nginx-controller-admission ClusterIP 10.102.99.95 <none> 443/TCP 7m52s
[root@hello ~/yaml]#
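# A hedged end-to-end check using the NodePort shown above (node IP and ports are taken from the outputs above and will differ in your environment):
curl -H "Host: hello.chenby.cn" http://192.168.27.25:31962
curl -H "Host: demo.chenby.cn"  http://192.168.27.25:31962/nginx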

14.IPv6 test

#Deploy an application
[root@k8s-master01 ~]# vim cby.yaml
[root@k8s-master01 ~]# cat cby.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chenby
spec:
  replicas: 3
  selector:
    matchLabels:
      app: chenby
  template:
    metadata:
      labels:
        app: chenby
    spec:
      containers:
      - name: chenby
        image: nginx
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: chenby
spec:
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
  - IPv4
  type: NodePort
  selector:
    app: chenby
  ports:
  - port: 80
    targetPort: 80
[root@k8s-master01 ~]# kubectl apply -f cby.yaml
#Check the port
[root@k8s-master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
chenby NodePort 10.103.54.8 <none> 80:32219/TCP 7s
hello-server ClusterIP 10.96.182.188 <none> 8000/TCP 5m28s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 113m
nginx-demo ClusterIP 10.97.76.186 <none> 8000/TCP 5m32s
[root@k8s-master01 ~]#
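# A hedged way to see which IP families and cluster IPs the Service actually received:
kubectl get svc chenby -o jsonpath='{.spec.ipFamilies}{"\n"}{.spec.clusterIPs}{"\n"}'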
#Access via the internal network
root@k8s-master01:~/urbancabin/ingress# curl -I 10.103.54.8
HTTP/1.1 200 OK
Server: nginx/1.23.0
Date: Sun, 03 Jul 2022 09:54:47 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 21 Jun 2022 14:25:37 GMT
Connection: keep-alive
ETag: "62b1d4e1-267"
Accept-Ranges: bytes
root@k8s-master01:~/urbancabin/ingress# curl -I http://192.168.27.20:32219
HTTP/1.1 200 OK
Server: nginx/1.23.0
Date: Sun, 03 Jul 2022 09:55:34 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 21 Jun 2022 14:25:37 GMT
Connection: keep-alive
ETag: "62b1d4e1-267"
Accept-Ranges: bytes
[root@localhost yaml]#

15.Install command-line auto-completion

apt install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
#Set an alias
echo 'alias k=kubectl' >>~/.bashrc
#Make the alias work together with completion
echo 'complete -F __start_kubectl k' >>~/.bashrc
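# Reload the shell config so the alias and completion take effect in the current session:
source ~/.bashrc
k get nodes    # tab completion now also works for the "k" alias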

Reference links:
https://blog.csdn.net/qq_33921750/article/details/125814818?spm=1001.2014.3001.5501
https://blog.csdn.net/Jerry00713/article/details/125220620
