Deploying kubernetes-v1.16.x on openEuler
00 overview
Deploy k8s-v1.16.15 with kubeadm on openEuler-20.03-SP3, using flannel as the CNI component.
01 env
- kubernetes-v1.16.15
- docker-19.03.x
- openeuler-20.03-sp3 (CentOS-8.x compatible)
- kernel-v4.18+
- flannel-0.13
02 plan
- k8s-master (single node: the master doubles as worker, with pod scheduling allowed on it)
03 requirement
Prerequisites: selinux/firewalld/swap/hosts/ntp, handled below.
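A minimal sketch for the selinux/firewalld/hosts/ntp items (chrony is assumed as the NTP client; the IP/hostname are this doc's examples):
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
systemctl disable firewalld --now
echo "172.24.20.200 euler-200" >>/etc/hosts
yum install -y chrony && systemctl enable chronyd --now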
swapoff -a
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab # comment out the swap entry
mount -a && free -h # verify swap shows 0B
sysctl
tee >/etc/sysctl.d/docker.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward=1
EOF
sysctl --system
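Note: the net.bridge.* keys only exist once the br_netfilter module is loaded; on kernels where sysctl --system complains about them, load and persist the module first (a small assumed addition):
modprobe br_netfilter
echo br_netfilter >/etc/modules-load.d/br_netfilter.conf
sysctl --system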
load the LVS/IPVS kernel modules
# enable ipvs
yum install ipvsadm ipset -y
tee >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load
lsmod | grep -e ip_vs -e nf_conntrack
04 docker/kubeadm
Note: each k8s release only validates Docker up to a certain version; pick a supported one (19.03 here).
docker-yum
tee >/etc/yum.repos.d/docker-ce.repo <<'EOF'
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/8/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF
openEuler 20.03 is built against CentOS 8.x, so the el8 repos are used.
kubernetes-yum
// kubernetes
cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
yum clean all && yum makecache
install docker
# list available docker-ce versions
yum list docker-ce.x86_64 --showduplicates |sort -r
yum list docker-ce --showduplicates |sort -r // package name used by older docker releases
yum install -y docker-ce-3:19.03.13-3.el8
systemctl enable docker --now
tee <<EOF >/etc/docker/daemon.json
{
  "registry-mirrors": ["https://eyg9yi6d.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "50m",
    "max-file": "3"
  }
}
EOF
systemctl restart docker && docker info |grep -i Cgroup // expect: Cgroup Driver: systemd
install kubeadm
//kubeadm/kubelet/kubectl
[root@euler-200 ~]# yum list kubeadm.x86_64 --showduplicates |grep 1.16.15
kubeadm.x86_64 1.16.15-0 kubernetes
yum install -y kubeadm-1.16.15-0 kubelet-1.16.15-0 kubectl-1.16.15-0
Note: keep the kubeadm/kubectl/kubelet versions in line with the kubernetes version; worker nodes can skip kubectl.
systemctl enable kubelet --now
// drop the kubelet CNI flag (works around coredns staying Pending)
vim /var/lib/kubelet/kubeadm-flags.env
#KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.1"
KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.1"
Note:
removing --network-plugin=cni makes kubelet use the docker bridge network; flannel is wired into docker0 in section 06 instead.
systemctl daemon-reload
systemctl restart kubelet
# kubeadm-flags.env is referenced from the kubelet drop-in:
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
05 kubeadm init cluster
// images that will be pulled
[root@euler-200 ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.16.15
k8s.gcr.io/kube-controller-manager:v1.16.15
k8s.gcr.io/kube-scheduler:v1.16.15
k8s.gcr.io/kube-proxy:v1.16.15
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.15-0
k8s.gcr.io/coredns:1.6.2
//init
kubeadm init --kubernetes-version=1.16.15 \
--apiserver-advertise-address=172.24.20.200 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.10.0.0/16 --pod-network-cidr=10.20.0.0/16 -v 10
or, letting kubeadm auto-detect the advertise address:
kubeadm init --kubernetes-version=1.16.15 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.10.0.0/16 --pod-network-cidr=10.20.0.0/16 -v 10
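The same init can also be expressed as a kubeadm config file; a sketch using the v1beta2 kubeadm API that ships with v1.16:
tee kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.16.15
imageRepository: registry.aliyuncs.com/google_containers
networking:
  serviceSubnet: 10.10.0.0/16
  podSubnet: 10.20.0.0/16
EOF
kubeadm init --config kubeadm-config.yaml -v 10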
# with kubeadm, all kubernetes control-plane components run as docker containers (static pods)
# reset kubeadm after a failed init
kubeadm reset
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
rm -rf /var/lib/cni/
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
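// alternatively, for root sessions only (also printed by kubeadm init):
export KUBECONFIG=/etc/kubernetes/admin.conf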
kubectl get cs // check component status
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
// add nodes to the cluster with the join command printed by kubeadm init
kubeadm join --token e7986d.e440de5882342711 192.168.61.41:6443 // incomplete without the CA cert hash; use the full form below
kubeadm join 192.168.61.11:6443 --token leaahe.ydaf5vnts83a9myp --discovery-token-ca-cert-hash sha256:6b2761d20f115c4e22cc14788a78e1691c13cf42f6d573ae8a8f3efbed6da60f
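// if the token has expired, print a fresh join command on the master:
kubeadm token create --print-join-command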
kubectl get nodes
06 flannel-v0.13.x
- https://github.com/flannel-io/flannel/tree/v0.13.0 # supports k8s v1.7+
- https://github.com/flannel-io/flannel/blob/v0.13.0/Documentation/kube-flannel.yml
wget https://raw.githubusercontent.com/flannel-io/flannel/v0.13.0/Documentation/kube-flannel.yml
// net-conf (Network must match --pod-network-cidr; note the option is spelled DirectRouting)
net-conf.json: |
  {
    "Network": "10.20.0.0/16",
    "Backend": {
      "Type": "vxlan",
      "DirectRouting": true
    }
  }
// DirectRouting routes pod traffic directly between hosts on the same L2 segment instead of vxlan-encapsulating it
//flanneld
containers:
- name: kube-flannel
  image: quay.io/coreos/flannel:v0.13.0
  command:
  - /opt/bin/flanneld
  args:
  - --ip-masq
  - --kube-subnet-mgr
  - --iface=eth1 # recommended when nodes have multiple interfaces; point it at the inter-node NIC
// add docker support
// by default docker0 and flannel.1 sit on different subnets:
15: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UNKNOWN group default
link/ether ca:a6:a4:36:39:d0 brd ff:ff:ff:ff:ff:ff
inet 10.20.0.0/32 scope global flannel.1
valid_lft forever preferred_lft forever
inet6 fe80::c8a6:a4ff:fe36:39d0/64 scope link
valid_lft forever preferred_lft forever
4: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ef:3b:02:22 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:efff:fe3b:222/64 scope link
[root@euler-200 ~]# ll /run/flannel/subnet.env //podcidr
-rw-r--r-- 1 root root 94 Oct 27 20:27 /run/flannel/subnet.env
[root@euler-200 ~]# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.20.0.0/16
FLANNEL_SUBNET=10.20.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true
/run/flannel/docker // takes its values from /run/flannel/subnet.env
# add the docker0 network options; the variables are not expanded into this file automatically, and note FLANNEL_SUBNET differs per node
DOCKER_NETWORK_OPTIONS=" --bip=10.20.0.1/24 --ip-masq=true --mtu=1450"
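A sketch that derives the file per node from subnet.env instead of hard-coding it (flannel's repo also ships dist/mk-docker-opts.sh for the same job):
. /run/flannel/subnet.env
tee /run/flannel/docker <<EOF
DOCKER_NETWORK_OPTIONS=" --bip=${FLANNEL_SUBNET} --ip-masq=${FLANNEL_IPMASQ} --mtu=${FLANNEL_MTU}"
EOF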
vim /usr/lib/systemd/system/docker.service
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
// on all nodes, stop kubelet and docker, then restart
systemctl stop kubelet
systemctl stop docker
systemctl daemon-reload && systemctl start docker
systemctl start kubelet
kubectl get nodes // by default the master does not take workload pods
NAME STATUS AGE
cent0 Ready,master 20m
kubectl apply -f kube-flannel.yml
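// verify the rollout (app=flannel is the label used in kube-flannel.yml v0.13):
kubectl -n kube-system get pods -l app=flannel
kubectl get nodes // STATUS should move to Ready once flannel is up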
//kubectl get cm -A
kube-system kube-flannel-cfg 2 16m
kube-system kube-proxy 2 70m
# adjust the Network setting if needed
kubectl edit cm/kube-flannel-cfg -n kube-system
# adjust the --iface setting if needed
[root@euler-202 ~]# kubectl get ds -A
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system kube-flannel-ds 1 1 1 1 1 <none> 18m
kube-system kube-proxy 1 1 1 1 1
kubectl edit ds/kube-flannel-ds -n kube-system
// restart the kube-flannel pods
kubectl -n kube-system get pods | grep kube-flannel |awk '{system("kubectl -n kube-system delete pod "$1" ")}'
// allow the master to schedule pods
kubectl taint nodes --all node-role.kubernetes.io/master-
07 kube-proxy enable ipvs
[root@euler-200 ~]# kubectl get pods -A |grep kube-proxy
kube-system kube-proxy-bh69x 1/1 Running 1 9h
kube-system kube-proxy-fp6ng 1/1 Running 0 8h
[root@euler-200 ~]#
//kube-proxy daemonset
kubectl get ds -A |grep kube-proxy
kubectl get pods -A |grep kube-proxy
kubectl -n kube-system logs ds/kube-proxy
I1027 02:56:24.767080 1 server_others.go:150] Using iptables Proxier.
kubectl logs -n kube-system pod/kube-proxy-q5fmk // check which proxy mode kube-proxy started with
[root@k8s-master ~]# kubectl edit cm kube-proxy -n kube-system // edit the kube-proxy configmap
...
ipvs:
  excludeCIDRs: null
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs" # switch the proxy mode to ipvs
// restart the kube-proxy pods
kubectl -n kube-system get pods | grep kube-proxy |awk '{system("kubectl -n kube-system delete pod "$1" ")}'
// check the kube-proxy pod startup logs
kubectl get pod -n kube-system |grep kube-proxy |awk '{system("kubectl logs pod/"$1" -n kube-system")}'
kubectl -n kube-system logs ds/kube-proxy
I0406 15:19:02.600751 1 server_others.go:170] Using ipvs Proxier.
W0406 15:19:02.603497 1 proxier.go:401] IPVS scheduler not specified, use rr by default // rr is the default scheduler
ipvsadm -ln
08 kubectl get cs (component health)
[root@c-3-103 manifests]# kubectl get cs
controller-manager Unhealthy Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler Unhealthy Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0 Healthy {"health":"true"}
[root@euler-200 ~]# kubectl get cs
NAME AGE
controller-manager <unknown>
scheduler <unknown>
etcd-0 <unknown>
// a known k8s v1.16.x display bug; the components are actually fine
kubectl get cs -o=go-template='{{printf "NAME\t\t\tHEALTH_STATUS\tMESSAGE\t\n"}}{{range .items}}{{$name := .metadata.name}}{{range .conditions}}{{printf "%-24s%-16s%-20s\n" $name .status .message}}{{end}}{{end}}'
[root@euler-200 ~]# ss -lnt |grep 1025
LISTEN 0 128 127.0.0.1:10257 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10259 0.0.0.0:*
LISTEN 0 128 *:10250 *:*
LISTEN 0 128 *:10251 *:*
LISTEN 0 128 *:10252 *:*
LISTEN 0 128 *:10256 *:*
curl -k -I https://127.0.0.1:10257/healthz // 10257/10259 are the secure serving ports
/etc/kubernetes/manifests // static pod manifests
[root@c-3-103 manifests]# ls -l
total 16
-rw------- 1 root root 1881 Aug 8 15:24 etcd.yaml
-rw------- 1 root root 2738 Aug 8 15:24 kube-apiserver.yaml
-rw------- 1 root root 2608 Aug 8 15:24 kube-controller-manager.yaml
-rw------- 1 root root 1164 Aug 8 15:24 kube-scheduler.yaml
[root@c-3-103 manifests]# grep port kube-controller-manager.yaml
#- --port=0 // comment out this flag; restart kubelet and the liveness-probe port is filled in automatically
port: 10257
[root@c-3-103 manifests]# grep port kube-scheduler.yaml
#- --port=0
port: 10259
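After commenting out --port=0 in both manifests, restart kubelet so the static pods are recreated; get cs should then report Healthy:
systemctl restart kubelet
kubectl get cs // controller-manager/scheduler back to Healthy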
09 testing
kubectl run -it curl --image=radial/busyboxplus:curl
nslookup kubernetes.default
docker pull quay.io/mvpbang/busybox
kubectl run -it curl --image=quay.io/mvpbang/busybox
nslookup kubernetes.default
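A slightly fuller smoke test of flannel + kube-proxy, assuming the nginx image is pullable:
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80
kubectl get svc nginx // note the CLUSTER-IP
curl -I http://<cluster-ip> // run on a node; expect HTTP/1.1 200 OK
ipvsadm -ln // the service should appear as an ipvs virtual server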
10 kubectl
# Installing bash completion on Linux
## If bash-completion is not installed on Linux, please install the 'bash-completion' package
## via your distribution's package manager.
## Load the kubectl completion code for bash into the current shell
source <(kubectl completion bash)
## Write bash completion code to a file and source it from .bash_profile
kubectl completion bash > ~/.kube/completion.bash.inc
printf "
# Kubectl shell completion
source '$HOME/.kube/completion.bash.inc'
" >> $HOME/.bash_profile
source $HOME/.bash_profile
# Load the kubectl completion code for zsh[1] into the current shell
source <(kubectl completion zsh)
# Set the kubectl completion code for zsh[1] to autoload on startup
kubectl completion zsh > "${fpath[1]}/_kubectl"
Usage:
kubectl completion SHELL [options]
Use "kubectl options" for a list of global command-line options (applies to all commands).
// kubectl command auto-completion
yum install -y bash-completion
#add env
kubectl completion bash > ~/.kube/completion.bash.inc
tee <<-'EOF' >>~/.bash_profile
source ~/.kube/completion.bash.inc
EOF
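Optional: complete a short alias as well (the complete -F __start_kubectl snippet is from the kubectl completion help; the alias name k is an assumption):
tee <<-'EOF' >>~/.bash_profile
alias k=kubectl
complete -F __start_kubectl k
EOF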
11 archive
control-plane
[root@euler-200 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
rancher/mirrored-flannelcni-flannel v0.20.0 fd14f6e39753 9 days ago 59.4MB
rancher/mirrored-flannelcni-flannel-cni-plugin v1.1.0 fcecffc7ad4a 5 months ago 8.09MB
registry.aliyuncs.com/google_containers/kube-proxy v1.16.15 6133ee425f8b 2 years ago 116MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.16.15 b4ac6c566833 2 years ago 170MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.16.15 440c7cc060df 2 years ago 162MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.16.15 626a04631b0f 2 years ago 93.6MB
registry.aliyuncs.com/google_containers/etcd 3.3.15-0 b2756210eeab 3 years ago 247MB
registry.aliyuncs.com/google_containers/coredns 1.6.2 bf261d157914 3 years ago 44.1MB
registry.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 4 years ago 742kB
worker-node
[root@euler-201 kubelet]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
rancher/mirrored-flannelcni-flannel v0.20.0 fd14f6e39753 9 days ago 59.4MB
rancher/mirrored-flannelcni-flannel-cni-plugin v1.1.0 fcecffc7ad4a 5 months ago 8.09MB
registry.aliyuncs.com/google_containers/kube-proxy v1.16.15 6133ee425f8b 2 years ago 116MB
registry.aliyuncs.com/google_containers/coredns 1.6.2 bf261d157914 3 years ago 44.1MB
registry.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 4 years ago 742kB