kubeadm HA Installation

I. Environment

# Alibaba Cloud servers.
# Ideally Alibaba Cloud SLB would load-balance the api-server, but an SLB TCP listener cannot handle hairpin traffic: a backend server that connects to the SLB and gets routed back to itself will fail. Only the HTTP/HTTPS listener modes work around this, hence the dedicated HAProxy box.
# Worker nodes are best created through Alibaba Cloud Auto Scaling, which makes later scale-out and scale-in easy.
172.16.208.161 master1
172.16.208.159 master2
172.16.208.160 master3
172.16.208.163 haproxy
172.16.208.164 node1
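
If these hostnames are not in DNS, a minimal sketch of pinning them in /etc/hosts on every machine (same IPs as the list above):

cat >> /etc/hosts <<EOF
172.16.208.161 master1
172.16.208.159 master2
172.16.208.160 master3
172.16.208.163 haproxy
172.16.208.164 node1
EOF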

 

II. Master Node Setup (run on every master)

 

# Tune kernel parameters (br_netfilter must be loaded or the bridge-nf sysctls fail)
modprobe br_netfilter
echo net.bridge.bridge-nf-call-iptables=1 >>/etc/sysctl.conf
echo net.bridge.bridge-nf-call-ip6tables=1 >>/etc/sysctl.conf
echo net.ipv4.ip_forward=1 >>/etc/sysctl.conf
echo vm.swappiness=0 >>/etc/sysctl.conf
sysctl -p


# Disable swap. kubelet will not start while swap is active; Alibaba Cloud images
# usually ship without a swap partition, so these stay commented. Uncomment them
# if your image does have swap.
#swapoff -a
#sed -i '/swap/s/^/#/' /etc/fstab


# Disable firewalld and SELinux
systemctl stop firewalld
systemctl disable firewalld
setenforce 0   # immediate; the sed below makes it permanent
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config

# Load the IPVS kernel modules (nf_conntrack_ipv4 exists on CentOS 7's 3.10 kernel; on kernels >= 4.19 the module is nf_conntrack)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules 
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
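
Note: /etc/sysconfig/modules/*.modules is a RHEL 6 convention and is not guaranteed to run at boot on CentOS 7. A sketch of the systemd-native way to make the modules persist (this file is read by systemd-modules-load at every boot):

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF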


# Configure the yum repos (Aliyun mirrors)
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
yum makecache

# Install Docker CE
yum -y install docker-ce

# Configure registry mirror accelerators
mkdir -p /etc/docker
cat>/etc/docker/daemon.json <<-'EOF'
{
    "registry-mirrors": [
        "https://1nj0zren.mirror.aliyuncs.com",
        "https://docker.mirrors.ustc.edu.cn",
        "http://f1361db2.m.daocloud.io",
        "https://registry.docker-cn.com"
    ]
}
EOF
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
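
Separately from the mirrors, kubeadm's documentation recommends running Docker with the systemd cgroup driver; kubeadm detects Docker's driver at init/join time and configures the kubelet to match, so before init is the safe moment to change it. A sketch merging that option into daemon.json (keeping a subset of the mirrors above):

cat > /etc/docker/daemon.json <<'EOF'
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": [
        "https://docker.mirrors.ustc.edu.cn",
        "https://registry.docker-cn.com"
    ]
}
EOF
systemctl daemon-reload && systemctl restart docker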


# Install kubeadm, kubelet and kubectl (consider pinning versions, e.g. kubelet-1.20.0,
# so they match the kubernetesVersion set in kubeadm-init.yaml below)
yum install -y kubelet kubeadm kubectl

# Install the IPVS userspace tools
yum -y install ipvsadm ipset


# Enable kubelet (it will crash-loop until kubeadm init/join runs; this is expected)
systemctl enable kubelet && systemctl start kubelet

 

III. HAProxy Server Setup

# Install haproxy
yum install -y haproxy


# The resulting HAProxy config (blank lines and comments filtered out)
[root@haproxy ~]# egrep -v "^$|^#" /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
frontend k8s-master
  bind 0.0.0.0:6443
  bind 127.0.0.1:6443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server master1	172.16.208.161:6443  check
  server master2	172.16.208.159:6443  check
  server master3	172.16.208.160:6443  check
# Start HAProxy
[root@haproxy ~]#  systemctl  start haproxy
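
So the proxy survives reboots, also enable the unit:

[root@haproxy ~]# systemctl enable haproxy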

# Verify the listeners
[root@haproxy ~]# ss -lntp|grep 6443
LISTEN     0      128    127.0.0.1:6443                     *:*                   users:(("haproxy",pid=11943,fd=6))
LISTEN     0      128          *:6443                     *:*                   users:(("haproxy",pid=11943,fd=5))
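
The config also exposes HAProxy's runtime socket at /var/lib/haproxy/stats. A sketch of checking backend health through it (assumes socat is installed, e.g. yum install -y socat; the masters will show DOWN until kubeadm init has run on at least one of them):

# Column 18 of the stats CSV is the server status (UP/DOWN).
echo "show stat" | socat unix-connect:/var/lib/haproxy/stats stdio | cut -d, -f1,2,18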

  

IV. kubeadm init on One Master

1. Generate the default kubeadm config YAML

[root@master1 ~]# kubeadm config print init-defaults > kubeadm-init.yaml

 

2. Edit the config

[root@master1 ~]# cat  kubeadm-init.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # IP the api-server binds to; use this node's real NIC address
  advertiseAddress: 172.16.208.161
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "172.16.208.163:6443" # address clients use to reach the api-server; point it at the HAProxy host
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers  # registry mirror reachable from mainland China
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}
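
One gap worth flagging: the IPVS modules and ipvsadm installed in section II only take effect if kube-proxy actually runs in IPVS mode, and nothing above enables that. A sketch of an extra document appended to kubeadm-init.yaml before running init (kubeproxy.config.k8s.io/v1alpha1 is the config group kubeadm accepts for this):

cat >> kubeadm-init.yaml <<EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF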

  

3. Initialize the cluster (--upload-certs uploads the control-plane certificates so joining masters can fetch them automatically)

[root@master1 ~]# kubeadm init --config kubeadm-init.yaml  --upload-certs

  

4. Output when init finishes

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 172.16.208.163:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c \
    --control-plane --certificate-key 3d151f00234812596732feb72f6a52a7a190bb14325341fa65d5b288453d0827

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.16.208.163:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c 

5. Copy the admin kubeconfig

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
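
A quick sanity check that the kubeconfig works; the node will report NotReady until the pod network is installed in section VI:

kubectl get nodes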

  

V. Join the Other Masters

1. kubeadm join on master2 and master3

[root@master2 ~]#   kubeadm join 172.16.208.163:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c \
>     --control-plane --certificate-key 3d151f00234812596732feb72f6a52a7a190bb14325341fa65d5b288453d0827

[root@master3 ~]#   kubeadm join 172.16.208.163:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c \
>     --control-plane --certificate-key 3d151f00234812596732feb72f6a52a7a190bb14325341fa65d5b288453d0827
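
If more than two hours have passed since init, the uploaded certificates have been deleted and the join above fails. A sketch of regenerating a fresh control-plane join on master1 (both are standard kubeadm subcommands):

# Re-upload the control-plane certs; prints a new certificate key.
kubeadm init phase upload-certs --upload-certs
# Print a fresh join command; append --control-plane --certificate-key <key from above>.
kubeadm token create --print-join-command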

  

2. Copy the admin kubeconfig

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

  

VI. Install Calico

wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl  apply -f calico.yaml
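
Calico's manifest defaults to the 192.168.0.0/16 pod pool, which does not overlap this lab's 172.16.x network. To watch the rollout before checking node status (k8s-app=calico-node is the label the stock manifest applies):

kubectl -n kube-system get pods -l k8s-app=calico-node -w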

  

VII. Check Cluster Status

[root@master1 ~]# kubectl get node 
NAME      STATUS   ROLES                  AGE     VERSION
master1   Ready    control-plane,master   4m44s   v1.20.0
master2   Ready    control-plane,master   2m51s   v1.20.0
master3   Ready    control-plane,master   2m47s   v1.20.0

  

VIII. Fix the cs Component Errors and Point Each Master's kubelet at Its Own API Server Address

1. Right after installation, scheduler and controller-manager report connect: connection refused in kubectl get cs

[root@master1 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
etcd-0               Healthy     {"health":"true"}            

  

2. On all three masters, comment out the --port=0 line in kube-controller-manager.yaml and kube-scheduler.yaml (that flag disables the insecure health ports 10251/10252 that the cs check probes); a sed sketch follows the listing below

cd /etc/kubernetes/manifests
[root@master3 manifests]# grep 'port=0' kube-scheduler.yaml 
    #- --port=0
[root@master3 manifests]# grep 'port=0' kube-controller-manager.yaml 
    #- --port=0
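
A sketch of the same edit with sed, run on each master; the kubelet watches this directory and recreates the static pods on its own:

cd /etc/kubernetes/manifests
sed -i 's/^\( *\)- --port=0/\1#- --port=0/' kube-scheduler.yaml kube-controller-manager.yaml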

  

3. On each master, change the server field in /etc/kubernetes/kubelet.conf to the node's own internal IP; a scripted version follows the listing

[root@master1 manifests]# grep 6443 /etc/kubernetes/kubelet.conf 
    server: https://172.16.208.161:6443
[root@master2 manifests]# grep 6443 /etc/kubernetes/kubelet.conf 
    server: https://172.16.208.159:6443
[root@master3 manifests]# grep 6443 /etc/kubernetes/kubelet.conf 
    server: https://172.16.208.160:6443
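
A sketch of scripting the same edit; eth0 is an assumption (the usual ECS primary NIC), so adjust it to your interface:

# Pick this node's primary IPv4 address off eth0 and point kubelet at it.
LOCAL_IP=$(ip -4 -o addr show eth0 | awk '{split($4, a, "/"); print a[1]}')
sed -i "s#server: https://.*:6443#server: https://${LOCAL_IP}:6443#" /etc/kubernetes/kubelet.conf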

  

4. Restart kubelet

systemctl restart kubelet

5. Check cs status again

[root@master1 manifests]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

  

IX. Join Worker Nodes

1. Node preparation

Prepare the node exactly as in section II (kernel parameters, swap, firewalld/SELinux, IPVS modules, yum repos, Docker plus registry mirrors, ipvsadm/ipset). The only difference is the package set: workers need kubelet and kubeadm but not kubectl.

# Install kubeadm and kubelet (kubectl is not needed on worker nodes)
yum install -y kubelet kubeadm

# Enable kubelet so it starts after the join
systemctl enable kubelet

 

2. Create a new join token on a master (tokens live 24h, so the one from init may have expired)

[root@master1 manifests]# kubeadm token create --print-join-command
kubeadm join 172.16.208.163:6443 --token c8i365.o0k3q1q8hhlowcx1     --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c 

  

3. kubeadm join on the node

 

[root@node1 ~]# kubeadm join 172.16.208.163:6443 --token c8i365.o0k3q1q8hhlowcx1     --discovery-token-ca-cert-hash sha256:e8fb32223f9ddae02aced75e50cda25474fd803ac6ce0e5db2d73bff3272109c 

  

4. Check cluster status

[root@master1 manifests]# kubectl get node 
NAME      STATUS   ROLES                  AGE   VERSION
master1   Ready    control-plane,master   46m   v1.20.0
master2   Ready    control-plane,master   44m   v1.20.0
master3   Ready    control-plane,master   44m   v1.20.0
node1     Ready    <none>                 66s   v1.20.0
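
ROLES shows <none> for workers because kubeadm does not label them; purely cosmetic, but if you want the column filled in:

kubectl label node node1 node-role.kubernetes.io/worker=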

  

 

 