Installing a Highly Available Kubernetes Cluster with kubeadm

I: Docker Deployment

| Version |
| --- |
| 18.06.3-ce |

1. Install Docker

yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine
                  
yum install -y yum-utils

yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

# The stable repository is enabled by default; the nightly channel is not
# needed for a pinned release.
yum -y install docker-ce-18.06.3.ce-3.el7

# Note: docker-ce 18.06 bundles its CLI and containerd; the separate
# docker-ce-cli and containerd.io packages only exist for 18.09 and later.
# docker-compose, if needed, comes from EPEL rather than the docker-ce repo.

2. Start the service and verify

systemctl start docker
systemctl enable docker
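
A quick way to verify: check that the daemon answers and that the pinned version was installed.

docker version --format '{{.Server.Version}}'
# expected: 18.06.3-ce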

II: HAProxy Deployment

1. Install

yum -y install haproxy

2. Edit the configuration file
Proxy to all of the backend kube-apiserver instances:

cat > /etc/haproxy/haproxy.cfg <<EOF
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    tcp
    log                     global
    option                  dontlognull
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout check           10s
    maxconn                 3000

frontend  kubernetes-apiserver
    mode tcp
    bind *:8443
    default_backend     kubernetes-apiserver

backend kubernetes-apiserver
    mode        tcp  
    balance     roundrobin  
    server master001 10.240.75.9:6443 check
    server master002 10.240.75.10:6443 check
    server master003 10.240.75.11:6443 check
EOF

Adjust rsyslog so that the HAProxy log is persisted to disk:

mkdir -p /data/haproxy/logs

echo "\$ModLoad imudp" >> /etc/rsyslog.conf
echo "\$UDPServerRun 514" >> /etc/rsyslog.conf
echo "local2.* /data/haproxy/logs/haproxy.log" >> /etc/rsyslog.conf

3. Start the related services

systemctl start haproxy
systemctl enable haproxy

systemctl restart rsyslog
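
To verify, confirm HAProxy is listening on 8443 and that log lines land in the new file (assuming the config above is in place):

ss -lntp | grep 8443
tail -n 20 /data/haproxy/logs/haproxy.log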

III: Keepalived Deployment

1. Install

yum -y install keepalived nc

mkdir -p /data/keepalived

2. Generate the configuration file

cat > /etc/keepalived/keepalived.conf <<EOF
global_defs {
   script_user root 
   enable_script_security

}

vrrp_script chk_haproxy {
    script "/usr/bin/nc -z -v -n 127.0.0.1 8443"  
    interval 2
    weight 11
}

vrrp_instance VI_1 {
  interface ens192

  # nopreempt is honored only when the initial state is BACKUP
  state BACKUP
  virtual_router_id 51
  priority 100
  nopreempt

  unicast_peer {
    # list the other keepalived nodes' IPs here, one per line
  }

  virtual_ipaddress {
    10.240.75.250  
  }

  authentication {
    auth_type PASS
    auth_pass Jingle@100
  }

  track_script {
      chk_haproxy
  }

  notify "/data/keepalived/notify.sh"
}
EOF

Failover notification script:

cat > /data/keepalived/notify.sh <<EOF
#!/bin/bash

# keepalived calls this with \$1=GROUP|INSTANCE, \$2=name, \$3=new state (MASTER/BACKUP/FAULT)
echo "\$(date) [\$(hostname)] VRRP state changed to \$3, failover in progress!" >> /data/keepalived/keepalived.log
EOF

# the script must be executable for keepalived to run it
chmod +x /data/keepalived/notify.sh

3. Start the service

systemctl start keepalived
systemctl enable keepalived
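
To verify the failover wiring, check whether this node currently holds the VIP (interface and address as configured above):

ip addr show ens192 | grep 10.240.75.250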

IV: Installing kubeadm

1. Install dependency packages

yum -y install epel-release
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget

2. Load kernel modules and enable loading at boot

modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
modprobe br_netfilter

# Load the modules again at boot
cat >> /etc/rc.local <<EOF
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
modprobe br_netfilter
EOF

# CentOS 7 ships rc.local without the execute bit, so make it executable
chmod +x /etc/rc.d/rc.local
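
To confirm the modules are actually loaded:

lsmod | grep -E 'ip_vs|nf_conntrack_ipv4'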

3. Tune kernel parameters

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
 
sysctl --system
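
These two keys only exist once br_netfilter is loaded; verify that they took effect:

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables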

4. Install kubeadm

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
 
# Set SELinux in permissive mode (effectively disabling it)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
 
yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3 --disableexcludes=kubernetes
 
systemctl enable --now kubelet

5. Adjust kubelet parameters

DOCKER_CGROUPS=$(docker info 2>/dev/null | awk '/Cgroup Driver/ {print $NF}')
echo $DOCKER_CGROUPS
cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS"
EOF

# Start kubelet
systemctl daemon-reload
systemctl enable kubelet && systemctl restart kubelet
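
Note that kubelet will sit in a restart loop until kubeadm init supplies its configuration; this is expected at this stage and can be watched with:

systemctl status kubelet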

V: Building the etcd Cluster

1. Install etcd from YUM

yum -y install etcd

2. Generate the kubeadm configuration files

#!/bin/bash
 
# Update HOST0, HOST1, and HOST2 with the IPs or resolvable names of your hosts
export HOST0=10.240.47.6
export HOST1=10.240.47.2
export HOST2=10.240.47.4
 
# Create temp directories to store files that will end up on other hosts.
mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/
 
ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2})
NAMES=("etcd-01" "etcd-02" "etcd-03")
 
for i in "${!ETCDHOSTS[@]}"; do
HOST=${ETCDHOSTS[$i]}
NAME=${NAMES[$i]}
cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml
apiVersion: "kubeadm.k8s.io/v1beta2"
kind: ClusterConfiguration
etcd:
    local:
        serverCertSANs:
        - "${HOST}"
        peerCertSANs:
        - "${HOST}"
        extraArgs:
            initial-cluster: ${NAMES[0]}=https://${ETCDHOSTS[0]}:2380,${NAMES[1]}=https://${ETCDHOSTS[1]}:2380,${NAMES[2]}=https://${ETCDHOSTS[2]}:2380
            initial-cluster-state: new
            name: ${NAME}
            listen-peer-urls: https://${HOST}:2380
            listen-client-urls: https://${HOST}:2379
            advertise-client-urls: https://${HOST}:2379
            initial-advertise-peer-urls: https://${HOST}:2380
EOF
done
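
A quick sanity check that each host got a config with its own name and listen URLs:

grep -E 'name:|listen-peer-urls' /tmp/*/kubeadmcfg.yaml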

3. Generate the certificates with kubeadm

# Generate the etcd CA first; the per-host certificates below are signed by it
kubeadm init phase certs etcd-ca

kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST2}/
# cleanup non-reusable certificates
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
 
kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml
cp -R /etc/kubernetes/pki /tmp/${HOST1}/
find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete
 
kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml
# No need to move the certs because they are for HOST0
 
# clean up certs that should not be copied off this host
find /tmp/${HOST2} -name ca.key -type f -delete
find /tmp/${HOST1} -name ca.key -type f -delete

4. Distribute the certificates to the matching hosts

scp -P 22 -r /tmp/10.240.47.2/* root@10.240.47.2:/etc/kubernetes
scp -P 22 -r /tmp/10.240.47.4/* root@10.240.47.4:/etc/kubernetes
scp -P 22 -r /tmp/10.240.47.6/* root@10.240.47.6:/etc/kubernetes
 
# The resulting directory layout:
# tree /etc/kubernetes -f
/etc/kubernetes
/etc/kubernetes/pki
    /etc/kubernetes/pki/apiserver-etcd-client.crt
    /etc/kubernetes/pki/apiserver-etcd-client.key
    /etc/kubernetes/pki/etcd
        /etc/kubernetes/pki/etcd/ca.crt
        /etc/kubernetes/pki/etcd/ca.key
        /etc/kubernetes/pki/etcd/healthcheck-client.crt
        /etc/kubernetes/pki/etcd/healthcheck-client.key
        /etc/kubernetes/pki/etcd/peer.crt
        /etc/kubernetes/pki/etcd/peer.key
        /etc/kubernetes/pki/etcd/server.crt
        /etc/kubernetes/pki/etcd/server.key
 
2 directories, 10 files

5. Add host entries to /etc/hosts

# cat /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
 
164.52.73.71    localhost
10.240.47.6 etcd-01
10.240.47.2 etcd-02
10.240.47.4 etcd-03

6. Edit the etcd systemd unit file

The unit below is written for etcd-01; adjust --name and the IP addresses on each host.

# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
 
[Service]
Type=notify
User=root
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name etcd-01 \
  --cert-file=/etc/kubernetes/pki/etcd/server.crt \
  --key-file=/etc/kubernetes/pki/etcd/server.key \
  --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt \
  --peer-key-file=/etc/kubernetes/pki/etcd/peer.key \
  --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \
  --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt \
  --initial-advertise-peer-urls https://10.240.47.6:2380 \
  --listen-peer-urls https://10.240.47.6:2380 \
  --listen-client-urls https://10.240.47.6:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://10.240.47.6:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster etcd-01=https://10.240.47.6:2380,etcd-02=https://10.240.47.2:2380,etcd-03=https://10.240.47.4:2380 \
  --initial-cluster-state new \
  --data-dir=/data/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
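
The unit points --data-dir at /data/etcd, a path the etcd RPM does not create. etcd can normally create it on startup, but creating it up front avoids permission surprises:

mkdir -p /data/etcd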

7. Start the service and check cluster health
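
Start etcd on every node first:

systemctl daemon-reload
systemctl start etcd
systemctl enable etcd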

# docker run --rm -it --net host -v /etc/kubernetes:/etc/kubernetes quay.io/coreos/etcd:v3.2.24 etcdctl --cert-file /etc/kubernetes/pki/etcd/peer.crt --key-file /etc/kubernetes/pki/etcd/peer.key --ca-file /etc/kubernetes/pki/etcd/ca.crt --endpoints https://10.240.47.6:2379 cluster-health
member 65e5f54c59b9675c is healthy: got healthy result from https://10.240.47.2:2379
member b9ac45df73026bb7 is healthy: got healthy result from https://10.240.47.4:2379
member d2731b483b4e1ac1 is healthy: got healthy result from https://10.240.47.6:2379
cluster is healthy

VI: Cluster Initialization

1. Create kubeadm-config.yaml

---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  # This node's kube-apiserver advertise address
  advertiseAddress: 10.240.75.9
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master001
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
  # IPs and hostnames to sign into the apiserver certificate (SANs)
  certSANs:
  - master001
  - master002
  - master003
  - 10.240.75.9
  - 10.240.75.10
  - 10.240.75.11
  - 10.240.75.250
  - 164.52.115.12
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: master
# Keepalived VIP address plus the HAProxy port
controlPlaneEndpoint: "10.240.77.50:8443"
controllerManager: {}
# DNS add-on
dns:
  type: CoreDNS
# External etcd configuration
etcd:
    external:
      endpoints:
      - http://10.240.75.6:2379
      - http://10.240.75.7:2379
      - http://10.240.75.8:2379
      caFile: /etc/kubernetes/pki/etcd/ca.crt
      certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
      keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
# Kubernetes version to install
kubernetesVersion: v1.16.3
networking:
  dnsDomain: cluster.local
  # Service subnet
  serviceSubnet: 10.96.0.0/12
  # Pod subnet; matches the flannel default
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
systemReserved: 
  cpu: "1"
  memory: 512Mi
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
# Run kube-proxy in ipvs mode (the default is iptables)
mode: ipvs

When using an external etcd cluster, it is advisable not to use client certificates: they are valid for one year by default, and renewing them later is cumbersome.
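
Optionally, pre-pull the control-plane images so the initialization itself runs faster:

kubeadm config images pull --config kubeadm-config.yaml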

2. Initialize the first control-plane node

# kubeadm init --config=kubeadm-config.yaml --upload-certs
Your Kubernetes control-plane has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
You can now join any number of the control-plane node running the following command on each as root:
 
  kubeadm join 10.240.77.50:8443 --token abcdef.2iihisjdiid \
    --discovery-token-ca-cert-hash sha256:dsdkskdkslldk \
    --control-plane --certificate-key ksdkskdksdsdksdskdsk
 
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
 
Then you can join any number of worker nodes by running the following on each as root:
 
kubeadm join 10.240.77.50:8443 --token abcdef.kskdksdlsk \
    --discovery-token-ca-cert-hash sha256:kskdksdlkskdksldkkld

Run the join commands returned above on each of the remaining control-plane and worker nodes to finish initializing the cluster.
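
Once every node has joined, a quick check from any master (assuming admin.conf was copied to ~/.kube/config as shown in the init output):

kubectl get nodes -o wide
kubectl get pods -n kube-system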
