Setting up a Kubernetes cluster with kubeadm

Since I will be looking into KubeEdge later, and KubeEdge at this point supports Kubernetes only up to 1.19, I am using k8s version 1.19.9. The platform is an OpenStack private cloud.

Node planning and network configuration

The 5 cloud servers here run CentOS 7.6. The IPs, and the hosts file entries used later, should be set as follows:

192.168.80.10   k8s-master-lb # acts like an ELB/SLB; if this is not an HA cluster, change this IP to master01's IP
192.168.80.11	k8s-master01 
192.168.80.12	k8s-master02
192.168.80.13	k8s-master03
192.168.80.21	k8s-node01
192.168.80.22	k8s-node02

Service CIDR: 10.96.0.0/12

Pod CIDR: 172.168.0.0/12

Basic configuration (start on a single server)

hostnamectl set-hostname k8s-master01

Configure the yum repositories

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Synchronize the time

yum install -y ntpdate
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
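
To keep the clock in sync after the initial ntpdate, a periodic cron job can help; a minimal sketch, assuming time2.aliyun.com stays reachable as the NTP server:

(crontab -l 2>/dev/null; echo "*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com") | crontab -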

Disable the firewall, swap, SELinux, and dnsmasq

# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld

# Disable swap on all nodes, both master and worker nodes
# This absolutely must be disabled, otherwise kubelet fails to start (learned the hard way)
swapoff -a
# Prevent swap from being re-enabled at boot
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab

# Disable SELinux
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
getenforce # check the SELinux status

# Disable dnsmasq (otherwise Docker containers may fail to resolve domain names)
systemctl stop dnsmasq && systemctl disable dnsmasq

Install required packages

yum install wget jq psmisc vim net-tools telnet yum-utils device-mapper-persistent-data lvm2 git ntpdate keepalived haproxy  -y

Configure system parameters

vim /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited

vim /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
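
These modules are loaded by systemd-modules-load at boot; to load them right away and verify, a quick check using standard tooling (nothing Kubernetes-specific assumed):

systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack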

cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
 
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
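
Apply the new kernel parameters immediately (sysctl --system reads /etc/sysctl.d/*.conf):

sysctl --system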

The stock 3.10.x kernel that ships with CentOS 7.x has bugs that make Docker and Kubernetes unstable. Upgrading the kernel avoids many container-related pitfalls, so here the kernel is upgraded to 4.19.12.

wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm
wget http://193.49.22.109/elrepo/kernel/el7/x86_64/RPMS/kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm
yum localinstall -y kernel-ml*

grub2-set-default  0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

# Once the configuration is done, reboot
reboot
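
After the machine comes back up, confirm the new kernel is the one actually running; a quick check:

uname -r
# expected: 4.19.12-1.el7.elrepo.x86_64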

Install Docker CE 19.03. For the registry mirror, apply for an accelerator address under your own Aliyun account, or just use https://hub-mirror.c.163.com/.

yum install docker-ce-19.03.15-3.el7 docker-ce-cli-19.03.15-3.el7 -y
# write daemon.json before the first start so the cgroup driver and mirrors take effect
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://******.mirror.aliyuncs.com",
    "http://hub-mirror.c.163.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://registry.docker-cn.com"
   ]
}
EOF
systemctl enable docker && systemctl restart docker
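
To confirm the systemd cgroup driver took effect (just reading the setting back from the running daemon):

docker info | grep -i 'cgroup driver'
# expected: Cgroup Driver: systemd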

Install kubeadm

# Find the version to install
yum list kubeadm --showduplicates | sort -r
yum install -y kubectl-1.19.9-0 kubelet-1.19.9-0
yum install -y kubeadm-1.19.9-0
Configure kubelet to pull the pause image from the Aliyun registry:
cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.2"
EOF
systemctl enable kubelet && systemctl restart kubelet
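
As a quick sanity check that the expected 1.19.x versions are installed (kubelet will keep restarting until kubeadm init runs, which is normal at this stage):

kubeadm version
kubelet --version
kubectl version --client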

High availability with HAProxy + Keepalived

vim  /etc/haproxy/haproxy.cfg
# Contents to add (adjust the IP addresses to match your own plan):
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s
 
defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s
 
frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor
 
frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
 
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01   192.168.80.11:6443  check
  server k8s-master02   192.168.80.12:6443  check
  server k8s-master03   192.168.80.13:6443  check
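
The configuration can be syntax-checked before HAProxy is started, using HAProxy's own config test mode:

haproxy -c -f /etc/haproxy/haproxy.cfg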

Keepalived

vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0 # change this to the NIC that carries the local LAN IP (check with ifconfig)
    mcast_src_ip 192.168.80.11 # change this to the IP of this master node
    virtual_router_id 51
    priority 101 # 101 on master01, 100 on master02 and master03
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.80.10 # change this to the VIP address
    }
    track_script {
       chk_apiserver
    }
}
vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
 
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done
 
if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi

chmod +x /etc/keepalived/check_apiserver.sh
systemctl enable keepalived && systemctl enable haproxy
reboot

After the reboot, verify that the VIP is reachable; if it is not, fix it before continuing.

ping 192.168.80.10
telnet 192.168.80.10 16443
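
The monitor frontend defined above on port 33305 gives another quick health signal; a sketch, assuming the VIP currently sits on this HAProxy node:

curl http://192.168.80.10:33305/monitor
# expected: a "200 OK" response page from HAProxy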

For virtual machines, clone 4 more copies; for cloud servers, create an image from this machine and launch 4 more instances from it.

Configuration after cloning

First configure the hostname and network on the other machines.

Configure the hosts file on every node.
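
A minimal sketch of the entries to append on every node, matching the node plan above (adjust if your IPs differ):

cat >> /etc/hosts <<EOF
192.168.80.10   k8s-master-lb
192.168.80.11   k8s-master01
192.168.80.12   k8s-master02
192.168.80.13   k8s-master03
192.168.80.21   k8s-node01
192.168.80.22   k8s-node02
EOF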

Set up passwordless SSH from the master01 node to the other nodes:

ssh-keygen -t rsa
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done

Modify the Keepalived configuration on master02 and master03 (mcast_src_ip and priority), then restart the services:

systemctl restart keepalived haproxy

Create kubeadm-config.yaml on master01

vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: 7t2weq.bjbawausm0jaxury
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.80.11
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:
  - 192.168.80.10
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.80.10:16443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.19.9
networking:
  dnsDomain: cluster.local
  podSubnet: 172.168.0.0/12
  serviceSubnet: 10.96.0.0/12
scheduler: {}

Pull the required images

kubeadm config images pull --config /root/kubeadm-config.yaml
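
To preview exactly which images will be pulled (optional; assumes the same config file path):

kubeadm config images list --config /root/kubeadm-config.yaml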

Initialize

kubeadm init --config /root/kubeadm-config.yaml --upload-certs
# command to join additional control-plane (master) nodes
  kubeadm join 192.168.80.10:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:566390dce77ff59473f00bb4b06073303f3478ade3e2cd181f1c09b0239e9a60 \
    --control-plane --certificate-key 9068979f113454a0d517d6320eebd7b776851fbf5cf2f575bf76a410448546a9

# command to join worker nodes
kubeadm join 192.168.80.10:16443 --token 7t2weq.bjbawausm0jaxury \
    --discovery-token-ca-cert-hash sha256:566390dce77ff59473f00bb4b06073303f3478ade3e2cd181f1c09b0239e9a60 
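
The token and certificate key shown above come from my init output. The token has a 24h TTL and the uploaded certificate key expires after about 2 hours, so if a node joins later they can be regenerated on master01:

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs   # prints a fresh key for --certificate-key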

Use the token obtained above to join the other nodes, then configure kubectl on master01:

mkdir -p ~/.kube
cp -i /etc/kubernetes/admin.conf ~/.kube/config
$ kubectl get nodes
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   13m     v1.19.9
k8s-master02   NotReady   master   10m     v1.19.9
k8s-master03   NotReady   master   6m40s   v1.19.9
k8s-node01     NotReady   <none>   4m16s   v1.19.9
k8s-node02     NotReady   <none>   3m57s   v1.19.9

Download the calico.yaml manifest

curl https://docs.projectcalico.org/manifests/calico.yaml -O

The default pod CIDR in the manifest is 192.168.0.0/16. If you are not using the default, uncomment the CALICO_IPV4POOL_CIDR variable in the file and set it to your own pod CIDR:

vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "172.168.0.0/12"
kubectl apply -f calico.yaml

Use the following command to check whether all the pods have started:

$ kubectl get pods --all-namespaces
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-55ffdb7658-rp8xq   1/1     Running   6          18m
kube-system   calico-node-2l6xb                          1/1     Running   0          18m
kube-system   calico-node-65gwf                          1/1     Running   0          18m
kube-system   calico-node-7vcc2                          1/1     Running   0          18m
kube-system   calico-node-ms4ts                          1/1     Running   0          18m
kube-system   calico-node-r4crq                          1/1     Running   0          18m
kube-system   coredns-6c76c8bb89-99qxh                   1/1     Running   0          45m
kube-system   coredns-6c76c8bb89-sntts                   1/1     Running   0          45m
kube-system   etcd-k8s-master01                          1/1     Running   0          45m
kube-system   etcd-k8s-master02                          1/1     Running   0          19m
kube-system   etcd-k8s-master03                          1/1     Running   0          20m
kube-system   kube-apiserver-k8s-master01                1/1     Running   0          45m
kube-system   kube-apiserver-k8s-master02                1/1     Running   0          19m
kube-system   kube-apiserver-k8s-master03                1/1     Running   0          20m
kube-system   kube-controller-manager-k8s-master01       1/1     Running   2          45m
kube-system   kube-controller-manager-k8s-master02       1/1     Running   0          19m
kube-system   kube-controller-manager-k8s-master03       1/1     Running   0          20m
kube-system   kube-proxy-5p8xm                           1/1     Running   0          19m
kube-system   kube-proxy-8rkc5                           1/1     Running   0          45m
kube-system   kube-proxy-gnmlz                           1/1     Running   0          24m
kube-system   kube-proxy-lnr6r                           1/1     Running   0          19m
kube-system   kube-proxy-q4spk                           1/1     Running   0          20m
kube-system   kube-scheduler-k8s-master01                1/1     Running   1          45m
kube-system   kube-scheduler-k8s-master02                1/1     Running   0          19m
kube-system   kube-scheduler-k8s-master03                1/1     Running   0          20m
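
Once the Calico pods are all Running, the nodes should move from NotReady to Ready; a quick re-check:

kubectl get nodes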

Deployment complete.
