Deploying a Multi-Master Kubernetes Cluster with kubeadm

 

Initial Environment Setup

Set the hostnames

hostnamectl set-hostname master-1 && exec bash    # run on 192.168.10.29
hostnamectl set-hostname master-2 && exec bash    # run on 192.168.10.30
hostnamectl set-hostname node-1 && exec bash      # run on 192.168.10.31

 Configure a uniform /etc/hosts on all nodes

/etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.29 master-1
192.168.10.30 master-2
192.168.10.31 node-1

  Set up passwordless SSH between the nodes

[root@master-1 ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:lgzlxsYu///hScHtRNsUqUiNHuM/HyvYBJ3aiavnnYM root@master-1
The key's randomart image is:
+---[RSA 2048]----+
|        .   o  ..|
|       =   = . ..|
|      . * + = o o|
|       * . = = +o|
|      . S   * +.+|
|       +   o * = |
|        .   * = +|
|         . E.*.= |
|         .=o.+*  |
+----[SHA256]-----+
[root@master-1 ~]# ssh-copy-id master-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-1 (192.168.10.29)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-1'"
and check to make sure that only the key(s) you wanted were added.

[root@master-1 ~]# ssh-copy-id master-2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-2 (192.168.10.30)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-2'"
and check to make sure that only the key(s) you wanted were added.

[root@master-1 ~]# ssh-copy-id node-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node-1 (192.168.10.31)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node-1'"
and check to make sure that only the key(s) you wanted were added.




[root@master-2 ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:zMT6ci/nxV2Iy5n5WWUgVS3+kHMSIq/Tgf0zUpo8aDM root@master-2
The key's randomart image is:
+---[RSA 2048]----+
|              ..o|
|       .  . ..o .|
|        o  =.o.+ |
|       =  . +oBo.|
|      . S  =.*.*+|
|       .  EoB*+oo|
|      . o. +Oo.o.|
|       o.... . o |
|         +o   o  |
+----[SHA256]-----+
[root@master-2 ~]# ssh-copy-id master-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-1 (192.168.10.29)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-1'"
and check to make sure that only the key(s) you wanted were added.

[root@master-2 ~]# ssh-copy-id master-2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-2 (192.168.10.30)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-2'"
and check to make sure that only the key(s) you wanted were added.

[root@master-2 ~]# ssh-copy-id node-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node-1 (192.168.10.31)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node-1'"
and check to make sure that only the key(s) you wanted were added.
[root@node-1 ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:VFobKW3tU3J1iDDcJt3zI2reWb+KklK8ujImSgs19WM root@node-1
The key's randomart image is:
+---[RSA 2048]----+
|         o** o oo|
|        .+*+B * .|
|    .   oo.+ + o |
|   . . .    o. ..|
|  o   E S   ... .|
| . . . . o o   . |
|..      . = . o .|
|o .. + . + ..o  .|
| o. o oo+ .. ....|
+----[SHA256]-----+
[root@node-1 ~]# ssh-copy-id node-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node-1 (192.168.10.31)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node-1's password: 
Permission denied, please try again.
root@node-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'node-1'"
and check to make sure that only the key(s) you wanted were added.

[root@node-1 ~]# ssh-copy-id master-1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-1 (192.168.10.29)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-1'"
and check to make sure that only the key(s) you wanted were added.

[root@node-1 ~]# ssh-copy-id master-2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'master-2 (192.168.10.30)' can't be established.
ECDSA key fingerprint is SHA256:T9yZYCrcVc0EtAUoRLsxgWbeKAM+x3Can+rpn9MjpnM.
ECDSA key fingerprint is MD5:42:ea:9c:5d:f9:96:02:df:d8:1d:ee:c4:7c:61:f5:ad.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@master-2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'master-2'"
and check to make sure that only the key(s) you wanted were added.
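With all keys exchanged, a short loop (a minimal sketch using the host names from /etc/hosts above) verifies passwordless login from any node and pushes the shared hosts file everywhere:

for h in master-1 master-2 node-1; do
  ssh -o BatchMode=yes root@$h hostname    # fails instead of prompting if a key is missing
  scp /etc/hosts root@$h:/etc/hosts        # distribute the common hosts file
done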

  Disable swap

 swapoff -a          # turn swap off immediately
 vim /etc/fstab      # comment out the swap line (see below) so it stays off after reboot


#
# /etc/fstab
# Created by anaconda on Sun Feb  7 10:14:45 2021
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=ec65c557-715f-4f2b-beae-ec564c71b66b /boot                   xfs     defaults        0 0
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
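A non-interactive alternative to editing /etc/fstab by hand (a sketch; it comments out every line mentioning swap, so review the file afterwards):

sed -ri 's/.*swap.*/#&/' /etc/fstab    # comment out the swap entry
free -m                                # the Swap row should read 0 after swapoff -a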

  Disable the firewall and SELinux

 systemctl disable firewalld.service 
 systemctl stop firewalld.service 
 setenforce 0
setenforce: SELinux is disabled
 vim /etc/selinux/config 

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

  Load the br_netfilter kernel module

 modprobe br_netfilter
echo "modprobe br_netfilter" >> /etc/profile

 lsmod | grep br_netfilter
br_netfilter           22256  0 
bridge                151336  1 br_netfilter
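Appending modprobe to /etc/profile only runs at login; on systemd-based systems a modules-load.d entry (an equivalent alternative sketch) loads the module at every boot:

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # loaded at boot by systemd-modules-load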

  Configure kernel parameters for bridged traffic and forwarding

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

net.bridge.bridge-nf-call-ip6tables = 1   # pass bridged IPv6 traffic through ip6tables
net.bridge.bridge-nf-call-iptables = 1    # pass bridged IPv4 traffic through iptables, required for pod networking
net.ipv4.ip_forward = 1                   # enable IPv4 packet forwarding

 sysctl -p /etc/sysctl.d/k8s.conf
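To confirm the parameters took effect:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward    # both should print "= 1"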

  Install Docker

# Step 1: install the required system utilities
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the Docker repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: refresh the cache and install Docker CE
yum makecache fast
yum -y install docker-ce
# Step 4: start the Docker service
service docker start

  Configure the Kubernetes yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
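With the repository in place, the versions available for pinning can be listed before installing (an optional check, since this guide targets 1.20.6 specifically):

yum list kubelet kubeadm kubectl --showduplicates | sort -r | head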

  Set up an hourly time-sync cron job

 crontab -e
crontab: installing new crontab
 crontab -l
0 */1 * * * /usr/sbin/ntpdate time.windows.com >/dev/null
systemctl restart crond.service 

  Load the IPVS kernel modules

cd /etc/sysconfig/modules/
cat ipvs.modules 
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then    # only load modules that exist on this kernel
    /sbin/modprobe ${kernel_module}
  fi
done
chmod +x ipvs.modules     # make the script executable
./ipvs.modules            # run it
lsmod | grep ip_vs        # verify the modules are loaded
ip_vs_ftp              13079  0 
nf_nat                 26787  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145497  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          133095  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

  Start and enable Docker

systemctl start docker.service 
 systemctl enable docker.service 

  Configure the registry mirror and set the cgroup driver to systemd (the entries are comma-separated; daemon.json is strict JSON, so no inline comments)

cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["http://f1361db2.m.daocloud.io"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}

  Restart Docker

systemctl restart docker
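After the restart, verify that Docker picked up the new driver (a mismatch with the kubelet shows up later as crash-looping control-plane pods):

docker info 2>/dev/null | grep -i 'cgroup driver'    # should report: Cgroup Driver: systemd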

  Install the Kubernetes packages needed for initialization

 yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
systemctl enable kubelet.service 

  Configure nginx + keepalived for a highly available apiserver

Allow both master nodes to bind to addresses not assigned locally, so nginx can listen on the VIP even on the backup node:

echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf
sysctl -p

  Install nginx, keepalived, and nginx-mod-stream (the layer-4 stream module)

yum -y install nginx keepalived nginx-mod-stream

  The nginx configuration is identical on both masters

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing for the two master kube-apiserver instances
stream {

    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
       server 192.168.10.29:6443;   # Master1 APISERVER IP:PORT
       server 192.168.10.30:6443;   # Master2 APISERVER IP:PORT
    }
    
    server {
       listen 192.168.10.28:16443; # nginx is co-located with the masters, so listen on the VIP; 16443 avoids clashing with apiserver's own 6443
       proxy_pass k8s-apiserver;
    }
}

http {
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile            on;
    tcp_nopush          on;
    tcp_nodelay         on;
    keepalive_timeout   65;
    types_hash_max_size 2048;

    include             /etc/nginx/mime.types;
    default_type        application/octet-stream;

    server {
        listen       80 default_server;
        server_name  _;

        location / {
        }
    }
}
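Before relying on the stream proxy, validate the configuration and confirm the listener (binding 192.168.10.28:16443 succeeds on the backup only because of the ip_nonlocal_bind setting above):

nginx -t                     # syntax check
systemctl restart nginx
ss -lntp | grep 16443        # nginx should be listening on 192.168.10.28:16443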

  Keepalived configuration on the primary (MASTER) node

vim /etc/keepalived/keepalived.conf 
global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from ka1@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   vrrp_mcast_group4 224.111.111.111
}
 
vrrp_script chk_ng {
    script "ss -lntp | grep  192.168.10.28"    # healthy while nginx is listening on the VIP
    interval 2
    weight -10
    fall 2
    rise 2
}
 
vrrp_instance External_1 {
    state MASTER
    interface ens33
    virtual_router_id 171
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1402b1b5
    }
    virtual_ipaddress {
        192.168.10.28/24
    }
    track_script {
        chk_ng
    }
}

  Keepalived configuration on the backup node

cat   /etc/keepalived/keepalived.conf 
global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from ka2@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   vrrp_mcast_group4 224.111.111.111
}
 
vrrp_script chk_ng {
    script "ss -lntp | grep 192.168.10.28" 
    interval 2
    weight -10
    fall 2
    rise 2
}
 
vrrp_instance External_1 {
    state BACKUP
    interface ens33
    virtual_router_id 171
    priority 95
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1402b1b5
    }
    virtual_ipaddress {
        192.168.10.28/24
    }
    track_script {
        chk_ng
    }
}

  Enable the services at boot

 systemctl enable keepalived.service 
 systemctl enable nginx.service 
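Once both services are started, the VIP should sit on the MASTER node; a quick failover check (assuming interface ens33 as in the configs above):

systemctl start nginx keepalived
ip addr show ens33 | grep 192.168.10.28    # the VIP should appear on the current MASTER only
systemctl stop nginx                       # on the MASTER: chk_ng fails and the VIP moves to the backup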

  Initialize the cluster: create the config file in root's home directory on master-1

[root@master-1 ~]# vim kubeadm-config.yaml 
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.6
controlPlaneEndpoint: 192.168.10.28:16443
imageRepository: registry.aliyuncs.com/google_containers
apiServer:
  certSANs:
  - 192.168.10.28
  - 192.168.10.29
  - 192.168.10.30
  - 192.168.10.31
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.10.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
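Optionally, the control-plane images can be pulled in advance so the init step itself runs faster:

[root@master-1 ~]# kubeadm config images pull --config kubeadm-config.yaml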

  Run the initialization

[root@master-1 ~]# kubeadm init --config kubeadm-config.yaml
[init] Using Kubernetes version: v1.20.6
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.11. Latest validated version: 19.03
...(output omitted)...
Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.10.28:16443 --token q67anl.4yjg1lvquk63dp5r \
    --discovery-token-ca-cert-hash sha256:921b72ea60e0d84050ad24b8aa9790d53bd0d38d6cb13624ecdf33f99a4041df \
    --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.28:16443 --token q67anl.4yjg1lvquk63dp5r \
    --discovery-token-ca-cert-hash sha256:921b72ea60e0d84050ad24b8aa9790d53bd0d38d6cb13624ecdf33f99a4041df 

[root@master-1 ~]#  mkdir -p $HOME/.kube
[root@master-1 ~]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-1 ~]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master-1 ~]# 

  Create the certificate directories on master-2

[root@master-2 modules]# cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

  Copy the certificate directory from master-1, then delete on master-2 the certificates that kubeadm must regenerate for that node

[root@master-1 ~]# scp -r /etc/kubernetes/pki/* master-2:/etc/kubernetes/pki/
[root@master-2 ~]# rm -f  /etc/kubernetes/pki/apiserver*
[root@master-2 ~]# rm -f  /etc/kubernetes/pki/etcd/healthcheck-client.* /etc/kubernetes/pki/etcd/peer.* /etc/kubernetes/pki/etcd/server.*
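A sketch of an alternative that avoids the manual scp/rm dance: kubeadm can upload the certificates into the cluster and hand out a decryption key (the actual key and token values are elided here):

[root@master-1 ~]# kubeadm init phase upload-certs --upload-certs    # prints a certificate key
[root@master-2 ~]# kubeadm join 192.168.10.28:16443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <certificate-key>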

  Note: the join command can be printed again on master-1 at any time (the token expires after 24 hours, so regenerate it when needed). To join a control-plane node, simply append the --control-plane flag to the command:

[root@master-1 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.10.28:16443 --token wcocyz.3ooph0ydisfgkzey     --discovery-token-ca-cert-hash sha256:921b72ea60e0d84050ad24b8aa9790d53bd0d38d6cb13624ecdf33f99a4041df 

  Join master-2 to the cluster as a control-plane node

[root@master-2 ~]# kubeadm join 192.168.10.28:16443 --token q67anl.4yjg1lvquk63dp5r     --discovery-token-ca-cert-hash sha256:921b72ea60e0d84050ad24b8aa9790d53bd0d38d6cb13624ecdf33f99a4041df     --control-plane
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

  Join the worker node

[root@node-1 ~]# kubeadm join 192.168.10.28:16443 --token wcocyz.3ooph0ydisfgkzey     --discovery-token-ca-cert-hash sha256:921b72ea60e0d84050ad24b8aa9790d53bd0d38d6cb13624ecdf33f99a4041df
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.11. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

  Check the cluster status

[root@master-1 ~]# kubectl get nodes
NAME       STATUS     ROLES                  AGE    VERSION
master-1   NotReady   control-plane,master   37m    v1.20.6
master-2   NotReady   control-plane,master   12m    v1.20.6
node-1     NotReady   <none>                 106s   v1.20.6

  Install the Calico network plugin

[root@master-1 ~]# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
[root@master-1 ~]# kubectl get nodes
NAME       STATUS   ROLES                  AGE    VERSION
master-1   Ready    control-plane,master   45m    v1.20.6
master-2   Ready    control-plane,master   19m    v1.20.6
node-1     Ready    <none>                 9m8s   v1.20.6

  View detailed pod information

[root@master-1 ~]# kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE       NOMINATED NODE   READINESS GATES
calico-kube-controllers-558995777d-66vfv   1/1     Running   0          2m51s   10.244.84.130   node-1     <none>           <none>
calico-node-dmlrb                          1/1     Running   0          2m50s   192.168.10.30   master-2   <none>           <none>
calico-node-wgtnv                          1/1     Running   0          2m50s   192.168.10.29   master-1   <none>           <none>
calico-node-xgl9t                          1/1     Running   0          2m50s   192.168.10.31   node-1     <none>           <none>
coredns-7f89b7bc75-5fqvb                   1/1     Running   0          44m     10.244.84.131   node-1     <none>           <none>
coredns-7f89b7bc75-9wqcb                   1/1     Running   0          44m     10.244.84.129   node-1     <none>           <none>
etcd-master-1                              1/1     Running   0          44m     192.168.10.29   master-1   <none>           <none>
etcd-master-2                              1/1     Running   0          18m     192.168.10.30   master-2   <none>           <none>
kube-apiserver-master-1                    1/1     Running   0          44m     192.168.10.29   master-1   <none>           <none>
kube-apiserver-master-2                    1/1     Running   0          18m     192.168.10.30   master-2   <none>           <none>
kube-controller-manager-master-1           1/1     Running   1          44m     192.168.10.29   master-1   <none>           <none>
kube-controller-manager-master-2           1/1     Running   0          18m     192.168.10.30   master-2   <none>           <none>
kube-proxy-bkl67                           1/1     Running   0          8m42s   192.168.10.31   node-1     <none>           <none>
kube-proxy-c7tqq                           1/1     Running   0          44m     192.168.10.29   master-1   <none>           <none>
kube-proxy-mjxzz                           1/1     Running   0          18m     192.168.10.30   master-2   <none>           <none>
kube-scheduler-master-1                    1/1     Running   1          44m     192.168.10.29   master-1   <none>           <none>
kube-scheduler-master-2                    1/1     Running   0          18m     192.168.10.30   master-2   <none>           <none>

  Test the network

[root@master-1 ~]#  kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # ping www.baidu.com
PING www.baidu.com (103.235.46.39): 56 data bytes
64 bytes from 103.235.46.39: seq=0 ttl=127 time=33.008 ms
64 bytes from 103.235.46.39: seq=1 ttl=127 time=29.207 ms
64 bytes from 103.235.46.39: seq=2 ttl=127 time=27.405 ms
^C
--- www.baidu.com ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 27.405/29.873/33.008 ms
/ # 
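The same busybox pod is also handy for confirming that cluster DNS works (the kubernetes.default service always exists):

/ # nslookup kubernetes.default.svc.cluster.local    # should resolve via the cluster DNS service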

  Deploy a test application

[root@master-1 ~]# cat tomcat.yaml 
apiVersion: v1  # Pod is part of the core v1 API group
kind: Pod  # the resource to create is a Pod
metadata:  # metadata
  name: demo-pod  # name of the pod
  namespace: default  # namespace the pod lives in
  labels:
    app: myapp  # labels attached to the pod
    env: dev
spec:
  containers:  # containers is a list, so several entries (each with its own name) are allowed
  - name: tomcat-pod-java  # container name
    ports:
    - containerPort: 8080
    image: tomcat:8.5-jre8-alpine  # image the container runs
    imagePullPolicy: IfNotPresent

[root@master-1 ~]# cat tomcat-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: tomcat
spec:
  type: NodePort
  ports:
    - port: 8080
      nodePort: 30080
  selector:
    app: myapp
    env: dev
[root@master-1 ~]#  kubectl apply -f tomcat.yaml
[root@master-1 ~]# kubectl apply -f tomcat-service.yaml
[root@master-1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.10.0.1      <none>        443/TCP          62m
tomcat       NodePort    10.10.69.243   <none>        8080:30080/TCP   3s

  Open any node's IP on port 30080 in a browser
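The same check works from the command line on any node, since kube-proxy opens the NodePort cluster-wide (a quick sketch; expect a Tomcat response):

curl -I http://192.168.10.31:30080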

 
