Kubernetes - Environment Setup

Part 1: Prepare the Virtual Machine

Install a clean CentOS 7 virtual machine.

Full installation tutorial: http://www.xuexianqi.top/archives/666.html
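
A quick optional check (my addition, assuming a stock CentOS 7 install) to confirm the release and the running kernel:
# Confirm the OS release and the kernel currently in use
cat /etc/redhat-release
uname -r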

Part 2: Environment Preparation (Building the Template Machine)

Note:

  • It is recommended to run all of the commands below in Windows Terminal (if you have it); FinalShell is only needed for uploading files.
  • Wherever a step shows two shell blocks, the first contains just the commands and the second shows the actual execution output, so you only need to follow the first one.
  • If a command that uses cat with a heredoc fails, manually copy the content between <<EOF and the closing EOF, open the target file with vim, and paste it in.

1. Configure the yum repositories

① Back up the original yum repo file

cp /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak

② Configure the Aliyun yum repo

curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

③ Rebuild the yum cache

yum makecache

④ Update the system (excluding kernel packages)

yum update -y --exclude=kernel*
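
Optionally (not part of the original steps), confirm that the Aliyun mirrors are now the active repositories:
# base/extras/updates should point at mirrors.aliyun.com
yum repolist enabled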

2. Install the required tools

yum install wget lrzsz expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y

3. Install a specific kernel version

① Download the kernel packages

Link: https://pan.baidu.com/s/1wEkaV3qW_6aPlJoySXWD9w
Extraction code: ul8a

② Upload the kernel packages with FinalShell or XShell

Use one of the tools above to upload the two kernel packages to the /root directory.

After the upload, you should see the files there.

③ Install the kernel

Commands to run (same pattern below)
yum localinstall -y kernel-lt*

grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg

grubby --default-kernel
Actual execution output (same pattern below)
# Install the kernel packages locally
[root@localhost ~]# yum localinstall -y kernel-lt*
Loaded plugins: fastestmirror
...


# Set the default kernel
[root@localhost ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-4.4.246-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-4.4.246-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
...


# Check the default kernel
[root@localhost ~]# grubby --default-kernel
/boot/vmlinuz-4.4.246-1.el7.elrepo.x86_64
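
The newly installed kernel only becomes the running kernel after a reboot; assuming the kernel-lt 4.4 packages above were installed, you can reboot and verify (my addition, not in the original steps):
# Reboot into the new default kernel, then check the running version after logging back in
reboot
uname -r    # should now report the elrepo kernel (4.4.x) instead of 3.10.0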

4. Disable SELinux

vim /etc/selinux/config 

i

SELINUX=disabled

Esc

:wq

setenforce 0
# Open /etc/selinux/config with vim
[root@localhost ~]# vim /etc/selinux/config

# Press i to enter insert mode
i

# Set SELINUX to disabled
SELINUX=disabled

# Press Esc to leave insert mode
Esc

# Type :wq to save and quit
:wq

# Turn SELinux off for the current session (the config file change takes effect after a reboot)
[root@localhost ~]# setenforce 0
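
If you prefer not to edit the file by hand, the same change can be made non-interactively; this is just an equivalent sketch, not part of the original steps:
# Set SELINUX=disabled in the config and turn SELinux off for this session
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
setenforce 0
getenforce    # should print Permissive now (Disabled after a reboot)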

5. Speed up SSH connections

vim /etc/ssh/sshd_config

i

UseDNS no

Esc

:wq

systemctl restart sshd
# Open /etc/ssh/sshd_config with vim
vim /etc/ssh/sshd_config

# Press i to enter insert mode
i

# Find the line #UseDNS yes and change it to UseDNS no (use the arrow keys to scroll down; it is fairly far down in the file)
UseDNS no

# Press Esc to leave insert mode
Esc

# Type :wq to save and quit
:wq

# Restart the sshd service to apply the change
systemctl restart sshd
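
The same edit can also be scripted; a minimal sketch, equivalent to the vim steps above:
# Set UseDNS no, restart sshd, and confirm the effective value
sed -i 's/^#\?UseDNS.*/UseDNS no/' /etc/ssh/sshd_config
systemctl restart sshd
sshd -T | grep -i usedns    # should print: usedns no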

6. Install IPVS

yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
 
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
# Install the required dependencies
[root@localhost ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp


# Redirect the following content into /etc/sysconfig/modules/ipvs.modules
[root@localhost ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
> #!/bin/bash
> ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
> for kernel_module in \${ipvs_modules}; do
> /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
> if [ $? -eq 0 ]; then
> /sbin/modprobe \${kernel_module}
> fi
> done
> EOF


# Make the file executable, run it, and confirm the ip_vs modules are loaded
[root@localhost ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
modprobe: FATAL: Module ip_vs_fo not found.
ip_vs_ftp              13079  0
ip_vs_sed              12519  0
ip_vs_nq               12516  0
ip_vs_sh               12688  0
ip_vs_dh               12688  0
ip_vs_lblcr            12922  0
ip_vs_lblc             12819  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  0
ip_vs_wlc              12519  0
ip_vs_lc               12516  0
ip_vs                 145458  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_nat                 26583  3 ip_vs_ftp,nf_nat_ipv4,nf_nat_ipv6
nf_conntrack          139264  7 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
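
The "modprobe: FATAL: Module ip_vs_fo not found" line simply means that module is not available in the running kernel and can be ignored. If you also want the modules loaded automatically at every boot, an alternative sketch (not in the original post) is to let systemd-modules-load handle it:
# Have systemd load the core IPVS modules at boot
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack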

7. Tune the kernel parameters for Kubernetes

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
 
sysctl --system
# Redirect the following content into /etc/sysctl.d/k8s.conf
[root@localhost ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.ipv4.ip_forward = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.bridge.bridge-nf-call-ip6tables = 1
> fs.may_detach_mounts = 1
> vm.overcommit_memory=1
> vm.panic_on_oom=0
> fs.inotify.max_user_watches=89100
> fs.file-max=52706963
> fs.nr_open=52706963
> net.ipv4.tcp_keepalive_time = 600
> net.ipv4.tcp.keepaliv.probes = 3
> net.ipv4.tcp_keepalive_intvl = 15
> net.ipv4.tcp.max_tw_buckets = 36000
> net.ipv4.tcp_tw_reuse = 1
> net.ipv4.tcp.max_orphans = 327680
> net.ipv4.tcp_orphan_retries = 3
> net.ipv4.tcp_syncookies = 1
> net.ipv4.tcp_max_syn_backlog = 16384
> net.ipv4.ip_conntrack_max = 65536
> net.ipv4.tcp_max_syn_backlog = 16384
> net.ipv4.top_timestamps = 0
> net.core.somaxconn = 16384
> EOF


# Apply the configuration immediately
[root@localhost ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.ipv4.ip_forward = 1
fs.may_detach_mounts = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_max_syn_backlog = 16384
net.core.somaxconn = 16384
* Applying /etc/sysctl.conf ...
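
Note that the two net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded, which is why they are missing from the applied output above. A hedged extra step, not in the original post:
# Load br_netfilter (now and at boot) so the bridge sysctls can be applied
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system
sysctl net.bridge.bridge-nf-call-iptables    # should now print 1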

8. Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum install docker-ce -y

mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://rxsa4cyh.mirror.aliyuncs.com"]
}
EOF

systemctl daemon-reload

systemctl restart docker

systemctl enable --now docker.service
# Install the dependencies Docker needs
[root@localhost ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
 ...


# Add the Docker repository
[root@localhost ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
grabbing file https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo


# Install Docker CE
[root@localhost ~]# yum install docker-ce -y
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
docker-ce-stable


# Create /etc/docker
[root@localhost ~]# mkdir -p /etc/docker


# Configure the registry mirror
[root@localhost ~]# tee /etc/docker/daemon.json <<-'EOF'
> {
>   "registry-mirrors": ["https://rxsa4cyh.mirror.aliyuncs.com"]
> }
> EOF
{
  "registry-mirrors": ["https://rxsa4cyh.mirror.aliyuncs.com"]
}

# Reload the systemd configuration
[root@localhost ~]# systemctl daemon-reload


# Restart the Docker service
[root@localhost ~]# systemctl restart docker


# Enable the Docker service at boot (and start it now)
[root@localhost ~]# systemctl enable --now docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
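
As an optional check (not in the original post), confirm that Docker is running and picked up the registry mirror:
# The mirror configured in daemon.json should appear under "Registry Mirrors"
systemctl is-active docker
docker info | grep -A 1 -i "registry mirrors"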

9. Synchronize the time

yum install ntp -y
 
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' > /etc/timezone
 
ntpdate time2.aliyun.com
# Install the required package
[root@localhost ~]# yum install ntp -y
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
 ...
 
 
# Set the timezone to Asia/Shanghai
[root@localhost ~]# ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
[root@localhost ~]# echo 'Asia/Shanghai' > /etc/timezone


# Sync the time from the Aliyun NTP server
[root@localhost ~]# ntpdate time2.aliyun.com
 7 Dec 14:29:20 ntpdate[26271]: adjust time server 203.107.6.88 offset 0.026339 sec
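
ntpdate only performs a one-off sync; to keep the clock synchronized continuously you could also enable the ntpd service installed above (my addition, not in the original steps):
# Run ntpd as a service so the time stays in sync
systemctl enable --now ntpd
systemctl is-active ntpd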

10. Disable the system firewall

systemctl disable --now firewalld
# Stop the firewall now and keep it disabled at boot
[root@localhost ~]# systemctl disable --now firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

11. Configure the Kubernetes repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 
setenforce 0
 
yum install -y kubelet kubeadm kubectl
 
systemctl enable kubelet && systemctl start kubelet
# Redirect the following content into /etc/yum.repos.d/kubernetes.repo
[root@localhost ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF


# Make sure SELinux is permissive for the current session
[root@localhost ~]# setenforce 0


# Install the Kubernetes components
[root@localhost ~]# yum install -y kubelet kubeadm kubectl
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
 * base: mirrors.aliyun.com
 * extras: mirrors.aliyun.com
 * updates: mirrors.aliyun.com
 ...
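
Optionally, confirm what was installed and that kubelet is enabled (at the time of writing the repo delivered v1.19.x, as the kubeadm init output later shows):
# Check the installed versions and the kubelet service state
kubeadm version
kubectl version --client
systemctl is-enabled kubelet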

Note:

Once you finish these steps, get into the good habit of taking a snapshot of the VM.

Part 3: Cluster Preparation

1. Cluster machines

Three virtual machines are needed in total (everything done so far was to build the template machine).

VM                     External (eth0)   Internal (eth1)
kubernetes-master-01   192.168.50.50     172.16.0.50
kubernetes-node-01     192.168.50.53     172.16.0.53
kubernetes-node-02     192.168.50.54     172.16.0.54

2. Clone the template machine 3 times

Shut down the template machine, then right-click it - Manage - Clone

① Click: Next

② Select "The current state in the virtual machine" - Next

③ Select "Create a full clone" - Next

  • Create a linked clone: uses less disk space
  • Create a full clone: makes a complete copy of the original files and uses more disk space

④ Set the virtual machine name and installation path

Below is my configuration (for reference only; adjust it as you like)
VM                     Installation path
kubernetes-master-01   D:\Software\VMspace\Kubernetes_Test\kubernetes-master-01
kubernetes-node-01     D:\Software\VMspace\Kubernetes_Test\kubernetes-node-01
kubernetes-node-02     D:\Software\VMspace\Kubernetes_Test\kubernetes-node-02

⑤ When the clone finishes, click: Close

⑥ Repeat the steps above until 3 virtual machines have been cloned

3. Configure the two network adapters

① Use the same addresses as before:

VM                     External (eth0)   Internal (eth1)
kubernetes-master-01   192.168.50.50     172.16.0.50
kubernetes-node-01     192.168.50.53     172.16.0.53
kubernetes-node-02     192.168.50.54     172.16.0.54

Note:

  • The username and password are the same as on the template machine
  • The network adapter layout is also the same as on the template machine
  • The following operations are performed inside VMware Workstation
  • For eth0, set a static IP (each machine needs a different one) and delete the UUID line
  • For eth1, only the LAN-segment (internal) IP needs to be set; delete the UUID line here as well

② Configure kubernetes-master-01

Configure eth0
vim /etc/sysconfig/network-scripts/ifcfg-eth0
dd
i
IPADDR=192.168.50.50
Esc
:wq
# Open /etc/sysconfig/network-scripts/ifcfg-eth0 with vim
vim /etc/sysconfig/network-scripts/ifcfg-eth0

# Delete the UUID line (move the cursor onto it and press dd to delete the whole line)
dd

# Press i to enter insert mode
i

# Set IPADDR to 192.168.50.50
IPADDR=192.168.50.50

# Press Esc to leave insert mode
Esc

# Type :wq to save and quit
:wq

Configure eth1
vim /etc/sysconfig/network-scripts/ifcfg-eth1
dd
i
BOOTPROTO=static
IPADDR=172.16.0.50
Esc
:wq
# Open /etc/sysconfig/network-scripts/ifcfg-eth1 with vim
vim /etc/sysconfig/network-scripts/ifcfg-eth1

# Delete the UUID line (move the cursor onto it and press dd to delete the whole line)
dd

# Press i to enter insert mode
i

# Set BOOTPROTO to static
BOOTPROTO=static

# Set IPADDR to 172.16.0.50
IPADDR=172.16.0.50

# Press Esc to leave insert mode
Esc

# Type :wq to save and quit
:wq

Restart the network service
systemctl restart network
Check the IP (confirm the changes took effect)
ifconfig
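
If you would rather not repeat the vim editing on every clone, the edits can be scripted; a hedged sketch for kubernetes-master-01 (it assumes IPADDR is not already present in the ifcfg files; adjust the addresses for each machine):
# Delete the UUID lines, force static addressing, and append the IPs
sed -i '/^UUID=/d' /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth1
sed -i 's/^BOOTPROTO=.*/BOOTPROTO=static/' /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/ifcfg-eth1
echo 'IPADDR=192.168.50.50' >> /etc/sysconfig/network-scripts/ifcfg-eth0
echo 'IPADDR=172.16.0.50' >> /etc/sysconfig/network-scripts/ifcfg-eth1
systemctl restart network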

③ Configure kubernetes-node-01

Configure eth0
vim /etc/sysconfig/network-scripts/ifcfg-eth0
dd
i
BOOTPROTO=static
IPADDR=192.168.50.53
Esc
:wq
Configure eth1
vim /etc/sysconfig/network-scripts/ifcfg-eth1
dd
i
IPADDR=172.16.0.53
Esc
:wq
Restart the network service
systemctl restart network
Check the IP
ifconfig

④ Configure kubernetes-node-02

Configure eth0
vim /etc/sysconfig/network-scripts/ifcfg-eth0
dd
i
BOOTPROTO=static
IPADDR=192.168.50.54
Esc
:wq
Configure eth1
vim /etc/sysconfig/network-scripts/ifcfg-eth1
dd
i
IPADDR=172.16.0.54
Esc
:wq
Restart the network service
systemctl restart network
Check the IP
ifconfig

4. Set the hostnames

① Configure kubernetes-master-01

hostnamectl set-hostname kubernetes-master-01
bash

② Configure kubernetes-node-01

hostnamectl set-hostname kubernetes-node-01
bash

③ Configure kubernetes-node-02

hostnamectl set-hostname kubernetes-node-02
bash

Note:

Once these steps are done, get into the good habit of taking snapshots of all 3 virtual machines.

Part 4: Configure the master virtual machine

Note:

All of the following operations are performed on kubernetes-master-01.

1. Initialize the node

kubeadm init \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
# Initialize the control plane
[root@localhost ~]# kubeadm init \
> --image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
> --service-cidr=10.96.0.0/12 \
> --pod-network-cidr=10.244.0.0/16
W1207 16:02:41.025634    1463 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.4
...

When the following message appears, go on and run the commands shown after it

# Message
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.50.50:6443 --token faczpr.5ehpppudjxqb158l \
    --discovery-token-ca-cert-hash sha256:d09ca70bd1f90bb041a6f93b30391ae17d5ea995798bc11c9e2f371f462efd72
Run the commands below directly
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
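
At this point kubectl should be able to reach the new control plane; an optional sanity check (not in the original post):
# Confirm the API server responds and the control-plane pods are starting
kubectl cluster-info
kubectl get pods -n kube-system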

2. Edit /etc/hosts

vim /etc/hosts

i

172.16.0.50	kubernetes-master-01
172.16.0.53	kubernetes-node-01
172.16.0.54	kubernetes-node-02

Esc

:wq
# Open /etc/hosts with vim
[root@localhost ~]# vim /etc/hosts

# Press i to enter insert mode
i

# Append the following lines at the end of the file
172.16.0.50	kubernetes-master-01
172.16.0.53	kubernetes-node-01
172.16.0.54	kubernetes-node-02

# Press Esc to leave insert mode
Esc

# Type :wq to save and quit
:wq
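
Equivalently, the entries can be appended without opening vim, using the same heredoc style as the earlier steps:
# Append the cluster entries to /etc/hosts in one go
cat >> /etc/hosts <<EOF
172.16.0.50	kubernetes-master-01
172.16.0.53	kubernetes-node-01
172.16.0.54	kubernetes-node-02
EOF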

3. Sync the hosts file (from this point on, all 3 virtual machines need to stay powered on)

for i in kubernetes-node-01 kubernetes-node-02; do 
scp /etc/hosts root@$i:/etc/hosts
done

yes

kubectl get nodes
# Copy this node's hosts file to the other 2 nodes
[root@localhost ~]# for i in kubernetes-node-01 kubernetes-node-02; do
> scp /etc/hosts root@$i:/etc/hosts
> done
The authenticity of host 'kubernetes-node-01 (172.16.0.53)' can't be established.
ECDSA key fingerprint is SHA256:OXPFjAa4eSQzPHFGbLYorPgbo2DYoy7eSpUTfc7dLtE.
ECDSA key fingerprint is MD5:e5:da:17:9c:3a:d0:db:ca:0e:8b:18:36:00:a9:f6:f1.
# Confirm the host key for kubernetes-node-01
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'kubernetes-node-01,172.16.0.53' (ECDSA) to the list of known hosts.
# Enter the password for kubernetes-node-01
root@kubernetes-node-01's password:
hosts                                                                                 100%  254   385.9KB/s   00:00
The authenticity of host 'kubernetes-node-02 (172.16.0.54)' can't be established.
ECDSA key fingerprint is SHA256:OXPFjAa4eSQzPHFGbLYorPgbo2DYoy7eSpUTfc7dLtE.
ECDSA key fingerprint is MD5:e5:da:17:9c:3a:d0:db:ca:0e:8b:18:36:00:a9:f6:f1.
# Confirm the host key for kubernetes-node-02
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'kubernetes-node-02,172.16.0.54' (ECDSA) to the list of known hosts.
# Enter the password for kubernetes-node-02
root@kubernetes-node-02's password:
hosts

# List the nodes
[root@kubernetes-master-01 ~]# kubectl get nodes
NAME                   STATUS     ROLES    AGE     VERSION
kubernetes-master-01   NotReady   master   2m36s   v1.19.4
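
The master reports NotReady because no pod network add-on has been deployed yet (the kubeadm init output above asks for one). Flannel is a common choice; the manifest URL below is the one widely used around the time of this post and may have moved since, so treat it as an assumption:
# Deploy a pod network (example: flannel), then re-check the node status
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl get nodes    # the node should become Ready once the flannel pods are running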