
Kubernetes (k8s) Installation and Configuration

I. Initialization

 

1. Prepare three nodes (192.168.67.130-132): set the hostnames, /etc/hosts entries, time zone, and DNS, and run setenforce 0

hostnamectl  set-hostname  k8s-master01
hostnamectl  set-hostname  k8s-node01
hostnamectl  set-hostname  k8s-node02
 
[root@k8s-master01 ~]# cat /etc/hosts|grep k8
192.168.67.130 k8s-master01 m1
192.168.67.131 k8s-node01 n1
192.168.67.132 k8s-node02 n2
 
 
rm -rf /etc/localtime  &&  ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
 
 
echo "nameserver 8.8.8.8" >> /etc/resolv.conf

  

 

2. Disable firewalld, swap, SELinux, postfix, and NetworkManager

systemctl  stop firewalld  &&  systemctl  disable firewalld
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config 
systemctl stop postfix && systemctl disable postfix
systemctl stop  NetworkManager && systemctl  disable NetworkManager

  

3. Install dependency packages

yum install -y conntrack ntpdate ntp ipvsadm ipset  iptables curl sysstat libseccomp wget  vim net-tools git iptables-services   &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save

4. Tune kernel parameters for Kubernetes
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # avoid swap; it is only used when the system is out of memory
vm.overcommit_memory=1 # do not check whether physical memory is sufficient
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

sysctl -p kubernetes.conf
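Two caveats worth checking: the net.bridge.* sysctls only exist once the br_netfilter module is loaded, and net.ipv4.tcp_tw_recycle was removed in kernel 4.12+, so sysctl will reject that line after the kernel upgrade in step 6. A quick hedged check (module name is the stock CentOS one):

modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward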

 

5. Configure rsyslogd and systemd-journald

mkdir /var/log/journal # directory for persistent log storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress archived logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Cap total disk usage at 10G
SystemMaxUse=10G

# Cap each journal file at 200M
SystemMaxFileSize=200M

# Retain logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald 
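To confirm the persistent journal is active, check its disk usage (a standard journalctl flag):

journalctl --disk-usage
# reports how much space archived and active journals use on disk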

 

6. Upgrade the system kernel

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
 
#Installed:
#kernel-lt.x86_64 0:5.4.185-1.el7.elrepo
 
#[root@localhost ~]# cat /boot/grub2/grub.cfg |grep Core
#menuentry 'CentOS Linux (5.4.185-1.el7.elrepo.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-693.el7.x86_64-advanced-90620579-94b5-4d17-8c9e-69be7e7e8510'
 
# Set the default boot kernel and reboot
grub2-set-default 'CentOS Linux (5.4.185-1.el7.elrepo.x86_64) 7 (Core)'
init 6
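After the reboot, verify that the new kernel is actually running:

uname -r
# expected: 5.4.185-1.el7.elrepo.x86_64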

7. Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
 
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
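Note that on kernels 4.19 and later (including the 5.4 kernel installed above), nf_conntrack_ipv4 was merged into nf_conntrack, so the modprobe above fails there; a sketch of the adjusted lines:

modprobe -- nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack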

II. Docker and importing the images


1. Install Docker
# yum-config-manager is provided by yum-utils, so install the utilities first
yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager \
  --add-repo \
  http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum install -y docker-ce

 

2. Configure Docker

mkdir -pv /etc/docker  /etc/systemd/system/docker.service.d
 
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://f1bhsuge.mirror.aliyuncs.com"]
}
EOF
# Note: the file is /etc/docker/daemon.json and must be valid JSON (no comments);
# if you need a private registry, add a real "insecure-registries": ["www.zzx.com"] key.
 
 
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
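Confirm the cgroup driver change took effect (grepping standard docker info output):

docker info 2>/dev/null | grep -i 'cgroup driver'
# expected: Cgroup Driver: systemd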

  

3. Install kubeadm

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
 
yum -y  install  kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
 
systemctl enable kubelet.service

 

4. List the images to be installed

[root@k8s-master01 ~]# kubeadm config images list
I0319 21:45:39.690796   58635 version.go:248] remote version is much newer: v1.23.5; falling back to: stable-1.15
k8s.gcr.io/kube-apiserver:v1.15.12
k8s.gcr.io/kube-controller-manager:v1.15.12
k8s.gcr.io/kube-scheduler:v1.15.12
k8s.gcr.io/kube-proxy:v1.15.12
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1

 

5. Install the images (on all nodes; best to load the flannel image everywhere too)

# Pull automatically
kubeadm config images pull

# Or load pre-downloaded images: apiserver.tar  coredns.tar  etcd.tar  kubec-con-man.tar  pause.tar  proxy.tar  scheduler.tar
docker load -i <image>.tar

  docker pull quay.io/coreos/flannel:v0.12.0-amd64

6. Check the loaded images

[root@k8s-master01 ~]# docker images
REPOSITORY                                       TAG       IMAGE ID       CREATED       SIZE
rancher/mirrored-flannelcni-flannel              v0.17.0   9247abf08677   2 weeks ago   59.8MB
rancher/mirrored-flannelcni-flannel-cni-plugin   v1.0.1    ac40ce625740   8 weeks ago   8.1MB
k8s.gcr.io/kube-proxy                            v1.15.1   89a062da739d   2 years ago   82.4MB
k8s.gcr.io/kube-scheduler                        v1.15.1   b0b3c4c404da   2 years ago   81.1MB
k8s.gcr.io/kube-controller-manager               v1.15.1   d75082f1d121   2 years ago   159MB
k8s.gcr.io/kube-apiserver                        v1.15.1   68c3eb07bfc3   2 years ago   207MB
k8s.gcr.io/coredns                               1.3.1     eb516548c180   3 years ago   40.3MB
k8s.gcr.io/etcd                                  3.3.10    2c4adeb21b4f   3 years ago   258MB
k8s.gcr.io/pause                                 3.1       da86e6ba6ca1   4 years ago   742kB

  

7. Edit the kubeadm config file (controlPlaneEndpoint is what additional masters use to join in an HA setup)

kubeadm config print init-defaults > kubeadm-config.yaml    # then edit the file as follows (consolidated sketch below)

1. Change the advertise address to the master's IP:
<   advertiseAddress: 1.2.3.4
>   advertiseAddress: 192.168.67.130

2. Change the version:
< kubernetesVersion: v1.14.0
> kubernetesVersion: v1.15.1

3. After serviceSubnet: 10.96.0.0/12, add:
podSubnet: "10.244.0.0/16"

4. On the line after kubernetesVersion:, add:
controlPlaneEndpoint: 192.168.67.130:6443

5. At the end of the file, after scheduler: {}, add a new document starting with ---:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
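For reference, a sketch of how the edited kubeadm-config.yaml ends up, abridged to the fields touched above (the remaining v1beta2 defaults printed by kubeadm are kept as-is):

apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.67.130
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
controlPlaneEndpoint: 192.168.67.130:6443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: "10.244.0.0/16"
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs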

  

 

Initialize the master node (run on the master only; worker nodes are not initialized)

kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

 

Init log when controlPlaneEndpoint is set (the first join command is for adding masters, the second for adding worker nodes)

[root@k8s-master11 ~]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
Flag --experimental-upload-certs has been deprecated, use --upload-certs instead
W0325 14:38:13.577618    4660 strict.go:54] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta2", Kind:"ClusterConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "scheduler"
W0325 14:38:13.580013    4660 strict.go:54] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "SupportIPVSProxyMode"
[init] Using Kubernetes version: v1.15.1
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master11 localhost] and IPs [192.168.1.222 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master11 localhost] and IPs [192.168.1.222 127.0.0.1 ::1]
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master11 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.222 192.168.1.222]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 43.510140 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
32c6b111d2e3b0c00536e46c035dea7cf48b227d8c0ec5f22204a22dcbffadaa
[mark-control-plane] Marking the node k8s-master11 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master11 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
 
Your Kubernetes control-plane has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
You can now join any number of the control-plane node running the following command on each as root:
 
  kubeadm join 192.168.1.222:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de \
    --control-plane --certificate-key 32c6b111d2e3b0c00536e46c035dea7cf48b227d8c0ec5f22204a22dcbffadaa
 
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
 
Then you can join any number of worker nodes by running the following on each as root:
 
kubeadm join 192.168.1.222:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de

  

 

After initialization completes, check the containers:
[root@k8s-master01 ~]# docker ps -a
CONTAINER ID   IMAGE                  COMMAND                  CREATED         STATUS         PORTS     NAMES
9a334ebeff7b   89a062da739d           "/usr/local/bin/kube…"   4 minutes ago   Up 4 minutes             k8s_kube-proxy_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_0
a63518132184   k8s.gcr.io/pause:3.1   "/pause"                 4 minutes ago   Up 4 minutes             k8s_POD_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_0
b858b2c351c6   2c4adeb21b4f           "etcd --advertise-cl…"   6 minutes ago   Up 6 minutes             k8s_etcd_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_0
f7d24c8e46f5   b0b3c4c404da           "kube-scheduler --bi…"   6 minutes ago   Up 6 minutes             k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_0
14c87b4328a6   68c3eb07bfc3           "kube-apiserver --ad…"   6 minutes ago   Up 6 minutes             k8s_kube-apiserver_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_0
11fe3dcd2159   d75082f1d121           "kube-controller-man…"   6 minutes ago   Up 6 minutes             k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_0
2cc9bd668f88   k8s.gcr.io/pause:3.1   "/pause"                 6 minutes ago   Up 6 minutes             k8s_POD_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_0
fc775bda68d9   k8s.gcr.io/pause:3.1   "/pause"                 6 minutes ago   Up 6 minutes             k8s_POD_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_0
7307ddd0a491   k8s.gcr.io/pause:3.1   "/pause"                 6 minutes ago   Up 6 minutes             k8s_POD_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_0
3b61a057462b   k8s.gcr.io/pause:3.1   "/pause"                 6 minutes ago   Up 6 minutes             k8s_POD_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_0

Adding a master

Use the commands printed in the init log. If the token has expired, regenerate it first.
The following prints a fresh join command:
[root@k8s-master12 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.222:6443 --token 9gqlv3.ls8txw55cmv0b8cv     --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de
 
Then fetch a new certificate key:
[root@k8s-master12 ~]#  kubeadm init phase upload-certs --experimental-upload-certs
Flag --experimental-upload-certs has been deprecated, use --upload-certs instead
I0325 15:47:01.601649   42170 version.go:248] remote version is much newer: v1.23.5; falling back to: stable-1.15
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
b67f75146964cc75a21643fbce229a0a584101db2fb4a8411ef2a861dbabba48
 
Combine the two:
 
kubeadm join 192.168.1.222:6443 --token 9gqlv3.ls8txw55cmv0b8cv     --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de      --control-plane --certificate-key b67f75146964cc75a21643fbce229a0a584101db2fb4a8411ef2a861dbabba48

 

Configure admin.conf, otherwise kubectl errors out:

mkdir -p $HOME/.kube    # on all three nodes
[root@k8s-master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 ~]# scp -r /etc/kubernetes/admin.conf n1:$HOME/.kube/config
[root@k8s-master01 ~]# scp -r /etc/kubernetes/admin.conf n2:$HOME/.kube/config

If admin.conf is not configured, kubectl reports errors like:

[root@k8s-master01 ~]# kubectl get pod
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@k8s-node01 ~]# kubectl apply -f kube-flannel.yml
unable to recognize "kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused

 

Join the worker nodes to the cluster (tokens expire; if a node cannot join, fetch a new token — see the fix for a failed new-node join further below)

[root@k8s-node01 ~]# kubeadm join 192.168.67.130:6443 --token abcdef.0123456789abcdef     --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31

  

Check the nodes

[root@k8s-master01 ~]# kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   48m     v1.15.1
k8s-node01     NotReady   <none>   2m16s   v1.15.1
k8s-node02     NotReady   <none>   2m9s    v1.15.1

Configure flannel (running it on the master is enough; one master starts flannel on every node — verify with kubectl get po -A -o wide|grep flannel)

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

If the manifest cannot be downloaded, pull the image directly (quay.io/coreos/flannel   v0.12.0-amd64   4e9f801d2217):

docker pull quay.io/coreos/flannel:v0.12.0-amd64


  

Check the cluster status

[root@k8s-node01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.222:6443
KubeDNS is running at https://192.168.1.222:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
 
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
 
[root@k8s-node01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master11   Ready    master   60m   v1.15.1
k8s-master12   Ready    master   59m   v1.15.1
k8s-master13   Ready    master   45m   v1.15.1
k8s-node01     Ready    <none>   16m   v1.15.1
[root@k8s-node01 ~]#
[root@k8s-node01 ~]#
[root@k8s-node01 ~]# kubectl  get endpoints kube-controller-manager -n kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master13_a009f513-fc78-4fe9-b145-3349ddf97e32","leaseDurationSeconds":15,"acquireTime":"2022-03-25T06:59:42Z","renewTime":"2022-03-25T07:39:30Z","leaderTransitions":3}'
  creationTimestamp: "2022-03-25T06:39:07Z"
  name: kube-controller-manager
  namespace: kube-system
  resourceVersion: "6475"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager
  uid: f78caf1f-52c7-45e8-8d7e-f9af572cf2f8
[root@k8s-node01 ~]#
[root@k8s-node01 ~]#
[root@k8s-node01 ~]#   kubectl  get endpoints kube-scheduler -n kube-system -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master12_db4f2952-76dc-47e2-87e5-98f7f43dcddc","leaseDurationSeconds":15,"acquireTime":"2022-03-25T06:59:30Z","renewTime":"2022-03-25T07:39:37Z","leaderTransitions":3}'
  creationTimestamp: "2022-03-25T06:39:07Z"
  name: kube-scheduler
  namespace: kube-system
  resourceVersion: "6484"
  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler
  uid: b54f3201-de65-478d-880a-de7222156c3f

  

  

After flannel is configured, the node status changes to Ready

[root@k8s-master01 ~]# kubectl get node    
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   76m   v1.15.1
k8s-node01     Ready    <none>   30m   v1.15.1
k8s-node02     Ready    <none>   30m   v1.15.1
[root@k8s-master01 ~]# kubectl get pod
No resources found.
[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-rwdtr               1/1     Running   0          77m
coredns-5c98db65d4-zhqwb               1/1     Running   0          77m
etcd-k8s-master01                      1/1     Running   0          77m
kube-apiserver-k8s-master01            1/1     Running   0          77m
kube-controller-manager-k8s-master01   1/1     Running   0          77m
kube-flannel-ds-jrhz6                  1/1     Running   0          12m
kube-flannel-ds-kdmgx                  1/1     Running   0          12m
kube-flannel-ds-skqvq                  1/1     Running   0          12m
kube-proxy-hpgj4                       1/1     Running   0          77m
kube-proxy-q8rxb                       1/1     Running   0          31m
kube-proxy-ts8xr                       1/1     Running   0          31m
kube-scheduler-k8s-master01            1/1     Running   0          77m

 

[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-5c98db65d4-rwdtr 1/1 Running 0 18h 10.244.0.2 k8s-master01 <none> <none>
coredns-5c98db65d4-zhqwb 1/1 Running 0 18h 10.244.0.3 k8s-master01 <none> <none>
etcd-k8s-master01 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none>
kube-apiserver-k8s-master01 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none>
kube-controller-manager-k8s-master01 1/1 Running 1 18h 192.168.67.130 k8s-master01 <none> <none>
kube-flannel-ds-jrhz6 1/1 Running 0 17h 192.168.67.131 k8s-node01 <none> <none>
kube-flannel-ds-kdmgx 1/1 Running 0 17h 192.168.67.130 k8s-master01 <none> <none>
kube-flannel-ds-skqvq 1/1 Running 0 17h 192.168.67.132 k8s-node02 <none> <none>
kube-proxy-hpgj4 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none>
kube-proxy-q8rxb 1/1 Running 0 17h 192.168.67.131 k8s-node01 <none> <none>
kube-proxy-ts8xr 1/1 Running 0 17h 192.168.67.132 k8s-node02 <none> <none>
kube-scheduler-k8s-master01 1/1 Running 1 18h 192.168.67.130 k8s-master01 <none> <none>
 
[root@k8s-master01 ~]# kubectl get pod --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5c98db65d4-rwdtr 1/1 Running 0 12h
kube-system coredns-5c98db65d4-zhqwb 1/1 Running 0 12h
kube-system etcd-k8s-master01 1/1 Running 0 12h
kube-system kube-apiserver-k8s-master01 1/1 Running 0 12h
kube-system kube-controller-manager-k8s-master01 1/1 Running 1 12h
kube-system kube-flannel-ds-jrhz6 1/1 Running 0 11h
kube-system kube-flannel-ds-kdmgx 1/1 Running 0 11h
kube-system kube-flannel-ds-skqvq 1/1 Running 0 11h
kube-system kube-proxy-hpgj4 1/1 Running 0 12h
kube-system kube-proxy-q8rxb 1/1 Running 0 11h
kube-system kube-proxy-ts8xr 1/1 Running 0 11h
kube-system kube-scheduler-k8s-master01 1/1 Running 1 12h
 
 
[root@k8s-master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 12h

  

Create a deployment (no namespace specified)

[root@k8s-master01 ~]#  kubectl create deployment my-nginx-first --image=nginx
deployment.apps/my-nginx-first created
 
[root@k8s-master01 ~]#  kubectl get pod  -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          8s    10.244.1.11   k8s-node01   <none>           <none>
 
[root@k8s-master01 ~]#  kubectl get deployment -o wide
NAME             READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES   SELECTOR
my-nginx-first   1/1     1            1           24s   nginx        nginx    app=my-nginx-first
 
[root@k8s-master01 ~]# kubectl get svc  -o wide
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE   SELECTOR
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   18h   <none>
[root@k8s-master01 ~]# kubectl create svc clusterip my-nginx-first  --tcp=8081:80      
service/my-nginx-first created
 
[root@k8s-master01 ~]# kubectl get svc  -o wide
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE   SELECTOR
kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP    18h   <none>
my-nginx-first   ClusterIP   10.104.230.86   <none>        8081/TCP   15s   app=my-nginx-first
 
[root@k8s-master01 ~]# curl 10.104.230.86:8081
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
 
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
 
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
 
# Scale up: edit replicas: 2
[root@k8s-master01 ~]#  kubectl  edit deployment my-nginx-first
deployment.extensions/my-nginx-first edited
 
[root@k8s-master01 ~]# kubectl get pod  -o wide         
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-fv7rz   1/1     Running   0          8s      10.244.2.11   k8s-node02   <none>           <none>
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          3m20s   10.244.1.11   k8s-node01   <none>           <none>
 
[root@k8s-master01 ~]# kubectl delete pod my-nginx-first-54ccf4ff-fv7rz
pod "my-nginx-first-54ccf4ff-fv7rz" deleted
 
[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-k2xhx   1/1     Running   0          11s     10.244.2.12   k8s-node02   <none>           <none>
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          3m44s   10.244.1.11   k8s-node01   <none>           <none>

 

 

kubectl describe svc/<service-name>

[root@k8s-master01 ~]# kubectl describe svc/nginx-deployment
Name:              nginx-deployment
Namespace:         default
Labels:            app=nginx-deployment
Annotations:       <none>
Selector:          app=nginx-deployment
Type:              ClusterIP
IP:                10.101.173.31                          #CLUSTER-IP
Port:              8089-80  8089/TCP
TargetPort:        80/TCP
Endpoints:         10.244.1.19:80,10.244.2.21:80          #pods  IP
Session Affinity:  None
Events:            <none>

 

[root@k8s-master01 ~]# kubectl delete svc/nginx-deployment           
service "nginx-deployment" deleted
 
[root@k8s-master01 ~]# kubectl create svc nodeport nginx-deployment  --tcp=8011:80
service/nginx-deployment created
 
[root@k8s-master01 ~]#  kubectl get svc/nginx-deployment 
NAME               TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
nginx-deployment   NodePort   10.107.24.168   <none>        8011:31521/TCP   4m24s
 
# reachable from any of the nodes (130-132)
[root@k8s-master01 ~]# curl  192.168.67.131:31521        
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>

 

[root@k8s-master01 ~]# kubectl describe svc/nginx-deployment
Name:                     nginx-deployment
Namespace:                default
Labels:                   app=nginx-deployment
Annotations:              <none>
Selector:                 app=nginx-deployment
Type:                     NodePort
IP:                       10.107.24.168
Port:                     8011-80  8011/TCP
TargetPort:               80/TCP
NodePort:                 8011-80  31521/TCP
Endpoints:                10.244.1.19:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

  

Scale out with kubectl scale

[root@k8s-master01 ~]# kubectl create deploy nginx-scale   --image=nginx
deployment.apps/nginx-scale created
 
[root@k8s-master01 ~]# kubectl get   deploy/nginx-scale  
NAME          READY   UP-TO-DATE   AVAILABLE   AGE
nginx-scale   1/1     1            1           66s
 
[root@k8s-master01 ~]#  kubectl scale --replicas=3 deployment nginx-scale
deployment.extensions/nginx-scale scaled
[root@k8s-master01 ~]# kubectl get   deploy/nginx-scale                  
NAME          READY   UP-TO-DATE   AVAILABLE   AGE
nginx-scale   1/3     3            1           2m28s
[root@k8s-master01 ~]# kubectl get   deploy/nginx-scale -o wide
NAME          READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES   SELECTOR
nginx-scale   3/3     3            3           2m40s   nginx        nginx    app=nginx-scale
[root@k8s-master01 ~]# kubectl get   pod
NAME                                READY   STATUS        RESTARTS   AGE
nginx-scale-5ff9f49f4d-8xz4m        1/1     Running       0          25s
nginx-scale-5ff9f49f4d-kvr44        1/1     Running       0          25s
nginx-scale-5ff9f49f4d-t4h4c        1/1     Running       0          2m49s

 

Configure metrics-server

[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/archive/v0.3.6.tar.gz
[root@k8s-master01 ~]# tar xvf v0.3.6.tar.gz
[root@k8s-master01 ~]# cd metrics-server-0.3.6/deploy/1.8+/
[root@k8s-master01 1.8+]# vi metrics-server-deployment.yaml
# Change image and imagePullPolicy
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
# Add a command section
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname
# Finally, add a resources section
        resources:
          limits:
            cpu: 300m
            memory: 200Mi
          requests:
            cpu: 200m
            memory: 100Mi
 
[root@k8s-master01 ~]# kubectl apply -f metrics-server-0.3.6/deploy/1.8+/      # apply the whole directory, not a single yml
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
 
 
[root@k8s-master01 ~]# kubectl top node     # if this error appears, wait a bit and rerun
Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
 
 
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%    
k8s-master01   554m         27%    1010Mi          35%        
k8s-node01     242m         12%    838Mi           44%        
k8s-node02     180m         4%     491Mi           26%    
 
[root@k8s-master01 ~]# kubectl top po
NAME                                CPU(cores)   MEMORY(bytes)  
nginx-deployment-7f58cf9455-bhnn8   0m           6Mi            
nginx-scale-5ff9f49f4d-8xz4m        0m           1Mi            
nginx-scale-5ff9f49f4d-kvr44        0m           1Mi            
nginx-scale-5ff9f49f4d-t4h4c        0m           1Mi   

 

Configure HPA (kubectl autoscale)

[root@k8s-master01 ~]# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-scale-5ff9f49f4d-8xz4m        1/1     Running   0          23m
nginx-scale-5ff9f49f4d-kvr44        1/1     Running   0          23m
nginx-scale-5ff9f49f4d-t4h4c        1/1     Running   0          25m
 
[root@k8s-master01 ~]# kubectl set resources deployment nginx-scale   --limits=cpu=50m,memory=512Mi --requests=cpu=10m,memory=256Mi 
deployment.extensions/nginx-scale resource requirements updated
 
[root@k8s-master01 ~]#   kubectl autoscale deployment nginx-scale  --min=1 --max=10 --cpu-percent=50                   
horizontalpodautoscaler.autoscaling/nginx-scale autoscaled
[root@k8s-master01 ~]# kubectl get hpa  
NAME          REFERENCE                TARGETS         MINPODS   MAXPODS   REPLICAS   AGE
nginx-scale   Deployment/nginx-scale   <unknown>/50%   1         10        0          5s
 
#<unknown> resolves after a moment; it also stays <unknown> indefinitely if no resources limits are configured
[root@k8s-master01 ~]# kubectl get hpa
NAME          REFERENCE                TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
nginx-scale   Deployment/nginx-scale   0%/50%    1         10        3          24s
 
[root@k8s-master01 ~]# kubectl top pod
NAME                                CPU(cores)   MEMORY(bytes)        
nginx-scale-649d95bcb4-dnrjr        0m           1Mi            
nginx-scale-649d95bcb4-nkg8m        0m           3Mi            
nginx-scale-649d95bcb4-nlrjk        0m           3Mi
 
[root@k8s-master01 ~]#  kubectl create svc nodeport nginx-scale  --tcp=8011:80                     
service/nginx-scale created
[root@k8s-master01 ~]# kubectl get svc
NAME               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes         ClusterIP   10.96.0.1       <none>        443/TCP          3d3h
nginx-scale        NodePort    10.107.61.46    <none>        8011:30096/TCP   9s
 
 
 
 
[root@k8s-master01 ~]# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-7f58cf9455-bhnn8   1/1     Running   0          72m   10.244.1.78   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-nlrjk        1/1     Running   0          15m
 
[root@k8s-master01 ~]# kubectl top pod       
NAME                                CPU(cores)   MEMORY(bytes)      
nginx-scale-649d95bcb4-nlrjk        0m           3Mi   
 
 
# Start load testing to verify autoscaling; install the ab benchmark tool with: yum -y install httpd-tools
[root@k8s-node02 ~]#  while :;do ab -n 1000 -c 100   http://192.168.67.130:30096/ ;done
 
[root@k8s-master01 ~]# kubectl top pod
NAME                                CPU(cores)   MEMORY(bytes)   
nginx-scale-649d95bcb4-nlrjk        12m          3Mi       
 
[root@k8s-master01 ~]# kubectl get pod
NAME                                READY   STATUS              RESTARTS   AGE
nginx-scale-649d95bcb4-nlrjk        1/1     Running             0          17m
nginx-scale-649d95bcb4-q5qcm        0/1     ContainerCreating   0          2s
nginx-scale-649d95bcb4-w9hlf        0/1     ContainerCreating   0          2s
 
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                                READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-nlrjk        1/1     Running   0          17m   10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm        1/1     Running   0          21s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-w9hlf        1/1     Running   0          21s   10.244.1.84   k8s-node01   <none>           <none>
 
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                                READY   STATUS              RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-mgjjc        0/1     ContainerCreating   0          7s    <none>        k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-nlrjk        1/1     Running             0          18m   10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm        1/1     Running             0          68s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-qwrdx        0/1     ContainerCreating   0          8s    <none>        k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-sh79d        0/1     ContainerCreating   0          7s    <none>        k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-w9hlf        1/1     Running             0          68s   10.244.1.84   k8s-node01   <none>           <none>
 
[root@k8s-master01 ~]# kubectl top pod
NAME                                CPU(cores)   MEMORY(bytes)     
nginx-scale-649d95bcb4-nlrjk        44m          3Mi            
nginx-scale-649d95bcb4-q5qcm        47m          3Mi            
nginx-scale-649d95bcb4-w9hlf        43m          2Mi 
 
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%    
k8s-master01   834m         41%    1030Mi          36%        
k8s-node01     1302m        65%    809Mi           43%        
k8s-node02     878m         21%    549Mi           29%  
 
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                                READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-mgjjc        1/1     Running   0          89s     10.244.1.86   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-mtt7c        1/1     Running   0          74s     10.244.2.91   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-nlrjk        1/1     Running   0          19m     10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm        1/1     Running   0          2m30s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-qwrdx        1/1     Running   0          90s     10.244.1.85   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-rj7dq        1/1     Running   0          74s     10.244.2.90   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-sh79d        1/1     Running   0          89s     10.244.2.89   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-spzsb        1/1     Running   0          74s     10.244.1.87   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-w9hlf        1/1     Running   0          2m30s   10.244.1.84   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-xdk79        1/1     Running   0          26s     10.244.1.88   k8s-node01   <none>           <none>
 
[root@k8s-master01 ~]# kubectl top pod
NAME                                CPU(cores)   MEMORY(bytes)     
nginx-scale-649d95bcb4-mgjjc        31m          2Mi            
nginx-scale-649d95bcb4-mtt7c        32m          3Mi            
nginx-scale-649d95bcb4-nlrjk        42m          3Mi            
nginx-scale-649d95bcb4-q5qcm        43m          3Mi            
nginx-scale-649d95bcb4-qwrdx        27m          2Mi            
nginx-scale-649d95bcb4-rj7dq        44m          3Mi            
nginx-scale-649d95bcb4-sh79d        38m          3Mi            
nginx-scale-649d95bcb4-spzsb        30m          2Mi            
nginx-scale-649d95bcb4-w9hlf        24m          2Mi            
nginx-scale-649d95bcb4-xdk79        36m          2Mi 
 
# Stop ab and wait for scale-down; it takes quite a while
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%    
k8s-master01   470m         23%    1029Mi          36%        
k8s-node01     205m         10%    832Mi           44%        
k8s-node02     215m         5%     526Mi           27%        
k8s-node03     <unknown>                           <unknown>               <unknown>               <unknown>              
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                                READY   STATUS    RESTARTS   AGE    IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-nlrjk        1/1     Running   0          126m   10.244.2.86   k8s-node02   <none>           <none>

  

Fix containers whose time differs from the node

[root@k8s-master01 ~]# cat time.yml
apiVersion: v1
kind: Pod
metadata:
  name: time-nginx
spec:
  containers:
  - name: time-nginx
    image: nginx
    env:
    - name: TZ
      value: Asia/Shanghai
 
 
[root@k8s-master01 ~]# kubectl apply -f time.yml
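A quick check that the TZ variable took effect (date is assumed available in the nginx image, which it is in the stock Debian-based one):

kubectl exec time-nginx -- date    # container time
date                               # node time; the two should now agree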

  

View labels

[root@k8s-master01 ~]# kubectl get po --show-labels
NAME                                READY   STATUS    RESTARTS   AGE   LABELS
nginx-deployment-75bd67c8c5-4ws67   1/1     Running   0          21h   app=nginx-deployment,pod-template-hash=75bd67c8c5
nginx-deployment-75bd67c8c5-zp9nz   1/1     Running   0          21h   app=nginx-deployment,pod-template-hash=75bd67c8c5

  

Rolling restart (new pod: none --> ContainerCreating --> Running; old pod: Running --> Terminating --> gone)

[root@k8s-master01 ~]# kubectl rollout restart deploy my-nginx-first;while :;do kubectl get pod  -o wide;sleep 1;done
deployment.extensions/my-nginx-first restarted
NAME                             READY   STATUS              RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-8jzs5   1/1     Running             0          2m46s   10.244.1.12   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-fpnr2   1/1     Running             0          2m31s   10.244.2.14   k8s-node02   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   0/1     ContainerCreating   0          1s      <none>        k8s-node01   <none>           <none>
 
NAME                             READY   STATUS              RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-8jzs5   1/1     Running             0          3m5s    10.244.1.12   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-fpnr2   1/1     Terminating         0          2m50s   10.244.2.14   k8s-node02   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   1/1     Running             0          20s     10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   0/1     ContainerCreating   0          1s      <none>        k8s-node02   <none>           <none>
 
NAME                             READY   STATUS              RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-8jzs5   1/1     Running             0          3m12s   10.244.1.12   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   1/1     Running             0          27s     10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   0/1     ContainerCreating   0          8s      <none>        k8s-node02   <none>           <none>
 
NAME                             READY   STATUS        RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-8jzs5   1/1     Terminating   0          3m16s   10.244.1.12   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   1/1     Running       0          31s     10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   1/1     Running       0          12s     10.244.2.15   k8s-node02   <none>           <none>
 
NAME                             READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-78cbf67ff-dkmbt   1/1     Running   0          34s   10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   1/1     Running   0          15s   10.244.2.15   k8s-node02   <none>           <none>

Rollback (from 78cbf67ff back to 68c47d7f6). Besides undo there are also rollout pause and rollout resume, which pause and resume a rollout respectively — see the sketch after the output below.

[root@k8s-master01 ~]#  kubectl rollout  undo deployment    
deployment.extensions/my-nginx-first rolled back
[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME                             READY   STATUS              RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-n96cm   1/1     Running             0          12s   10.244.1.14   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-trbbk   0/1     ContainerCreating   0          3s    <none>        k8s-node02   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   1/1     Running             0          17m   10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   0/1     Terminating         0          16m   10.244.2.15   k8s-node02   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod  -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-n96cm   1/1     Running   0          58s   10.244.1.14   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-trbbk   1/1     Running   0          49s   10.244.2.16   k8s-node02   <none>           <none>
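A short sketch of the related rollout subcommands (all stock kubectl; deployment name taken from above):

kubectl rollout history deployment my-nginx-first                    # list revisions
kubectl rollout pause   deployment my-nginx-first                    # pause an in-progress rollout
kubectl rollout resume  deployment my-nginx-first                    # resume it
kubectl rollout undo    deployment my-nginx-first --to-revision=1    # roll back to a specific revision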

Create from a yml file

[root@k8s-master01 ~]# cat 1.yml
apiVersion: extensions/v1beta1 # API version
kind: Deployment          # resource type
metadata:                 # resource metadata
  name: nginx-deployment  # resource name
spec:                     # resource spec
  replicas: 2             # 2 replicas (default is 1)
  template:               # pod template
    metadata:             # pod metadata
      labels:             # labels
        app: web_server   # label selector
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 22122
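Apply it the usual way (standard kubectl; the deployment lands in the default namespace):

kubectl apply -f 1.yml
kubectl get deploy nginx-deployment -o wide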

 

Create one with 3 replicas (plain kubectl run, no namespace specified)

[root@k8s-master01 ~]# kubectl run my-nginx  --image=nginx --replicas 3 --port=80    # remove later with: kubectl delete deployment my-nginx
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/my-nginx created
[root@k8s-master01 ~]#  kubectl get pod  -o wide
NAME                            READY   STATUS              RESTARTS   AGE    IP           NODE         NOMINATED NODE   READINESS GATES
my-nginx-756fb87568-pfc22       0/1     ContainerCreating   0          22s    <none>       k8s-node02   <none>           <none>
my-nginx-756fb87568-q4fc2       1/1     Running             0          23s    10.244.2.3   k8s-node02   <none>           <none>
my-nginx-756fb87568-t2bvp       1/1     Running             0          22s    10.244.1.4   k8s-node01   <none>           <none>
my-nginx-first-54ccf4ff-5s2dd   1/1     Running             0          6m5s   10.244.2.2   k8s-node02   <none>           <none>
my-nginx-first-54ccf4ff-7x9rs   1/1     Running             0          27m    10.244.1.2   k8s-node01   <none>           <none>
my-nginx-first-54ccf4ff-j7f7w   1/1     Running             0          6m5s   10.244.1.3   k8s-node01   <none>           <none>
[root@k8s-master01 ~]#  kubectl get pod  --all-namespaces
NAMESPACE     NAME                                   READY   STATUS    RESTARTS   AGE
default       my-nginx-756fb87568-pfc22              1/1     Running   0          57s
default       my-nginx-756fb87568-q4fc2              1/1     Running   0          58s
default       my-nginx-756fb87568-t2bvp              1/1     Running   0          57s
default       my-nginx-first-54ccf4ff-5s2dd          1/1     Running   0          6m40s
default       my-nginx-first-54ccf4ff-7x9rs          1/1     Running   0          28m
default       my-nginx-first-54ccf4ff-j7f7w          1/1     Running   0          6m40s
kube-system   coredns-5c98db65d4-rwdtr               1/1     Running   0          12h
kube-system   coredns-5c98db65d4-zhqwb               1/1     Running   0          12h
kube-system   etcd-k8s-master01                      1/1     Running   0          12h
kube-system   kube-apiserver-k8s-master01            1/1     Running   0          12h
kube-system   kube-controller-manager-k8s-master01   1/1     Running   1          12h
kube-system   kube-flannel-ds-jrhz6                  1/1     Running   0          11h
kube-system   kube-flannel-ds-kdmgx                  1/1     Running   0          11h
kube-system   kube-flannel-ds-skqvq                  1/1     Running   0          11h
kube-system   kube-proxy-hpgj4                       1/1     Running   0          12h
kube-system   kube-proxy-q8rxb                       1/1     Running   0          12h
kube-system   kube-proxy-ts8xr                       1/1     Running   0          12h
kube-system   kube-scheduler-k8s-master01            1/1     Running   1          12h

 

  

  

Adding node3
Clone a new node in the VM; kubeadm join then fails:
[root@k8s-node03 ~]# kubeadm join 192.168.67.130:6443 --token abcdef.0123456789abcdef \
>     --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
        [ERROR FileAvailable--etc-kubernetes-bootstrap-kubelet.conf]: /etc/kubernetes/bootstrap-kubelet.conf already exists
        [ERROR Port-10250]: Port 10250 is in use
        [ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
 
[root@k8s-node03 ~]# kubeadm reset
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
 
It still cannot join the cluster, because the token has expired.
On the master, run kubeadm token create to get a new token; tokens are normally valid for 24 hours.
 
[root@k8s-master01 ~]# kubeadm token create
9sihb9.c4xrt1qai09401oq
 
Joining with the new token succeeds:
[root@k8s-node03 ~]# kubeadm join 192.168.67.130:6443 --token 9sihb9.c4xrt1qai09401oq     --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31                       
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
 
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
 
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
 
 
[root@k8s-master01 ~]# kubectl get node   
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   2d3h   v1.15.1
k8s-node01     Ready    <none>   2d3h   v1.15.1
k8s-node02     Ready    <none>   2d3h   v1.15.1
k8s-node03     Ready    <none>   69s    v1.15.1

 

 

 

 

 

Export the yml

[root@k8s-master01 ~]# kubectl get deploy nginx-deployment -o=yaml --export
Flag --export has been deprecated, This flag is deprecated and will be removed in future.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "11"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"extensions/v1beta1","kind":"Deployment","metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},"spec":{"replicas":2,"template":{"metadata":{"labels":{"app":"nginx-deployment"}},"spec":{"containers":[{"image":"nginx","name":"nginx","ports":[{"containerPort":22122}]}]}}}}
  creationTimestamp: null
  generation: 1
  labels:
    app: nginx-deployment
  name: nginx-deployment
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment
spec:
  progressDeadlineSeconds: 2147483647
  replicas: 10
  revisionHistoryLimit: 2147483647
  selector:
    matchLabels:
      app: nginx-deployment
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2022-03-22T00:53:51+08:00"
      creationTimestamp: null
      labels:
        app: nginx-deployment
    spec:
      containers:
      - env:
        - name: TZ
          value: Asia/Shanghai
        image: nginx
        imagePullPolicy: Always
        name: nginx
        ports:
        - containerPort: 22122
          protocol: TCP
        resources:
          limits:
            cpu: "2"
            memory: 512Mi
          requests:
            cpu: 10m
            memory: 256Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}

  

Dashboard

[root@k8s-master01 ~]# cat /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["https://k1ktap5m.mirror.aliyuncs.com"]
}
 
 
 
[root@k8s-master01 ~]# docker pull kainonly/kubernetes-dashboard-amd64:v1.10.1
v1.10.1: Pulling from kainonly/kubernetes-dashboard-amd64
9518d8afb433: Pull complete
Digest: sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747
Status: Downloaded newer image for kainonly/kubernetes-dashboard-amd64:v1.10.1
docker.io/kainonly/kubernetes-dashboard-amd64:v1.10.1
 
 
 
[root@k8s-master01 ~]# kubectl apply -f  kubernetes-dashboard.yaml
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
 
 
[root@k8s-master01 ~]# kubectl get pod,svc -A|grep dashboard
 
kube-system   pod/kubernetes-dashboard-779b476744-z8c4c   1/1     Running   0          29m
kube-system   service/kubernetes-dashboard   NodePort    10.110.4.110    <none>        443:31080/TCP            29m
 
Open https://192.168.67.130:31080 in Firefox
 
 
[root@k8s-master01 ~]# cat kubernetes-dashboard.yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: docker.io/kainonly/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 31080
  selector:
    k8s-app: kubernetes-dashboard
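Before switching to a browser, you can confirm the NodePort Service above is serving TLS; the certificate is self-signed at this point, hence `-k` (IP is the master node from this cluster):

curl -k https://192.168.67.130:31080/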

 

[root@k8s-master01 ~]# cat dashboard-svc-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
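Apply it first, so the token lookup further below has a dashboard-admin ServiceAccount to read:

kubectl apply -f dashboard-svc-account.yaml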

  # Create a signed certificate so Chrome can access the dashboard

[root@k8s-master01 ~]#  mkdir kubernetes-dashboard-key && cd kubernetes-dashboard-key
 
[root@k8s-master01 kubernetes-dashboard-key]# openssl genrsa -out dashboard.key 2048
Generating RSA private key, 2048 bit long modulus
..........+++
...+++
e is 65537 (0x10001)
 
[root@k8s-master01 kubernetes-dashboard-key]# ll
total 4
-rw-r--r-- 1 root root 1675 Mar 23 01:50 dashboard.key
 
[root@k8s-master01 kubernetes-dashboard-key]#  openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.100.100'
 
[root@k8s-master01 kubernetes-dashboard-key]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Signature ok
subject=/CN=192.168.100.100
Getting Private key   
     
[root@k8s-master01 kubernetes-dashboard-key]# ll
total 12
-rw-r--r-- 1 root root  993 Mar 23 01:50 dashboard.crt
-rw-r--r-- 1 root root  899 Mar 23 01:50 dashboard.csr
-rw-r--r-- 1 root root 1675 Mar 23 01:50 dashboard.key   
     
[root@k8s-master01 kubernetes-dashboard-key]#  kubectl delete secret kubernetes-dashboard-certs -n kube-system
secret "kubernetes-dashboard-certs" deleted
 
[root@k8s-master01 kubernetes-dashboard-key]#     kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
secret/kubernetes-dashboard-certs created
 
[root@k8s-master01 ~]#  kubectl get pod -A|grep dashboard
kubernetes-dashboard-779b476744-tdgvz   1/1     Running   0          3h16m
 
[root@k8s-master01 kubernetes-dashboard-key]# kubectl delete pod kubernetes-dashboard-779b476744-tdgvz -n kube-system
pod "kubernetes-dashboard-779b476744-tdgvz" deleted
 
[root@k8s-master01 ~]#  kubectl get pod -A|grep dashboard
kube-system   kubernetes-dashboard-779b476744-s4jch   1/1     Running   0          20s
 
[root@k8s-master01 kubernetes-dashboard-key]# kubectl get svc -n kube-system
NAME                   TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                  AGE
kube-dns               ClusterIP   10.96.0.10     <none>        53/UDP,53/TCP,9153/TCP   3d6h
kubernetes-dashboard   NodePort    10.110.4.110   <none>        443:31080/TCP            9h
metrics-server         ClusterIP   10.96.212.45   <none>        443/TCP                  33h
 
Access https://192.168.67.130:31080/ in Chrome

Enter the token

# Get the token (requires the dashboard-svc-account.yaml above to have been applied)
[root@k8s-master01 ~]#  kubectl describe secret ` kubectl get secret -n kube-system |grep admin|awk '{print $1}' `  -n kube-system|grep '^token'|awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tdzl3djgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMTliNWRkNTctZDdiMi00ZTk4LThmMTktMzc4YTY1YzAyOGIzIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.PBye3-1rYtP0cnESxkMNrhP39e0uayBvqeULrWukUuKASjLDXRMx19cIVxzz74WwzDH1EANNyWcN4QkHPBphbIraGwaZgH8biqp0FzZfP5VdW6KV6bWt7twC8hq_0pxGohDOrjlr5u79Rtl1elx-p60fvTIY3HmWguXxM8K4u6QVDuQlsk7YZ9pCpQRNtmVmbln-5Rj6sqQAYrX29LV-lswGGgwR2JomfPFMZ-kdnm7KjrLgm_bh9qbYZwgok2VlJyKWAGZUebVHEPHZGehnEI2k6iVdPiCZ2WjILdwSE8lQqpUcpZYRMunUtO952eYsJlg7r49EQu0V1J04Txv-DQ

Further reading:
Deployment vs ReplicationController in Kubernetes: https://blog.csdn.net/u010884123/article/details/55213011
Kubernetes commands: http://docs.kubernetes.org.cn/638.html
Namespaces, labels, and mounting a deployment: https://blog.csdn.net/jiangbenchu/article/details/91517865
What actually happens when you run kubectl run? https://zhuanlan.zhihu.com/p/79774851
ReplicaSet, Deployment, DaemonSet: https://www.cnblogs.com/bjlhx/p/13617166.html

 

K8s master-node high-availability cluster setup

[root@k8s-master01 ~]# docker ps -a|grep Up
8f7c95f65e99   eb516548c180           "/coredns -conf /etc…"   2 hours ago   Up 2 hours                           k8s_coredns_coredns-5c98db65d4-rwdtr_kube-system_f967ac2e-23fc-4b7f-8762-bead67dbaab3_6
f7f196f7b85e   eb516548c180           "/coredns -conf /etc…"   2 hours ago   Up 2 hours                           k8s_coredns_coredns-5c98db65d4-zhqwb_kube-system_32bdf37c-06d9-46bb-93e3-f7cd03ffd750_5
e620d288b4e1   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_coredns-5c98db65d4-rwdtr_kube-system_f967ac2e-23fc-4b7f-8762-bead67dbaab3_64
fa48f0aac53f   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_coredns-5c98db65d4-zhqwb_kube-system_32bdf37c-06d9-46bb-93e3-f7cd03ffd750_62
c8df0377024e   d75082f1d121           "kube-controller-man…"   2 hours ago   Up 2 hours                           k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_22
7ce3a8e22bb1   9247abf08677           "/opt/bin/flanneld -…"   2 hours ago   Up 2 hours                           k8s_kube-flannel_kube-flannel-ds-kdmgx_kube-system_ded8ee32-7936-4c49-9af3-c6ed1e072de2_46
06958b247da2   68c3eb07bfc3           "kube-apiserver --ad…"   2 hours ago   Up 2 hours                           k8s_kube-apiserver_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_49
e3f9f21510c8   89a062da739d           "/usr/local/bin/kube…"   2 hours ago   Up 2 hours                           k8s_kube-proxy_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_6
4fc01a7e344a   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_kube-flannel-ds-kdmgx_kube-system_ded8ee32-7936-4c49-9af3-c6ed1e072de2_6
10608c3fe272   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_6
055a829e9094   2c4adeb21b4f           "etcd --advertise-cl…"   2 hours ago   Up 2 hours                           k8s_etcd_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_6
a02321513038   b0b3c4c404da           "kube-scheduler --bi…"   2 hours ago   Up 2 hours                           k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_21
c11e600e945f   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_6
09ac011a5ec8   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_6
2bef7e1b3f52   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_6
7d8415cadb4c   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours                           k8s_POD_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_6
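Every k8s_* container here carries kubelet-set labels, which makes it easy to map a container ID back to its pod; a quick sketch (container ID taken from the first line above):

docker inspect -f '{{index .Config.Labels "io.kubernetes.pod.namespace"}}/{{index .Config.Labels "io.kubernetes.pod.name"}}' 8f7c95f65e99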

 

[root@k8s-master01 ~]# kubectl get pod -A -o wide
NAMESPACE     NAME                                    READY   STATUS        RESTARTS   AGE    IP               NODE           NOMINATED NODE   READINESS GATES
default       nginx-deployment-7f58cf9455-bhnn8       1/1     Running       0          7m5s   10.244.1.78      k8s-node01     <none>           <none>
default       nginx-deployment-7f58cf9455-gc9vc       1/1     Terminating   0          102m   10.244.2.84      k8s-node02     <none>           <none>
kube-system   coredns-5c98db65d4-rwdtr                1/1     Running       6          3d2h   10.244.0.9       k8s-master01   <none>           <none>
kube-system   coredns-5c98db65d4-zhqwb                1/1     Running       5          3d2h   10.244.0.8       k8s-master01   <none>           <none>
kube-system   etcd-k8s-master01                       1/1     Running       6          3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-apiserver-k8s-master01             1/1     Running       49         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-controller-manager-k8s-master01    1/1     Running       22         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-flannel-ds-jrhz6                   1/1     Running       45         3d1h   192.168.67.131   k8s-node01     <none>           <none>
kube-system   kube-flannel-ds-kdmgx                   1/1     Running       46         3d1h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-flannel-ds-qdj9k                   1/1     Running       1          22h    192.168.67.133   k8s-node03     <none>           <none>
kube-system   kube-flannel-ds-skqvq                   1/1     Running       37         3d1h   192.168.67.132   k8s-node02     <none>           <none>
kube-system   kube-proxy-hpgj4                        1/1     Running       6          3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-proxy-q8rxb                        1/1     Running       2          3d2h   192.168.67.131   k8s-node01     <none>           <none>
kube-system   kube-proxy-sfn2g                        1/1     Running       1          22h    192.168.67.133   k8s-node03     <none>           <none>
kube-system   kube-proxy-ts8xr                        1/1     Running       5          3d2h   192.168.67.132   k8s-node02     <none>           <none>
kube-system   kube-scheduler-k8s-master01             1/1     Running       21         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kubernetes-dashboard-779b476744-tdgvz   1/1     Running       0          7m5s   10.244.1.77      k8s-node01     <none>           <none>
kube-system   kubernetes-dashboard-779b476744-z8c4c   1/1     Terminating   1          6h2m   10.244.2.83      k8s-node02     <none>           <none>
kube-system   metrics-server-6bfbbcff4d-prpb2         1/1     Terminating   0          102m   10.244.2.85      k8s-node02     <none>           <none>
kube-system   metrics-server-6bfbbcff4d-qcx5d         1/1     Running       0          7m5s   10.244.1.79      k8s-node01     <none>           <none>
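The Terminating pods stuck on k8s-node02 next to the fresh replacements on k8s-node01 are the typical picture of a node that went unreachable or was drained; node health itself can be checked with:

kubectl get node -o wide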

  

 
