k8s v1.20 detailed deployment walkthrough [tested and working, no pitfalls]

1. Deployment environment preparation

1.1 Software versions
OS        Linux 3.10.0-1160.el7.x86_64 (CentOS 7)
Docker    Docker version 20.10.17
k8s       1.20.0-0
1.2 Deployment plan [single master]
Hostname      IP address        Role
k8s-master    192.168.56.202    master node
k8s-node1     192.168.56.203    worker node
k8s-node2     192.168.56.204    worker node

2. System environment initialization

2.1 Disable the firewall
[root@K8sMaster ~]# systemctl stop firewalld 
[root@K8sMaster ~]# systemctl disable firewalld
2.2 Disable SELinux
# Disable temporarily
[root@K8sMaster ~]# setenforce 0
# Disable permanently (takes effect after reboot)
[root@K8sMaster ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
2.3 Disable the swap partition
# Swap must be disabled here, otherwise kubeadm init will fail
# Disable temporarily
[root@K8sMaster ~]# swapoff -a
# Disable permanently
[root@K8sMaster ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab 
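To confirm swap is really off before running kubeadm, the Swap line of free -m should show 0 and /proc/swaps should list no devices:
[root@K8sMaster ~]# free -m
[root@K8sMaster ~]# cat /proc/swaps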
2.4 Set the hostname
# hostnamectl writes /etc/hostname, so the change persists across reboots (run the matching command on each node, e.g. k8s-node1, k8s-node2)
[root@K8sMaster ~]# hostnamectl set-hostname k8s-master
# Equivalent: write /etc/hostname directly
[root@master kubelet]# cat > /etc/hostname << EOF
k8s-master
EOF
2.5 Configure hostname resolution
[root@K8sMaster ~]# cat >> /etc/hosts << EOF
192.168.56.202 k8s-master
192.168.56.203 k8s-node1
192.168.56.204 k8s-node2
EOF
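A quick way to verify that the names now resolve from the master:
[root@K8sMaster ~]# ping -c 1 k8s-node1
[root@K8sMaster ~]# ping -c 1 k8s-node2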
2.6 Pass bridged IPv4 traffic to the iptables chains
[root@K8sMaster sysctl.d]# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1
EOF
# Apply the configuration
[root@K8sMaster ~]# sysctl --system 
# Optionally list all kernel parameters to double-check the values
[root@K8sMaster ~]# sysctl -a
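If sysctl reports that the net.bridge.* keys do not exist, the br_netfilter kernel module is most likely not loaded; a possible fix is to load it now and on every boot, then re-apply:
[root@K8sMaster ~]# modprobe br_netfilter
[root@K8sMaster ~]# cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF
[root@K8sMaster ~]# sysctl --system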
2.7 Set up time synchronization
[root@K8sMaster ~]# yum -y install ntpdate
# Sync the clock from Aliyun's NTP server
[root@K8sMaster ~]# ntpdate time1.aliyun.com
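ntpdate only syncs once; to keep the node clocks aligned over time, one simple option (assuming crond is running; the 30-minute schedule is arbitrary) is a root cron entry:
[root@K8sMaster ~]# echo "*/30 * * * * /usr/sbin/ntpdate time1.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root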

3. Install Docker

3.1 Configure the Docker repository
[root@K8sMaster ~]# yum install -y yum-utils
# Official Docker CE repo (this and the Aliyun mirror below both install docker-ce.repo; one of them is enough)
[root@K8sMaster ~]# yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
# Aliyun mirror of the same repo (usually faster from mainland China)
[root@K8sMaster ~]# yum-config-manager \
    --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
3.2 Install Docker
# Install
[root@K8sMaster ~]# yum install -y docker-ce
# Start
[root@K8sMaster ~]# systemctl start docker
# Enable at boot; kubeadm's preflight checks will complain later if the Docker service is not enabled
[root@K8sMaster ~]# systemctl enable docker
3.3 Configure a Docker registry mirror
[root@K8sMaster ~]# mkdir -p /etc/docker
[root@K8sMaster ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://49qrnx21.mirror.aliyuncs.com"]
 }
EOF
[root@K8sMaster ~]# systemctl daemon-reload && systemctl restart docker
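kubeadm on 1.20 recommends the systemd cgroup driver for Docker; if kubeadm init later warns that Docker is using cgroupfs, daemon.json can optionally be extended as below (this rewrites the file while keeping the mirror setting):
[root@K8sMaster ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://49qrnx21.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@K8sMaster ~]# systemctl daemon-reload && systemctl restart docker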

4. Install Kubernetes with kubeadm

4.1 Configure the Kubernetes yum repository
[root@K8sMaster ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF
4.2 Install kubeadm, kubelet, and kubectl
[root@K8sMaster ~]# yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0
[root@K8sMaster ~]# systemctl enable kubelet
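A quick sanity check that the expected 1.20.0 binaries were installed:
[root@K8sMaster ~]# kubeadm version -o short
[root@K8sMaster ~]# kubelet --version
[root@K8sMaster ~]# kubectl version --client --short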
4.3 Initialize the control plane with kubeadm

[Master node only: run this step on the master node]

[root@K8sMaster ~]# kubeadm init \
   --apiserver-advertise-address=192.168.56.202 \
   --image-repository registry.aliyuncs.com/google_containers \
   --kubernetes-version 1.20.0 \
   --service-cidr=10.96.0.0/12 \
   --pod-network-cidr=10.244.0.0/16
# After initialization completes, output like the following appears
Your Kubernetes control-plane has initialized successfully
.......
# Create the kubeconfig as instructed in that output
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# The output also prints a kubeadm join command [this is the command worker nodes use to join the master]
4.4 Join worker nodes to the cluster
# Case 1: kubeadm init on the master has just printed a join command; use it on the nodes as-is
# Case 2: when joining a node later, the token may have expired or been lost and the --discovery-token-ca-cert-hash value is unknown;
# in that case regenerate the token and the --discovery-token-ca-cert-hash on the master
4.4.1 Check token status
[root@K8sMaster ~]# kubeadm token list  
TOKEN                     TTL         EXPIRES                     USAGES                   DESCRIPTION                                                EXTRA GROUPS
hkqc00.c0sn7s5mpauw11fx   3h          2022-08-22T21:25:46+08:00   authentication,signing   The default bootstrap token generated by 'kubeadm init'.   system:bootstrappers:kubeadm:default-node-token
qlhylz.emm864nueu1ypv6p   23h         2022-08-23T17:44:21+08:00   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
xnwbbo.1ylplt1jrz10ms1u   23h         2022-08-23T17:44:27+08:00   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
4.4.2 Generate a new token
[root@K8sMaster ~]# kubeadm token create
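A convenient shortcut is to have kubeadm print a complete join command (new token plus CA cert hash) in one go:
[root@K8sMaster ~]# kubeadm token create --print-join-command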
4.4.3 Obtain the --discovery-token-ca-cert-hash value
[root@K8sMaster ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'
4.5 Join the node to the cluster
# Run on the worker node; 6443 is the API server port on the master (192.168.56.202)
[root@K8sNode1 ~]# kubeadm join 192.168.56.202:6443 --token qlhylz.emm864nueu1ypv6p \
--discovery-token-ca-cert-hash sha256:cd778ad01bdbc656eaff7d3b1273691f0070ebbadd2f1b8a3189a6dc1e88f39f
# Substitute the token and hash values generated above
4.6 Check node status
[root@K8sMaster ~]# kubectl get nodes   
NAME     STATUS     ROLES                  AGE   VERSION
K8sMaster   NotReady   control-plane,master   20h   v1.20.0
K8sNode1    NotReady   <none>                 13m   v1.20.0
[root@K8sMaster ~]# 

5. Deploy the container network plugin (CNI)

5.1 Deploy the flannel network plugin
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
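If raw.githubusercontent.com is not reachable from the server, one workaround is to download kube-flannel.yml on a machine that can reach it, copy it over, and apply the local file:
[root@master ~]# wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
[root@master ~]# kubectl apply -f kube-flannel.yml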
5.2 Verify that the deployment finished
[root@master ~]# kubectl get pods --all-namespaces
NAMESPACE      NAME                             READY   STATUS    RESTARTS   AGE
kube-flannel   kube-flannel-ds-c66ds            1/1     Running   0          14m
kube-flannel   kube-flannel-ds-pxhzc            1/1     Running   0          14m
kube-system    coredns-7f89b7bc75-gpgl9         1/1     Running   0          27h
kube-system    coredns-7f89b7bc75-sr5mx         1/1     Running   0          27h
kube-system    etcd-master                      1/1     Running   2          27h
kube-system    kube-apiserver-master            1/1     Running   2          27h
kube-system    kube-controller-manager-master   1/1     Running   2          27h
kube-system    kube-proxy-hkmd9                 1/1     Running   0          7h33m
kube-system    kube-proxy-mrxwf                 1/1     Running   3          27h
kube-system    kube-scheduler-master            1/1     Running   3          27h
5.3 Check node status

All nodes should now be in Ready status.

[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   27h     v1.20.0
node1    Ready    <none>                 7h31m   v1.20.0
5.4 Test the cluster
# Create an nginx deployment
[root@master opt]# kubectl create deployment nginx --image=nginx
# Wait until the image is pulled and the pod is Running
[root@master opt]# kubectl get pods
NAME                     READY   STATUS    RESTARTS   AGE
nginx-6799fc88d8-lx9nc   1/1     Running   0          3m44s
# Expose the deployment as a NodePort service
[root@master opt]# kubectl expose deployment nginx --port=80 --type=NodePort
# Check the assigned NodePort
[root@master opt]# kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-6799fc88d8-lx9nc   1/1     Running   0          4m27s

NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        28h
service/nginx        NodePort    10.100.68.38   <none>        80:30572/TCP   3s
5.5 Access test
Open http://<any node IP>:30572 (the NodePort shown above); seeing the default nginx welcome page means the cluster is working.
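For example, from any host that can reach the nodes (30572 is the NodePort assigned above and will differ per cluster):
[root@master opt]# curl -I http://192.168.56.203:30572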

6. Install the Dashboard

6.1 Download the Dashboard manifest
[root@master opt]# wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

Since this file is hosted abroad and may be slow or impossible to download, the full manifest (with the changes noted in the inline comments) is reproduced below and can be copied directly.

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
# Image switched to a domestic (Aliyun) mirror
        image: registry.cn-beijing.aliyuncs.com/minminmsn/kubernetes-dashboard:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
# -- added: expose the Service as a NodePort
  type: NodePort 
# --
  ports:
    - port: 443
      targetPort: 8443
# -- added: pin the NodePort to 30001
      nodePort: 30001
# -- 
  selector:
    k8s-app: kubernetes-dashboard
6.2 Create the Dashboard
[root@master opt]# kubectl apply -f kubernetes-dashboard.yaml
6.3 Check the creation status
[root@master opt]# kubectl get pods --namespace=kube-system
NAME                                   READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-gpgl9               1/1     Running   0          33h
coredns-7f89b7bc75-sr5mx               1/1     Running   0          33h
etcd-master                            1/1     Running   2          33h
kube-apiserver-master                  1/1     Running   2          33h
kube-controller-manager-master         1/1     Running   2          33h
kube-proxy-hkmd9                       1/1     Running   0          13h
kube-proxy-mrxwf                       1/1     Running   3          33h
kube-scheduler-master                  1/1     Running   3          33h
kubernetes-dashboard-97c4799d9-4jp7k   1/1     Running   0          13m
6.4 Check the mapped port
[root@master opt]# kubectl get services kubernetes-dashboard -n kube-system
NAME                   TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
kubernetes-dashboard   NodePort   10.108.118.239   <none>        443:30001/TCP   4h39m
6.5 Check which node the pod was scheduled on
[root@master opt]#  kubectl get pods -o wide --namespace=kube-system
NAME                                   READY   STATUS    RESTARTS   AGE   IP               NODE     NOMINATED NODE   READINESS GATES
coredns-7f89b7bc75-gpgl9               1/1     Running   0          33h   10.244.1.3       node1    <none>           <none>
coredns-7f89b7bc75-sr5mx               1/1     Running   0          33h   10.244.1.2       node1    <none>           <none>
etcd-master                            1/1     Running   2          33h   192.168.56.202   master   <none>           <none>
kube-apiserver-master                  1/1     Running   2          33h   192.168.56.202   master   <none>           <none>
kube-controller-manager-master         1/1     Running   2          33h   192.168.56.202   master   <none>           <none>
kube-proxy-hkmd9                       1/1     Running   0          13h   192.168.56.203   node1    <none>           <none>
kube-proxy-mrxwf                       1/1     Running   3          33h   192.168.56.202   master   <none>           <none>
kube-scheduler-master                  1/1     Running   3          33h   192.168.56.202   master   <none>           <none>
kubernetes-dashboard-97c4799d9-4jp7k   1/1     Running   0          16m   10.244.1.6       node1    <none>           <none>
[root@master opt]# 
6.6 List the cluster role objects
[root@master opt]#  kubectl get clusterroles
NAME                                                                   CREATED AT
admin                                                                  2022-08-21T13:25:44Z
cluster-admin                                                          2022-08-21T13:25:44Z
edit                                                                   2022-08-21T13:25:44Z
flannel                                                                2022-08-22T17:06:43Z
kubeadm:get-nodes                                                      2022-08-21T13:25:46Z
system:aggregate-to-admin                                              2022-08-21T13:25:44Z
system:aggregate-to-edit                                               2022-08-21T13:25:45Z
system:aggregate-to-view                                               2022-08-21T13:25:45Z
system:auth-delegator                                                  2022-08-21T13:25:45Z
system:basic-user                                                      2022-08-21T13:25:44Z
6.7 Inspect the cluster-admin role
[root@master opt]# kubectl describe clusterroles cluster-admin
Name:         cluster-admin
Labels:       kubernetes.io/bootstrapping=rbac-defaults
Annotations:  rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
  Resources  Non-Resource URLs  Resource Names  Verbs
  ---------  -----------------  --------------  -----
  *.*        []                 []              [*]
             [*]                []              [*]
6.8 Bind the kubernetes-dashboard service account to the cluster-admin cluster role
[root@master opt]# cat > kubernetes-dashboard-ClusterRoleBinding.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOF
[root@master opt]# kubectl create -f kubernetes-dashboard-ClusterRoleBinding.yaml
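Equivalently, instead of writing the YAML file above, the same binding can be created with a single command:
[root@master opt]# kubectl create clusterrolebinding kubernetes-dashboard --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard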
6.9 Get the login token
[root@master opt]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin | awk '{print $1}')
Name:         admin-token-t4sxg
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin
              kubernetes.io/service-account.uid: 9f6ec4cf-925f-4d9f-a104-8e7367d08c98

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1066 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImdyX0lGZm9xNlNrbTRxQnJoeU1CREZPUFdTR0F0WmNxekEzZ29ITkRFY0EifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi10b2tlbi10NHN4ZyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjlmNmVjNGNmLTkyNWYtNGQ5Zi1hMTA0LThlNzM2N2QwOGM5OCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbiJ9.mv7wGLXAFcThtrS5_zuIKG-MaYJkQ2pud72G5vNfUrQsqSXNn3Kqf0OjFhagvZ1By5fHbyahTGWSWtYOoZUSKNXk42kEbh1LnhkYyjLKtFI1hUvk5Fw4Scc1PgGUpIyE1KPd4V3_jH3U7P6Kz5GfjkGhcTNDenoqdXtJN7c8UeF6QaG9hYxLyKCEH2M7SJDHq8q8LBuLT12yfxrFl6tGq5U_LwyYmJNUdpTuFOuvBYJ-8hi_ptxpxRDvUJskuZBW4HIxNi5dnNsQUt7euhSWePqfsSYAfTdPmTH6UvpKTAv87i9CA3rVeb46Jek7TMC6so5QNWnqBFb5RXkFqja8PA
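To print only the token value (handy for pasting into the login page), a jsonpath query can be used instead of describe:
[root@master opt]# kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep admin | awk '{print $1}') -o jsonpath='{.data.token}' | base64 -d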
6.10 Access the Dashboard
Note: the Dashboard serves HTTPS with a self-signed certificate, so Chrome and IE refuse to open the page; Firefox works (accept the certificate warning).
URL: https://<IP of the node the pod was scheduled on>:30001, then paste in the token obtained above.
6.11 Work around Chrome refusing to open the Dashboard
[root@master opt]# mkdir kubernetes-dashboard-key && cd kubernetes-dashboard-key
  # Generate the private key for the certificate
[root@master opt]# openssl genrsa -out dashboard.key 2048
  # Generate the CSR
[root@master opt]# openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.56.202'
  # Generate a self-signed certificate
[root@master opt]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
  # Delete the existing certs secret
[root@master opt]# kubectl delete secret kubernetes-dashboard-certs -n kube-system
  # Create a new secret from the new key and certificate
[root@master opt]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
  # Find the Dashboard pod name
[root@master opt]# kubectl get pod -n kube-system
  # Delete the Dashboard pod (use the name shown above) so it restarts and picks up the new certificate
[root@master opt]# kubectl delete pod kubernetes-dashboard-7d6c598b5f-fvcg8 -n kube-system
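To confirm that the Dashboard is now serving the new certificate, its subject and validity can be checked from any machine that can reach the NodePort (node IP and port as used above):
[root@master opt]# echo | openssl s_client -connect 192.168.56.203:30001 2>/dev/null | openssl x509 -noout -subject -dates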
6.12 If you see lots of 404 pages
# The manifest used above is for an old Dashboard version; switching to a newer manifest resolves the 404s.
The file contents can be as follows:
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

Restart the Dashboard service

[root@master opt]# kubectl delete -f kubernetes-dashboard.yaml
[root@master opt]# kubectl create -f kubernetes-dashboard.yaml
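Note that this newer manifest puts everything in the kubernetes-dashboard namespace and exposes the UI on nodePort 30000, so the cluster-admin binding and token lookup from sections 6.8/6.9 have to be repeated against that namespace. A sketch, using a hypothetical binding name dashboard-admin:
[root@master opt]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
[root@master opt]# kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep kubernetes-dashboard-token | awk '{print $1}')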