## Pre-installation preparation
# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
# Stop and disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Synchronize time
ntpdate cn.pool.ntp.org
# Disable swap
swapoff -a
# Permanently disable swap at boot by commenting out the swap entry in /etc/fstab, e.g.:
#/dev/mapper/centos_master1-swap swap swap defaults 0 0
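# One common way to comment out the swap line automatically (check /etc/fstab afterwards):
sed -ri 's/.*swap.*/#&/' /etc/fstab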
# Set the hostname (master1 on the master, node1 on the node)
hostnamectl set-hostname master1
##hostnamectl set-hostname node1
# Add hostname resolution entries to /etc/hosts on every node (see the example below)
192.168.24.31 master1
192.168.24.32 node1
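# For example, append the entries on each node:
cat >> /etc/hosts <<EOF
192.168.24.31 master1
192.168.24.32 node1
EOF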
# Replace and download the yum repos
(1) Back up the original yum repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
(2) Download the Aliyun yum repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
(3) Rebuild the yum cache
yum makecache fast
(4) Configure the yum repo needed to install k8s
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
(5) Clean the yum cache
yum clean all
(6) Rebuild the yum cache
yum makecache fast
(7) Update packages
yum -y update
(8) Install required packages
yum -y install yum-utils device-mapper-persistent-data lvm2
(9) Add the docker-ce repo
##yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
wget -O /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum clean all
yum makecache fast
(10) Install base packages
yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate
Check the available docker versions
yum list docker-ce --showduplicates |sort -r
# Install docker 19.03.7
yum install -y docker-ce-19.03.7-3.el7
# Enable docker at boot and start it
systemctl enable docker && systemctl start docker
# Edit the docker configuration file
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
# Restart docker
systemctl daemon-reload && systemctl restart docker
# Install kubeadm and kubelet 1.18.2 on both the master and the node
yum install kubeadm-1.18.2 kubelet-1.18.2 -y
systemctl enable kubelet
# Download docker-compose (required by Harbor)
# Reference: https://docs.linuxserver.io/general/docker-compose
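# One common way to install it (the version pinned here is only an example; adjust as needed):
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
docker-compose --version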
# Set up Harbor and push the K8S images to the Harbor registry
# Download:
wget https://github.com/goharbor/harbor/releases/download/v2.5.0/harbor-offline-installer-v2.5.0.tgz
# Edit harbor.yml (address, port, password, data directory, etc.); a typical install flow is sketched below
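# A typical offline-installer flow (the paths are only an example):
tar -zxvf harbor-offline-installer-v2.5.0.tgz -C /opt/
cd /opt/harbor
cp harbor.yml.tmpl harbor.yml    # then edit hostname, port, harbor_admin_password, data_volume, etc.
./install.sh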
# Configure /etc/docker/daemon.json on the docker hosts and restart the docker service
{ "insecure-registries": ["192.168.24.33:32800"], ###添加此处配置 "registry-mirrors": ["https://registry.docker-cn.com"], "exec-opts":["native.cgroupdriver=systemd"], "log-driver":"json-file", "log-opts": { "max-size": "100m" }, "storage-driver":"overlay2", "storage-opts": [ "overlay2.override_kernel_check=true" ] }
## Restart docker
systemctl daemon-reload && systemctl restart docker
# Log in to Harbor so the auth credentials are stored
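# For example, using the Harbor admin account configured in harbor.yml:
docker login 192.168.24.33:32800 -u admin -p Harbor12345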
# Load the images, retag them, and push them to the Harbor registry
# Load the images
## Load them all at once:
for i in `find ./ -name "1-18*"`;do docker load -i $i;done
# Or load them one by one
docker load -i 1-18-kube-apiserver.tar.gz
docker load -i 1-18-kube-scheduler.tar.gz
docker load -i 1-18-kube-controller-manager.tar.gz
docker load -i 1-18-pause.tar.gz
docker load -i 1-18-cordns.tar.gz
docker load -i 1-18-etcd.tar.gz
docker load -i 1-18-kube-proxy.tar.gz
# Retag the images
# Retag every k8s image to the Harbor project 192.168.24.33:32800/base and run the resulting docker tag commands
docker images | grep k8s | awk 'BEGIN {OFS=":";}{print $1,$2}' | awk -F"/" '{print "docker tag "$0" 192.168.24.33:32800/base/"$NF | "bash"}'
# Push the images to the Harbor registry
docker images | grep "192.168.24.33:32800"| awk 'BEGIN {OFS=":";} {print "docker push "$1,$2 | "bash"}'
### Deploying with kubeadm
Install kubeadm
Before you begin
A compatible Linux host. The Kubernetes project provides generic instructions for Linux distributions based on Debian and Red Hat, as well as some distributions without a package manager.
2 GB or more of RAM per machine (any less will leave little room for your applications).
2 CPUs or more.
Full network connectivity between all machines in the cluster (public or private network is fine).
Unique hostname, MAC address, and product_uuid for every node. See the official docs for more details.
Certain ports must be open on your machines.
Swap disabled. You MUST disable swap for the kubelet to work properly.
Let iptables see bridged traffic.
## Make sure the br_netfilter module is loaded
sudo modprobe br_netfilter
lsmod | grep br_netfilter
## For iptables on your Linux nodes to correctly see bridged traffic, make sure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
## Added later: enable IPVS (otherwise pods cannot reach Services properly, e.g. pinging a Service from inside a pod fails)
# Takes effect immediately (not persistent)
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4 ## or nf_conntrack (if nf_conntrack_ipv4 fails, the kernel is too new; drop the _ipv4 suffix)
# Persistent across reboots
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4 ## or nf_conntrack
EOF
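# To have these modules actually loaded at boot, a common pattern is to make the file executable, run it once, and verify:
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack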
Check the required ports
telnet 127.0.0.1 6443
kubeadm initialization
# Run on the master host
kubeadm init --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.24.31 --image-repository=192.168.24.33:32800/base
# On success, output like the following is printed
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.24.31:6443 --token wux3tn.dzwcquxks8ekexbr \
--discovery-token-ca-cert-hash sha256:67f9e400399ee693988c529d01b8da5d8928463a8b37f935d5d0cda7e0937ab9
# Follow the prompts and run the commands shown above. kubeadm join is executed on the node; if you lose the command, regenerate it on the master with:
kubeadm token create --print-join-command
# Join the node to the cluster
kubeadm join 192.168.24.31:6443 --token wux3tn.dzwcquxks8ekexbr \
    --discovery-token-ca-cert-hash sha256:67f9e400399ee693988c529d01b8da5d8928463a8b37f935d5d0cda7e0937ab9
# Check the cluster status
# Status is NotReady because no network plugin is installed yet; kubeadm init does not install one, you have to install it yourself
[root@master1 ]# kubectl get node
NAME      STATUS     ROLES    AGE   VERSION
master1   NotReady   master   40m   v1.18.2
node1     NotReady   <none>   39m   v1.18.2
## Added later
# Change the kube-proxy proxy mode to ipvs (otherwise pods and Services cannot communicate properly)
kubectl edit cm kube-proxy -n kube-system
# Restart kube-proxy (run on the master)
kubectl get pod -n kube-system | grep kube-proxy | awk '{print $1}' | xargs kubectl delete pod -n kube-system
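# For reference, the field to change in the config.conf section of the kube-proxy ConfigMap opened by the edit command above is roughly:
#   mode: ""      ->      mode: "ipvs"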
# Some pods may also fail to initialize, for example:
[root@master1 ]# kubectl get pods -A
NAMESPACE     NAME               READY   STATUS             RESTARTS   AGE
...
kube-system   kube-proxy-8tfk8   0/1     ImagePullBackOff   0          65m
...
kubectl describe pod -n kube-system kube-proxy-8tfk8
Warning  FailedCreatePodSandBox  24m (x126 over 64m)  kubelet  Failed to create pod sandbox: rpc error: code = Unknown desc = failed pulling image "192.168.24.33:32800/base/pause:3.2": Error response from daemon: unauthorized: unauthorized to access repository: base/pause, action: pull: unauthorized to access repository: base/pause, action: pull
Warning  Failed                  19m (x3 over 20m)    kubelet  Error: ImagePullBackOff
Normal   Pulling                 4m47s (x8 over 20m)  kubelet  Pulling image "192.168.24.33:32800/base/kube-proxy:v1.18.2"
Error response from daemon: unauthorized: unauthorized to access repository: base/pause
This happens because there is no secret for the Harbor registry.
# Note!! For the K8S system images, the Harbor project must be set to public; otherwise, even after adding the Harbor secret, pulls of the K8S system images sometimes still fail, for reasons unclear
# Wait a moment and check the kube-proxy pod status again
[root@master1 ~]# kubectl get pod -A
NAMESPACE     NAME               READY   STATUS    RESTARTS   AGE
kube-system   kube-proxy-8tfk8   1/1     Running   0          95m
kube-system   kube-proxy-kgbcg   1/1     Running   0          95m
...
# Creating a docker-registry secret gives behavior similar to docker login (this is the imagePullSecret that business workloads need when creating pods)
[root@master1 pki]# kubectl create secret docker-registry local-harbor-secret --docker-server="192.168.24.33:32800" --docker-username="admin" --docker-password="Harbor12345"
secret/local-harbor-secret created
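# Business pods that pull images from this Harbor project can then reference the secret via imagePullSecrets; a minimal sketch (the pod name and image below are placeholders):
apiVersion: v1
kind: Pod
metadata:
  name: demo-app                       # hypothetical pod name
spec:
  imagePullSecrets:
  - name: local-harbor-secret          # the secret created above
  containers:
  - name: demo-app
    image: 192.168.24.33:32800/library/demo-app:latest   # hypothetical business image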
# Install the calico network plugin. It can also be installed the official way; here, because of network restrictions, the images are uploaded to the local registry instead
# Upload the images required by calico to the registry: 1. calico-node.tar.gz  2. cni.tar.gz
[root@24d33 tmp]# docker load -i calico-node.tar.gz
d9ff549177a9: Loading layer [==================================================>]  4.671MB/4.671MB
c90cb449e31c: Loading layer [==================================================>]  5.661MB/5.661MB
438251805de4: Loading layer [==================================================>]  6.589MB/6.589MB
bd93aff9c601: Loading layer [==================================================>]  2.945MB/2.945MB
2f3dd05fddf7: Loading layer [==================================================>]  35.33kB/35.33kB
aef5612cc463: Loading layer [==================================================>]  53.6MB/53.6MB
Loaded image: quay.io/calico/node:v3.5.3
[root@24d33 tmp]# docker load -i cni.tar.gz
af1b90527f03: Loading layer [==================================================>]  79.23MB/79.23MB
192af4d49c51: Loading layer [==================================================>]  9.728kB/9.728kB
cff7b6ff09c3: Loading layer [==================================================>]  2.56kB/2.56kB
Loaded image: quay.io/calico/cni:v3.5.3
# Push to the local registry
[root@24d33 tmp]# docker tag quay.io/calico/node:v3.5.3 192.168.24.33:32800/base/node:v3.5.3
[root@24d33 tmp]# docker tag quay.io/calico/cni:v3.5.3 192.168.24.33:32800/base/cni:v3.5.3
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/node:v3.5.3
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/cni:v3.5.3
# calico.yaml file
# Calico Version v3.5.3 # https://docs.projectcalico.org/v3.5/releases#v3.5.3 # This manifest includes the following component versions: # calico/node:v3.5.3 # calico/cni:v3.5.3 # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system data: # Typha is disabled. typha_service_name: "none" # Configure the Calico backend to use. calico_backend: "bird" # Configure the MTU to use veth_mtu: "1440" # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.3.0", "plugins": [ { "type": "calico", "log_level": "info", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { "type": "host-local", "subnet": "usePodCidr" }, "policy": { "type": "k8s" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", "snat": true, "capabilities": {"portMappings": true} } ] } --- # This manifest installs the calico/node container, as well # as the Calico CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node spec: selector: matchLabels: k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: labels: k8s-app: calico-node annotations: # This, along with the CriticalAddonsOnly toleration below, # marks the pod as a critical add-on, ensuring it gets # priority scheduling and that its resources are reserved # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: beta.kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 initContainers: # This container installs the Calico CNI binaries # and CNI network config file on each node. - name: install-cni image: quay.io/calico/cni:v3.5.3 command: ["/install-cni.sh"] env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName # CNI MTU Config variable - name: CNI_MTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir containers: # Runs calico/node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: quay.io/calico/node:v3.5.3 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" # Wait for the datastore. 
- name: WAIT_FOR_DATASTORE value: "true" # Set based on the k8s node name. - name: NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,bgp" # Auto-detect the BGP IP address. - name: IP value: "autodetect" - name: IP_AUTODETECTION_METHOD value: "can-reach=192.168.0.56" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "Always" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within `--cluster-cidr`. - name: CALICO_IPV4POOL_CIDR value: "10.244.0.0/16" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN value: "info" - name: FELIX_HEALTHENABLED value: "true" securityContext: privileged: true resources: requests: cpu: 250m livenessProbe: httpGet: path: /liveness port: 9099 host: localhost periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: exec: command: - /bin/calico-node - -bird-ready - -felix-ready periodSeconds: 10 volumeMounts: - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /run/xtables.lock name: xtables-lock readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false volumes: # Used by calico/node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico - name: var-lib-calico hostPath: path: /var/lib/calico - name: xtables-lock hostPath: path: /run/xtables.lock type: FileOrCreate # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system --- # Create all the CustomResourceDefinitions needed for # Calico policy and networking mode. 
apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: felixconfigurations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: FelixConfiguration plural: felixconfigurations singular: felixconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgppeers.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPPeer plural: bgppeers singular: bgppeer --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPConfiguration plural: bgpconfigurations singular: bgpconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPPool plural: ippools singular: ippool --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: HostEndpoint plural: hostendpoints singular: hostendpoint --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: ClusterInformation plural: clusterinformations singular: clusterinformation --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkPolicy plural: globalnetworkpolicies singular: globalnetworkpolicy --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkSet plural: globalnetworksets singular: globalnetworkset --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org spec: scope: Namespaced group: crd.projectcalico.org version: v1 names: kind: NetworkPolicy plural: networkpolicies singular: networkpolicy --- # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: calico-node rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get - apiGroups: [""] resources: - endpoints - services verbs: # Used to discover service IPs for advertisement. - watch - list # Used to discover Typhas. - get - apiGroups: [""] resources: - nodes/status verbs: # Needed for clearing NodeNetworkUnavailable flag. - patch # Calico stores some configuration information in node annotations. - update # Watch for changes to Kubernetes NetworkPolicies. - apiGroups: ["networking.k8s.io"] resources: - networkpolicies verbs: - watch - list # Used by Calico for policy information. - apiGroups: [""] resources: - pods - namespaces - serviceaccounts verbs: - list - watch # The CNI plugin patches pods/status. - apiGroups: [""] resources: - pods/status verbs: - patch # Calico monitors various CRDs for config. 
- apiGroups: ["crd.projectcalico.org"] resources: - globalfelixconfigs - felixconfigurations - bgppeers - globalbgpconfigs - bgpconfigurations - ippools - globalnetworkpolicies - globalnetworksets - networkpolicies - clusterinformations - hostendpoints verbs: - get - list - watch # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - felixconfigurations - clusterinformations verbs: - create - update # Calico stores some configuration information on the node. - apiGroups: [""] resources: - nodes verbs: - get - list - watch # These permissions are only requried for upgrade from v2.6, and can # be removed after upgrade or on fresh installations. - apiGroups: ["crd.projectcalico.org"] resources: - bgpconfigurations - bgppeers verbs: - create - update --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: calico-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system ---
Install the calico network plugin. PS: remember to change the image addresses to the local registry (a sed example is given below, before the apply step)
PS: the main network-related parameters in calico.yaml
Main parameters of the calico-node service:
CALICO_IPV4POOL_CIDR: the IP address pool used by Calico IPAM; pod IPs are allocated from this range
CALICO_IPV4POOL_IPIP: whether to enable IPIP mode; when enabled, Calico creates a virtual tunnel device named tunl0 on each node
IP_AUTODETECTION_METHOD: how the node IP address is detected. By default the IP of the first network interface is used; for nodes with multiple NICs, a regular expression can pick the interface explicitly, e.g. interface=ens.* selects the IP of a NIC whose name starts with ens
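# Before applying, point the image fields in calico.yaml at the local Harbor registry (the manifest above still references quay.io/calico); assuming the images were pushed as shown earlier:
sed -i 's#quay.io/calico/cni:v3.5.3#192.168.24.33:32800/base/cni:v3.5.3#g' calico.yaml
sed -i 's#quay.io/calico/node:v3.5.3#192.168.24.33:32800/base/node:v3.5.3#g' calico.yaml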
kubectl apply -f calico.yaml
# Check the cluster status again
[root@master1 ~]# kubectl get pods -n kube-system
NAME                              READY   STATUS    RESTARTS   AGE
calico-node-wqw4h                 0/1     Running   0          6s
calico-node-xznxx                 0/1     Running   0          6s
coredns-589994f5c6-7bbm7          0/1     Running   0          107m
coredns-589994f5c6-klkzs          0/1     Running   0          107m
etcd-master1                      1/1     Running   0          107m
kube-apiserver-master1            1/1     Running   0          107m
kube-controller-manager-master1   1/1     Running   0          107m
kube-proxy-8tfk8                  1/1     Running   0          106m
kube-proxy-kgbcg                  1/1     Running   0          107m
kube-scheduler-master1            1/1     Running   0          107m
[root@master1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master1 Ready master 110m v1.18.2
node1 Ready <none> 109m v1.18.2
# Cluster installation complete
# Install traefik (the mainstream K8S ingress controllers are the ingress controller and traefik)
1. Load the image
[root@24d33 tmp]# docker load -i traefik_1_7_9.tar.gz
[root@24d33 tmp]# docker tag k8s.gcr.io/traefik:1.7.9 192.168.24.33:32800/base/traefik:1.7.9
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/traefik:1.7.9
2. Create the traefik certificate (run on the master)
mkdir ~/ikube/tls/ -p
echo """
[req]
distinguished_name = req_distinguished_name
prompt = yes
[ req_distinguished_name ]
countryName                   = Country Name (2 letter code)
countryName_value             = CN
stateOrProvinceName           = State or Province Name (full name)
stateOrProvinceName_value     = Beijing
localityName                  = Locality Name (eg, city)
localityName_value            = Haidian
organizationName              = Organization Name (eg, company)
organizationName_value        = Channelsoft
organizationalUnitName        = Organizational Unit Name (eg, section)
organizationalUnitName_value  = R & D Department
commonName                    = Common Name (eg, your name or your server\'s hostname)
commonName_value              = *.multi.io
emailAddress                  = Email Address
emailAddress_value            = lentil1016@gmail.com
""" > ~/ikube/tls/openssl.cnf
3. Generate the required key and certificate
openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
4. Create the required secret
kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key
traefik yaml file
--- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: traefik-ingress-controller rules: - apiGroups: - "" resources: - services - endpoints - secrets verbs: - get - list - watch - apiGroups: - extensions resources: - ingresses verbs: - get - list - watch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: traefik-ingress-controller roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: traefik-ingress-controller subjects: - kind: ServiceAccount name: traefik-ingress-controller namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: traefik-ingress-controller namespace: kube-system --- kind: ConfigMap apiVersion: v1 metadata: name: traefik-conf namespace: kube-system data: traefik.toml: | insecureSkipVerify = true defaultEntryPoints = ["http","https"] [entryPoints] [entryPoints.http] address = ":80" [entryPoints.https] address = ":443" [entryPoints.https.tls] [[entryPoints.https.tls.certificates]] CertFile = "/ssl/tls.crt" KeyFile = "/ssl/tls.key" --- kind: DaemonSet apiVersion: apps/v1 metadata: name: traefik-ingress-controller namespace: kube-system labels: k8s-app: traefik-ingress-lb spec: selector: matchLabels: k8s-app: traefik-ingress-lb name: traefik-ingress-lb template: metadata: labels: k8s-app: traefik-ingress-lb name: traefik-ingress-lb spec: serviceAccountName: traefik-ingress-controller tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule terminationGracePeriodSeconds: 60 hostNetwork: true volumes: - name: ssl secret: secretName: ssl - name: config configMap: name: traefik-conf containers: - image: k8s.gcr.io/traefik:1.7.9 name: traefik-ingress-lb ports: - name: http containerPort: 80 hostPort: 80 - name: admin containerPort: 8080 securityContext: privileged: true args: - --configfile=/config/traefik.toml - -d - --web - --kubernetes volumeMounts: - mountPath: "/ssl" name: "ssl" - mountPath: "/config" name: "config" --- kind: Service apiVersion: v1 metadata: name: traefik-ingress-service spec: selector: k8s-app: traefik-ingress-lb ports: - protocol: TCP port: 80 name: web - protocol: TCP port: 8080 name: admin - protocol: TCP port: 443 name: https type: NodePort --- apiVersion: v1 kind: Service metadata: name: traefik-web-ui namespace: kube-system spec: selector: k8s-app: traefik-ingress-lb ports: - port: 80 targetPort: 8080 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-web-ui namespace: kube-system annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: ingress.multi.io http: paths: - backend: serviceName: traefik-web-ui servicePort: 80
kubectl apply -f traefik.yaml
[root@master1 ~]# kubectl get pods -A
NAMESPACE     NAME                               READY   STATUS    RESTARTS   AGE
...
kube-system   traefik-ingress-controller-kcxrb   1/1     Running   0          10s
kube-system   traefik-ingress-controller-q4ncp   1/1     Running   0          10s
...
# Install the default k8s dashboard
# Install the metrics-server component. metrics-server is used to monitor k8s resources: it lets you view the CPU usage, memory usage, etc. of nodes and pods. (metrics-server does not collect CPU and other metrics itself; it aggregates the corresponding monitoring data from cAdvisor, which is already built into the kubelet, so installing metrics-server is all that is needed.) It is not installed by default; the component runs in the kube-system namespace.
[root@24d33 tmp]# docker load -i metrics-server-amd64_0_3_1.tar.gz
[root@24d33 tmp]# docker tag k8s.gcr.io/metrics-server-amd64:v0.3.1 192.168.24.33:32800/base/metrics-server-amd64:v0.3.1
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/metrics-server-amd64:v0.3.1
[root@24d33 tmp]# docker load -i addon.tar.gz
[root@24d33 tmp]# docker tag k8s.gcr.io/addon-resizer:1.8.4 192.168.24.33:32800/base/addon-resizer:1.8.4
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/addon-resizer:1.8.4
#metrics-server yaml
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: metrics-server:system:auth-delegator labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:auth-delegator subjects: - kind: ServiceAccount name: metrics-server namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: metrics-server-auth-reader namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: extension-apiserver-authentication-reader subjects: - kind: ServiceAccount name: metrics-server namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: metrics-server namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: system:metrics-server labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile rules: - apiGroups: - "" resources: - pods - nodes - nodes/stats - namespaces verbs: - get - list - watch - apiGroups: - "extensions" resources: - deployments verbs: - get - list - update - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: system:metrics-server labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:metrics-server subjects: - kind: ServiceAccount name: metrics-server namespace: kube-system --- apiVersion: v1 kind: ConfigMap metadata: name: metrics-server-config namespace: kube-system labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: EnsureExists data: NannyConfiguration: |- apiVersion: nannyconfig/v1alpha1 kind: NannyConfiguration --- apiVersion: apps/v1 kind: Deployment metadata: name: metrics-server namespace: kube-system labels: k8s-app: metrics-server kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile version: v0.3.1 spec: selector: matchLabels: k8s-app: metrics-server version: v0.3.1 template: metadata: name: metrics-server labels: k8s-app: metrics-server version: v0.3.1 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' seccomp.security.alpha.kubernetes.io/pod: 'docker/default' spec: priorityClassName: system-cluster-critical serviceAccountName: metrics-server containers: - name: metrics-server image: k8s.gcr.io/metrics-server-amd64:v0.3.1 imagePullPolicy: IfNotPresent command: - /metrics-server - --metric-resolution=30s - --kubelet-preferred-address-types=InternalIP - --kubelet-insecure-tls ports: - containerPort: 443 name: https protocol: TCP - name: metrics-server-nanny image: k8s.gcr.io/addon-resizer:1.8.4 imagePullPolicy: IfNotPresent resources: limits: cpu: 100m memory: 300Mi requests: cpu: 5m memory: 50Mi env: - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: MY_POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: metrics-server-config-volume mountPath: /etc/config command: - /pod_nanny - --config-dir=/etc/config - --cpu=300m - --extra-cpu=20m - --memory=200Mi - --extra-memory=10Mi - --threshold=5 - --deployment=metrics-server - --container=metrics-server - --poll-period=300000 - --estimator=exponential - --minClusterSize=2 volumes: - name: 
metrics-server-config-volume configMap: name: metrics-server-config tolerations: - key: "CriticalAddonsOnly" operator: "Exists" - key: node-role.kubernetes.io/master effect: NoSchedule --- apiVersion: v1 kind: Service metadata: name: metrics-server namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/cluster-service: "true" kubernetes.io/name: "Metrics-server" spec: selector: k8s-app: metrics-server ports: - port: 443 protocol: TCP targetPort: https --- apiVersion: apiregistration.k8s.io/v1beta1 kind: APIService metadata: name: v1beta1.metrics.k8s.io labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: service: name: metrics-server namespace: kube-system group: metrics.k8s.io version: v1beta1 insecureSkipTLSVerify: true groupPriorityMinimum: 100 versionPriority: 100
kubectl apply -f metrics.yaml
[root@master1 ~]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
...
metrics-server-5466d65cf-8k82v   2/2     Running   0          35s
...
# Use kubectl top to view pod and node resource usage
[root@master1 ~]# kubectl top pods -n kube-system
NAME                               CPU(cores)   MEMORY(bytes)
calico-node-wqw4h                  14m          38Mi
calico-node-xznxx                  15m          39Mi
coredns-589994f5c6-7bbm7           2m           12Mi
coredns-589994f5c6-klkzs           2m           11Mi
etcd-master1                       10m          35Mi
kube-apiserver-master1             29m          303Mi
kube-controller-manager-master1    12m          51Mi
kube-proxy-8tfk8                   1m           16Mi
kube-proxy-kgbcg                   1m           20Mi
kube-scheduler-master1             3m           17Mi
metrics-server-5466d65cf-8k82v     1m           20Mi
traefik-ingress-controller-kcxrb   4m           24Mi
traefik-ingress-controller-q4ncp   4m           24Mi
[root@master1 ~]# kubectl top nodes
NAME      CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master1   124m         3%     1790Mi          46%
node1     71m          1%     1146Mi          30%
#dashboard yaml
# Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: Namespace metadata: name: kubernetes-dashboard --- apiVersion: v1 kind: ServiceAccount metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard --- kind: Service apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: ports: - port: 443 targetPort: 8443 selector: k8s-app: kubernetes-dashboard --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-certs namespace: kubernetes-dashboard type: Opaque --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-csrf namespace: kubernetes-dashboard type: Opaque data: csrf: "" --- apiVersion: v1 kind: Secret metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-key-holder namespace: kubernetes-dashboard type: Opaque --- kind: ConfigMap apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard-settings namespace: kubernetes-dashboard --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard rules: # Allow Dashboard to get, update and delete Dashboard exclusive secrets. - apiGroups: [""] resources: ["secrets"] resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] verbs: ["get", "update", "delete"] # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. - apiGroups: [""] resources: ["configmaps"] resourceNames: ["kubernetes-dashboard-settings"] verbs: ["get", "update"] # Allow Dashboard to get metrics. 
- apiGroups: [""] resources: ["services"] resourceNames: ["heapster", "dashboard-metrics-scraper"] verbs: ["proxy"] - apiGroups: [""] resources: ["services/proxy"] resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] verbs: ["get"] --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard rules: # Allow Metrics Scraper to get metrics from the Metrics server - apiGroups: ["metrics.k8s.io"] resources: ["pods", "nodes"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: kubernetes-dashboard subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kubernetes-dashboard --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: kubernetes-dashboard roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: kubernetes-dashboard subjects: - kind: ServiceAccount name: kubernetes-dashboard namespace: kubernetes-dashboard --- kind: Deployment apiVersion: apps/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: kubernetes-dashboard template: metadata: labels: k8s-app: kubernetes-dashboard spec: securityContext: seccompProfile: type: RuntimeDefault containers: - name: kubernetes-dashboard image: kubernetesui/dashboard:v2.6.0 imagePullPolicy: Always ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates - --namespace=kubernetes-dashboard # Uncomment the following line to manually specify Kubernetes API server Host # If not specified, Dashboard will attempt to auto discover the API server and connect # to it. Uncomment only if the default does not work. 
# - --apiserver-host=http://my-address:port volumeMounts: - name: kubernetes-dashboard-certs mountPath: /certs # Create on-disk volume to store exec logs - mountPath: /tmp name: tmp-volume livenessProbe: httpGet: scheme: HTTPS path: / port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 2001 volumes: - name: kubernetes-dashboard-certs secret: secretName: kubernetes-dashboard-certs - name: tmp-volume emptyDir: {} serviceAccountName: kubernetes-dashboard nodeSelector: "kubernetes.io/os": linux # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule --- kind: Service apiVersion: v1 metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kubernetes-dashboard spec: ports: - port: 8000 targetPort: 8000 selector: k8s-app: dashboard-metrics-scraper --- kind: Deployment apiVersion: apps/v1 metadata: labels: k8s-app: dashboard-metrics-scraper name: dashboard-metrics-scraper namespace: kubernetes-dashboard spec: replicas: 1 revisionHistoryLimit: 10 selector: matchLabels: k8s-app: dashboard-metrics-scraper template: metadata: labels: k8s-app: dashboard-metrics-scraper spec: securityContext: seccompProfile: type: RuntimeDefault containers: - name: dashboard-metrics-scraper image: kubernetesui/metrics-scraper:v1.0.8 ports: - containerPort: 8000 protocol: TCP livenessProbe: httpGet: scheme: HTTP path: / port: 8000 initialDelaySeconds: 30 timeoutSeconds: 30 volumeMounts: - mountPath: /tmp name: tmp-volume securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 1001 runAsGroup: 2001 serviceAccountName: kubernetes-dashboard nodeSelector: "kubernetes.io/os": linux # Comment the following tolerations if Dashboard must not be deployed on master tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule volumes: - name: tmp-volume emptyDir: {}
[root@24d33 tmp]# docker load -i dashboard_2_0_0.tar.gz
954115f32d73: Loading layer [==================================================>]  91.22MB/91.22MB
Loaded image: kubernetesui/dashboard:v2.0.0-beta8
[root@24d33 tmp]# docker load -i metrics-scrapter-1-0-1.tar.gz
89ac18ee460b: Loading layer [==================================================>]  238.6kB/238.6kB
878c5d3194b0: Loading layer [==================================================>]  39.87MB/39.87MB
1dc71700363a: Loading layer [==================================================>]  2.048kB/2.048kB
Loaded image: kubernetesui/metrics-scraper:v1.0.1
[root@24d33 tmp]# docker tag kubernetesui/dashboard:v2.0.0-beta8 192.168.24.33:32800/base/dashboard:v2.0.0-beta8
[root@24d33 tmp]# docker tag kubernetesui/metrics-scraper:v1.0.1 192.168.24.33:32800/base/metrics-scraper:v1.0.1
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/dashboard:v2.0.0-beta8
[root@24d33 tmp]# docker push 192.168.24.33:32800/base/metrics-scraper:v1.0.1
kubectl apply -f kubernetes-dashboard.yaml
# Change the default service type from ClusterIP to NodePort so it can be accessed from outside
[root@master1 ~]# kubectl edit svc -n kubernetes-dashboard kubernetes-dashboard
service/kubernetes-dashboard edited
[root@master1 ~]# kubectl get svc -n kubernetes-dashboard
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.102.236.94   <none>        8000/TCP        6m20s
kubernetes-dashboard        NodePort    10.107.32.36    <none>        443:30712/TCP   6m20s
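# Equivalently, the service type can be changed non-interactively with a patch instead of kubectl edit:
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'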
## Then access it from a browser
### Fixing the issue where some browsers cannot open the Kubernetes Dashboard because of its own certificate
--------------------------------------------------------------------------------------------------------
After a normal deployment of the Kubernetes Dashboard, some browsers (for example Chrome) still cannot access it. This is usually because the certificate generated when the Dashboard was deployed has an invalid date range. The fix is to use a self-signed certificate instead.
1. Generate a self-signed certificate
1) Generate the key for the certificate request
openssl genrsa -out dashboard.key 2048
2) Generate the certificate request
openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.24.31'
3) Generate the self-signed certificate
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
-----------------------------------
2. Create a secret with the same name as in the Kubernetes Dashboard deployment file
1) Delete the previously deployed Dashboard
kubectl delete -f kubernetes-dashboard.yaml
2) Create a secret with the same name as in the Kubernetes Dashboard deployment file
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque
kubectl create namespace kubernetes-dashboard
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
3) Comment out the Dashboard Secret section in kubernetes-dashboard.yaml (i.e. the block commented above)
4) Re-apply the kubernetes-dashboard.yaml file
kubectl apply -f kubernetes-dashboard.yaml
# Change the svc type to NodePort again and test access
--------------------------------------------------------------------------------
# Look at the secrets in the kubernetes-dashboard namespace, find the one whose name contains kubernetes-dashboard-token, view its token value, and paste it into the browser
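# For example, to find the exact secret name first (the random suffix differs per cluster):
kubectl get secret -n kubernetes-dashboard | grep kubernetes-dashboard-token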
[root@master1 ~]# kubectl describe secret kubernetes-dashboard-token-dggnt -n kubernetes-dashboard
Name:         kubernetes-dashboard-token-dggnt
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: 6869153c-9df3-4346-9f09-1bd4e13ead11
Type:  kubernetes.io/service-account-token
Data
====
ca.crt:     1025 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImswYXhTbEtMZE5udEJzdnNKTUNfNURpY2NzVkxQZTBmMTgyY0p0VGpveHcifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1kZ2dudCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjY4NjkxNTNjLTlkZjMtNDM0Ni05ZjA5LTFiZDRlMTNlYWQxMSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.gEb-5Z3FcHuj_iFLOsGEpLSgqzSqblxGS2uT1FNvQxS3HG0TRYWtFbxLM-i9JqN47MPV7FsogjlhH4aBdcwPf78zeeCLvEhsHbstLkjUkNLV6TjrMgWUVmZLzjafCRNFvgfUASLTCdolznis58HYJ38hAswTZ0ZtFJX-U4H_dJPCjghKg9sL0Xx6lDMVbd6Thnp-o8ONjUKJbJ8Jx0XUxE5g3DKt06Td5t7aGt7JERtzVyO-6mutO5Y2VB3pKoMA0YsiWgD7ZThsr2Fbh7PtVFtcyGa4MVfwcxWlCACSyxkZXU1_yys0Uw3e_CDSWH8yxrleXDE3KBIgxOa0zVDjlg
# By default this token can only see the default namespace and has limited permissions
# Create an admin binding so the token can view all namespaces
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
# Dashboard setup complete