二进制-k8s-shell

1,etcd 

测试     一台节点

[root@localhost etcd]# cat install_etcd.sh 
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Create etcd.conf, etcd.service, and start the etcd service.
## Usage: ./install_etcd.sh ETCD_NAME ETCD_INITIAL_CLUSTER
## eg:    ./install_etcd.sh etcd01 etcd01=http://192.168.100.50:2380


mkdir -p /usr/local/kubernetes/config
mkdir -p /usr/local/kubernetes/bin

# etcd data directory -- change to suit the environment.
etcd_data_dir=/var/lib/etcd
mkdir -p "${etcd_data_dir}"

#ETCD_NAME=${1:-"etcd01"}
#ETCD_INITIAL_CLUSTER=${3:-"etcd01=http://192.168.100.50:2380,etcd02=http://192.168.100.51:2380,etcd03=http://192.168.100.52:2380"}
#CURRENT_HOST_IP=`ifconfig ens192 | grep 'inet ' | awk '{ print $2}'`

ETCD_NAME="$1"
ETCD_INITIAL_CLUSTER="$2"

# -z with quoting is safe for empty values; the original unquoted
# `[ ! $VAR ]` breaks when the value contains spaces.
if [ -z "${ETCD_NAME}" ]; then
  echo "ENTER ETCD_NAME eg:etcd01"
  exit 1
fi

if [ -z "${ETCD_INITIAL_CLUSTER}" ]; then
  echo "ENTER ETCD_INITIAL_CLUSTER eg:etcd01=http://192.168.100.50:2380,etcd02=http://192.168.100.51:2380,etcd03=http://192.168.100.52:2380"
  exit 1
fi

cp -rf bin/* /usr/local/kubernetes/bin
chmod +x /usr/local/kubernetes/bin/*

# First IPv4 address on interface ens32 -- adjust the interface name per host.
ETCD_LISTEN_IP=$(ip -4 a | grep ens32 | grep 'inet ' | awk '{ print $2 }' | awk -F / '{ print $1 }')

cat <<EOF >/usr/local/kubernetes/config/etcd.conf
# [member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="${etcd_data_dir}/default.etcd"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${ETCD_LISTEN_IP}:2380"
ETCD_INITIAL_CLUSTER="${ETCD_INITIAL_CLUSTER}"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://${ETCD_LISTEN_IP}:2379"
EOF

# systemd only treats '#' as a comment at the start of a line, so the unit
# below must not carry trailing comments (the original appended one after
# EnvironmentFile=, which systemd would read as part of the value).  The
# original also set Type= twice (simple, then notify); only the last
# assignment took effect, so a single Type=notify is kept.
cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=notify
WorkingDirectory=${etcd_data_dir}
# Environment file; the leading '-' means a missing file is not an error.
EnvironmentFile=-/usr/local/kubernetes/config/etcd.conf
ExecStart=/usr/local/kubernetes/bin/etcd \\
    --name=\${ETCD_NAME} \\
    --data-dir=\${ETCD_DATA_DIR} \\
    --listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \\
    --listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS} \\
    --advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \\
    --initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\
    --initial-cluster=\${ETCD_INITIAL_CLUSTER} \\
    --initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \\
    --initial-cluster-state=\${ETCD_INITIAL_CLUSTER_STATE}

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd
[root@localhost etcd]# ./install_etcd.sh etcd01 etcd01=http://192.168.80.128:2380

 

[root@localhost etcd]# /usr/local/kubernetes/bin/etcdctl member list
f086cd33a6575fac: name=etcd01 peerURLs=http://192.168.80.128:2380 clientURLs=http://192.168.80.128:2379 isLeader=true

2,flannel 

每台节点,都要有etcdctl命令

scp etc* 192.168.80.129:/usr/local/kubernetes/bin/  
[root@localhost flannel]# ./install_flannel.sh http://192.168.80.128:2379
[root@localhost flannel]# ./install_flannel.sh http://192.168.80.128:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/flannel.service to /usr/lib/systemd/system/flannel.service.
Created symlink from /etc/systemd/system/docker.service.requires/flannel.service to /usr/lib/systemd/system/flannel.service.
[root@localhost flannel]# cat install_flannel.sh 
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Create flannel.conf, flannel.service, write the pod network config to
## etcd, and start the flannel service.
## Usage: ./install_flannel.sh ETCD_SERVERS

mkdir -p /usr/local/kubernetes/config
mkdir -p /usr/local/kubernetes/bin


#ETCD_SERVERS=${1:-"http://192.168.100.50:2379,http://192.168.100.51:2379,http://192.168.100.52:2379"}

ETCD_SERVERS="$1"

if [ -z "${ETCD_SERVERS}" ]; then
  echo "ENTER ETCD_SERVERS eg:http://192.168.100.50:2379,http://192.168.100.51:2379,http://192.168.100.52:2379"
  exit 1
fi

cp -rf bin/* /usr/local/kubernetes/bin
chmod +x /usr/local/kubernetes/bin/*

# Overlay network configuration that flanneld reads back from etcd.
FLANNEL_NET='{"Network":"172.18.0.0/16", "SubnetMin": "172.18.1.0", "SubnetMax": "172.18.254.0",  "Backend": {"Type": "vxlan"}}'

# Host interface flannel binds to -- adjust per machine.
#IFACE="eth0"
IFACE="ens32"

cat <<EOF >/usr/local/kubernetes/config/flannel.conf
FLANNEL_ETCD="--etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="--etcd-prefix=/cmpk8s/network"
FLANNEL_IFACE="--iface=${IFACE}"
EOF

cat <<EOF >/usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target

[Service]
EnvironmentFile=-/usr/local/kubernetes/config/flannel.conf
ExecStartPre=/usr/local/kubernetes/bin/remove-docker0.sh
ExecStart=/usr/local/kubernetes/bin/flanneld \\
    --ip-masq \\
    \${FLANNEL_ETCD} \\
    \${FLANNEL_ETCD_KEY}
ExecStartPost=/usr/local/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

# Store FLANNEL_NET to etcd, retrying until the key is readable.
# NOTE: never place a comment after a line-continuation backslash.  The
# original wrote "... \   #comment", which split the etcdctl command in
# two, so the `get` check could never succeed and the loop always ran
# into the 600-attempt timeout.
attempt=0
while true; do
  # TLS variant kept for reference:
  # /usr/local/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} ...
  if /usr/local/kubernetes/bin/etcdctl \
      --no-sync -C "${ETCD_SERVERS}" \
      get /cmpk8s/network/config >/dev/null 2>&1; then
    break
  else
    # Give up after 600 attempts (roughly 30 minutes at 3s per retry).
    if (( attempt > 600 )); then
      echo "timeout for waiting network config" > ~/kube/err.log
      exit 2
    fi

    /usr/local/kubernetes/bin/etcdctl \
      --no-sync -C "${ETCD_SERVERS}" \
      mk /cmpk8s/network/config "${FLANNEL_NET}" >/dev/null 2>&1
    attempt=$((attempt+1))
    sleep 3
  fi
done

systemctl enable flannel
systemctl daemon-reload
systemctl restart flannel

3,docker

每台执行

[root@localhost docker]# cat install-docker.sh 
#!/bin/sh

# Install a docker-ce static-binary release and register it as a systemd
# service that consumes flannel's generated network options.
# Usage: ./install-docker.sh FILE_NAME_DOCKER_CE_TAR_GZ

usage(){
  echo "Usage: $0 FILE_NAME_DOCKER_CE_TAR_GZ"
  echo "       $0 docker-17.09.0-ce.tgz"
  echo "Get docker-ce binary from: https://download.docker.com/linux/static/stable/x86_64/"
  echo "eg: wget https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz"
  echo ""
}

SYSTEMDDIR=/usr/lib/systemd/system
SERVICEFILE=docker.service
DOCKERDIR=/usr/bin/
DOCKERBIN=docker
SERVICENAME=docker

# Exactly one argument (the tarball name) is required.
if [ $# -ne 1 ]; then
  usage
  exit 1
else
  FILETARGZ="$1"
fi

# The tarball must exist before we try to unpack it.
if [ ! -f "${FILETARGZ}" ]; then
  echo "Docker binary tgz files does not exist, please check it"
  echo "Get docker-ce binary from: https://download.docker.com/linux/static/stable/x86_64/"
  echo "eg: wget https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz"
  exit 1
fi

echo "##unzip : tar xvpf ${FILETARGZ}"
tar xvpf "${FILETARGZ}"
echo

echo "##binary : ${DOCKERBIN} copy to ${DOCKERDIR}"
cp -p ${DOCKERBIN}/* ${DOCKERDIR} >/dev/null 2>&1
which ${DOCKERBIN}

echo "##systemd service: ${SERVICEFILE}"
echo "##docker.service: create docker systemd file"
# Every '$' that must survive into the unit file is escaped.  The original
# left \$MAINPID unescaped, so the shell expanded it to an empty string and
# wrote "ExecReload=/bin/kill -s HUP" with no PID.  All continuation
# backslashes are now written as '\\' so the generated unit keeps one
# option per line instead of silently joining them.
cat >${SYSTEMDDIR}/${SERVICEFILE} <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target docker.socket flannel.service
Requires=flannel.service

[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
WorkingDirectory=/usr/local/kubernetes/bin/
ExecStart=/usr/bin/dockerd \\
                \$DOCKER_OPT_BIP \\
                \$DOCKER_OPT_MTU \\
                -H tcp://0.0.0.0:4243 \\
                -H unix:///var/run/docker.sock \\
                --selinux-enabled=false \\
                --log-opt max-size=1g
ExecReload=/bin/kill -s HUP \$MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

echo ""

systemctl daemon-reload
echo "##Service status: ${SERVICENAME}"
systemctl status ${SERVICENAME}
echo "##Service restart: ${SERVICENAME}"
systemctl restart ${SERVICENAME}
echo "##Service status: ${SERVICENAME}"
systemctl status ${SERVICENAME}

echo "##Service enabled: ${SERVICENAME}"
systemctl enable ${SERVICENAME}

echo "## docker version"
docker version
[root@localhost docker]# ./install-docker.sh  docker-18.03.0-ce.tgz

4,k8s_master

[root@localhost master]# cat install_k8s_master.sh 
#!/bin/bash

## Install the Kubernetes master: copy binaries, add them to PATH, then run
## the per-component setup scripts (kubeconfig, apiserver,
## controller-manager, scheduler, proxy) from the current directory.
## Usage: ./install_k8s_master.sh MASTER_ADDRESS ETCD_SERVERS

#MASTER_ADDRESS=${1:-"192.168.100.50"}
MASTER_ADDRESS="$1"

ETCD_SERVERS="$2"

if [ -z "${MASTER_ADDRESS}" ]; then
  echo "ENTER MASTER_ADDRESS eg:192.168.100.50"
  exit 1
fi

if [ -z "${ETCD_SERVERS}" ]; then
  echo "ENTER ETCD_SERVERS eg:http://192.168.100.50:2379,http://192.168.100.51:2379,http://192.168.100.52:2379"
  exit 1
fi

# Export for the component scripts invoked below.
export MASTER_ADDRESS

export ETCD_SERVERS

echo "MASTER_ADDRESS: ${MASTER_ADDRESS}"

echo "ETCD_SERVERS: ${ETCD_SERVERS}"


mkdir -p /usr/local/kubernetes/config
mkdir -p /usr/local/kubernetes/bin

echo "cp file ..."

cp -rf bin/* /usr/local/kubernetes/bin
chmod +x /usr/local/kubernetes/bin/*

echo "set  kubeconfig ..."

# Append the PATH setup only once so that re-running the installer does
# not keep duplicating these lines in /etc/profile.
if ! grep -q 'K8S_HOME=/usr/local/kubernetes' /etc/profile; then
  echo "export K8S_HOME=/usr/local/kubernetes" >> /etc/profile
  echo "export PATH=\$PATH:\$K8S_HOME/bin" >> /etc/profile
fi
source /etc/profile


./kubeconfig.sh

echo "set  apiserver ..."

./apiserver.sh

# Give the apiserver a moment to come up before configuring the
# components that talk to it.  Plain "5" (no "s" suffix) is portable.
sleep 5

echo "set  controller-manager ..."

./controller-manager.sh

echo "set  scheduler ..."

./scheduler.sh

echo "set proxy ..."

./proxy.sh

echo "install success ..."

[root@localhost master]# ./install_k8s_master.sh 192.168.80.128 http://192.168.80.128:2379
echo 'KUBE_SERVICE_ADDRESSES_NODE_PORT="--service-node-port-range=1-65535"' >>/usr/local/kubernetes/config/kube-apiserver.conf   #修改k8s调用端口范围
echo 'KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.200.0.0/16"' >>/usr/local/kubernetes/config/kube-apiserver.conf   #修改service IP地址池
[root@localhost ~]# systemctl daemon-reload && systemctl restart kube-apiserver.service     

5,k8s-node

[root@localhost ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.80.129 192-168-80-129
192.168.80.128 192-168-80-128
192.168.80.130 192-168-80-130
[root@localhost node]# ./install_k8s_node.sh 192.168.80.128
MASTER_ADDRESS: 192.168.80.128
cp file ...
install success ...
[root@localhost bin]# pwd
/usr/local/kubernetes/bin
[root@localhost bin]# cp kubectl /usr/bin/
[root@localhost node]# sed -i "s/\${HOSTNAME}/192-168-80-130/g" kubelet.sh  #128 129
[root@localhost node]# sed -i "s/\${HOSTNAME}/192-168-80-130/g" proxy.sh    #128 129 
#DNS_SERVER_IP=${1:-"172.18.0.250"} 需要更改
[root@localhost node]# vim /usr/local/kubernetes/config/kubelet.conf
KUBELET_DNS_IP="--cluster-dns=100.254.0.250" #更改各节点kubelet默认DNS,与clusterIP一致

[root@localhost bin]# kubectl get node
NAME STATUS ROLES AGE VERSION
192-168-80-128 Ready <none> 39s v1.18
192-168-80-131 Ready <none> 35s v1.18
192-168-80-132 Ready <none> 33s v1.18

[root@localhost images]# docker load -i gcr.io~google_containers~pause-amd64~3.0.tar.gz  #加载pod基础镜像

6,安装Kube-dns,master节点执行

[root@localhost images]# docker load -i k8s-dns-dnsmasq-nanny-amd64_v1.14.7.tar
[root@localhost images]# docker load -i k8s-dns-kube-dns-amd64_1.14.7.tar
[root@localhost images]# docker load -i k8s-dns-sidecar-amd64_1.14.7.tar

 

[root@localhost kube-dns]# cat kube-dns.yaml
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# Warning: This is a file generated from the base underscore template file: kube-dns.yaml.base

# Service exposing kube-dns inside the cluster on a fixed IP (53/UDP, 53/TCP).
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.250   # DNS IP; must lie inside the apiserver service CIDR and match the kubelet --cluster-dns value
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
# Empty, optional ConfigMap; the kube-dns pod mounts it at /kube-dns-config
# so custom stub domains/upstream servers can be supplied without redeploying.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
# kube-dns Deployment: three containers per pod -- kubedns (serves records on
# 10053), dnsmasq (caches and listens on 53), and a sidecar that probes both
# and exports metrics on 10054.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        # NOTE(review): critical-pod annotation is deprecated in newer k8s
        # releases (replaced by priorityClassName) -- confirm for this cluster.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      #imagePullSecrets:
      #- name: registrykey-aliyun-vpc
      containers:
      - name: kubedns
        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --kube-master-url=http://192.168.80.128:8080   # insecure apiserver URL -- change to your master node
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        # everything after '--' is passed straight to dnsmasq
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.

 

[root@localhost kube-dns]# kubectl get po -A
NAMESPACE     NAME                        READY   STATUS    RESTARTS   AGE
default       nginx-server                1/1     Running   0          12m
kube-system   kube-dns-57b4899fc9-brsqd   3/3     Running   0          7m9s
/ # ping www.baidu.com 
PING www.baidu.com (110.242.68.3): 56 data bytes
64 bytes from 110.242.68.3: seq=0 ttl=127 time=70.298 ms
64 bytes from 110.242.68.3: seq=1 ttl=127 time=12.618 ms

 

7,dashboard

[root@localhost images]# docker load -i kubernetes-dashboard-amd64_v1.8.3.tar

 

[root@192-168-80-128 dashboard]# cat kubernetes-dashboard.yaml
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v2.1.0 # matches k8s 1.18; note the image loaded earlier in this doc is v1.8.3 -- confirm which tag is actually present
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
          - --apiserver-host=http://192.168.80.128:8080    # change this to your master's insecure apiserver address
        volumeMounts:
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      # NOTE(review): the kubernetes-dashboard ServiceAccount is not created
      # anywhere in this file -- verify it exists before applying.
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #
# Exposes the dashboard on every node at port 30090 (NodePort),
# forwarding to the container's HTTP port 9090.

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
    nodePort: 30090
  selector:
    k8s-app: kubernetes-dashboard

 

 

posted @ 2022-02-16 20:58  gg888666  阅读(69)  评论(0编辑  收藏  举报