Kubernetes Enterprise Deployment in Practice: Deploying Addon Plugins
Verify the Kubernetes cluster
- On any compute node, create a resource manifest; here we use HDSS7-21.host.com.
[root@hdss7-21 ~]# vi nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.15
        ports:
        - containerPort: 80
[root@hdss7-21 ~]# kubectl create -f nginx-ds.yaml
daemonset.extensions/nginx-ds created
[root@hdss7-21 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-1 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
etcd-2 Healthy {"health": "true"}
[root@hdss7-21 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
hdss7-21.host.com Ready master,node 17h v1.15.2
hdss7-22.host.com Ready master,node 16h v1.15.2
[root@hdss7-21 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-ds-jw5c5 1/1 Running 0 116s
nginx-ds-nsfrf 1/1 Running 0 116s
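A quick sanity check (a sketch; the pod names and IPs are examples, substitute the ones kubectl reports) is to confirm that the DaemonSet placed one pod on each node. Note that until flannel is deployed, the hosts have no routes to each other's docker subnets, so a pod in 172.7.22.0/24 is unreachable from hdss7-21:
[root@hdss7-21 ~]# kubectl get pods -o wide
[root@hdss7-21 ~]# ping -c 1 172.7.22.2   # use the pod IP shown for the other node; this fails before flannel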
Deploy flannel
Cluster plan
Hostname | Role | IP |
---|---|---|
HDSS7-21.host.com | flannel | 10.0.0.21 |
HDSS7-22.host.com | flannel | 10.0.0.22 |
Note: this walkthrough uses HDSS7-21.host.com as the example; the other compute node is installed and deployed the same way.
Add iptables rules on each compute node
Note: the iptables rules differ slightly between hosts; adjust them when running on the other compute nodes.
- Optimize the SNAT rules so that pod-to-pod traffic between compute nodes is no longer masqueraded (the actual commands are shown in the "Flannel: optimizing the SNAT rules" section below)
Save the iptables rules on each compute node
Download the software, unpack it, and create a symlink
[root@hdss7-21 ~]# cd /opt/src/
[root@hdss7-21 src]# mkdir /opt/flannel-v0.11.0
[root@hdss7-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@hdss7-21 src]# tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@hdss7-21 src]# ln -s /opt/flannel-v0.11.0/ /opt/flannel
[root@hdss7-21 src]# ls -l /opt/flannel/
total 34436
-rwxr-xr-x 1 root root 35249016 Jan 29 2019 flanneld
-rwxr-xr-x 1 root root 2139 Oct 23 2018 mk-docker-opts.sh
-rw-r--r-- 1 root root 4300 Oct 23 2018 README.md
Final directory layout
[root@hdss7-21 src]# cd /opt/
[root@hdss7-21 opt]# tree -L 2
.
├── containerd
│ ├── bin
│ └── lib
├── etcd -> etcd-v3.1.20/
├── etcd-v3.1.20
│ ├── certs
│ ├── Documentation
│ ├── etcd
│ ├── etcdctl
│ ├── etcd-server-startup.sh
│ ├── README-etcdctl.md
│ ├── README.md
│ └── READMEv2-etcdctl.md
├── flannel -> /opt/flannel-v0.11.0/
├── flannel-v0.11.0
│ ├── flanneld
│ ├── mk-docker-opts.sh
│ └── README.md
├── kubernetes -> kubernetes-v1.15.2/
├── kubernetes-v1.15.2
│ ├── addons
│ ├── LICENSES
│ └── server
└── src
├── etcd-v3.1.20-linux-amd64.tar.gz
├── flannel-v0.11.0-linux-amd64.tar.gz
└── kubernetes-server-linux-amd64-v1.15.2.tar.gz
Copy the certificates
[root@hdss7-21 flannel]# mkdir cert
[root@hdss7-21 flannel]# cd cert/
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/ca.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client.pem .
[root@hdss7-21 cert]# scp hdss7-200:/opt/certs/client-key.pem .
Create the configuration
[root@hdss7-21 flannel]# vi subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
Note: the flannel configuration differs slightly between hosts; adjust it when deploying the other nodes.
Create the startup script
[root@hdss7-21 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.0.0.21 \
--etcd-endpoints=https://10.0.0.12:2379,https://10.0.0.21:2379,https://10.0.0.22:2379 \
--etcd-keyfile=./cert/client-key.pem \
--etcd-certfile=./cert/client.pem \
--etcd-cafile=./cert/ca.pem \
--iface=ens33 \
--subnet-file=./subnet.env \
--healthz-port=2401
Note: the flannel startup script differs slightly between hosts; adjust it when deploying the other nodes.
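For reference, a minimal sketch of what changes on the other node, hdss7-22 (values inferred from the cluster plan above; everything else stays identical):
# /opt/flannel/subnet.env on hdss7-22
FLANNEL_SUBNET=172.7.22.1/24
# flanneld.sh on hdss7-22
--public-ip=10.0.0.22 \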
Check the configuration and permissions, and create the log directory
[root@hdss7-21 flannel]# chmod +x flanneld.sh
[root@hdss7-21 flannel]# mkdir -p /data/logs/flanneld
Configure etcd: add the host-gw backend
[root@hdss7-21 flannel]# cd /opt/etcd
[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
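The key can be read back to confirm it was stored (this etcdctl is the v2 client shipped with etcd 3.1, so get/set operate on the v2 keyspace):
[root@hdss7-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}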
Create the supervisor configuration
[root@hdss7-21 etcd]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-7-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
killasgroup=true
stopasgroup=true
Start the service and check it
[root@hdss7-21 etcd]# supervisorctl update
flanneld-7-21: added process group
[root@hdss7-21 etcd]# supervisorctl status
etcd-server-7-21 RUNNING pid 6601, uptime 4 days, 17:08:54
flanneld-7-21 RUNNING pid 73590, uptime 0:03:59
kube-apiserver-7-21 RUNNING pid 6604, uptime 4 days, 17:08:54
kube-controller-manager-7-21 RUNNING pid 6609, uptime 4 days, 17:08:54
kube-kubelet-7-21 RUNNING pid 6600, uptime 4 days, 17:08:54
kube-proxy-7-21 RUNNING pid 6610, uptime 4 days, 17:08:54
kube-scheduler-7-21 RUNNING pid 6616, uptime 4 days, 17:08:54
Install, deploy, start, and check the flannel service on all hosts in the cluster plan.
Verify the cluster again
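A minimal re-check (a sketch; pod names and IPs are examples, substitute the ones kubectl reports): with flanneld running on both nodes in host-gw mode, each host should now carry a static route to the other host's docker subnet, and pods should reach each other across nodes:
[root@hdss7-21 ~]# route -n | grep 172.7
[root@hdss7-21 ~]# kubectl get pods -o wide
[root@hdss7-21 ~]# ping -c 1 172.7.22.2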
Other flannel working modes
VxLAN mode
[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'
Direct routing mode
[root@hdss7-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN","DirectRouting": true}}'
Flannel: optimizing the SNAT rules
[root@hdss7-21 ~]# yum install iptables-services -y
[root@hdss7-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@hdss7-21 ~]# service iptables save
[root@hdss7-21 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@hdss7-21 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@hdss7-21 ~]# service iptables save
On host 10.0.0.21, SNAT (MASQUERADE) is now applied only to packets whose source is the local docker subnet 172.7.21.0/24, whose destination is NOT in 172.7.0.0/16, and which do not leave via the docker0 bridge; traffic between pods on different compute nodes therefore keeps its original pod IP.
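After saving the rules you can confirm the optimized rule is in place; the MASQUERADE rule should now carry the ! -d 172.7.0.0/16 match (other chains omitted):
[root@hdss7-21 ~]# iptables-save -t nat | grep MASQUERADE
-A POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE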
Deploy an internal HTTP service for Kubernetes resource manifests
On the ops host HDSS7-200.host.com, configure an nginx virtual host that provides a unified access point for the cluster's resource manifests.
[root@hdss7-200 ~]# mkdir /data/k8s-yaml
[root@hdss7-200 ~]# vi /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
    listen       80;
    server_name  k8s-yaml.od.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}
[root@hdss7-200 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
Configure internal DNS resolution
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2019121003 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.0.0.11
harbor          A       10.0.0.200
k8s-yaml        A       10.0.0.200
[root@hdss7-11 ~]# systemctl restart named
From now on, all resource manifests are placed under /data/k8s-yaml on the ops host.
[root@hdss7-200 ~]# nginx -s reload
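A quick check from any node whose resolver points at 10.0.0.11 (the autoindex page should answer once manifests are placed in the directory):
[root@hdss7-21 ~]# curl -I http://k8s-yaml.od.com/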
Deploy kube-dns (CoreDNS)
CoreDNS official GitHub
CoreDNS official DockerHub
Prepare the coredns v1.6.1 image
On the ops host HDSS7-200.host.com:
[root@hdss7-200 ~]# docker pull coredns/coredns:1.6.1
1.6.1: Pulling from coredns/coredns
c6568d217a00: Pull complete
d7ef34146932: Pull complete
Digest: sha256:9ae3b6fcac4ee821362277de6bd8fd2236fa7d3e19af2ef0406d80b595620a7a
Status: Downloaded newer image for coredns/coredns:1.6.1
docker.io/coredns/coredns:1.6.1
[root@hdss7-200 ~]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@hdss7-200 ~]# docker push harbor.od.com/public/coredns:v1.6.1
The push refers to repository [harbor.od.com/public/coredns]
da1ec456edc8: Pushed
225df95e717c: Pushed
v1.6.1: digest: sha256:c7bf0ce4123212c87db74050d4cbab77d8f7e0b49c041e894a35ef15827cf938 size: 739
Prepare the resource manifests
[root@hdss7-200 ~]# cd /data/k8s-yaml/
[root@hdss7-200 k8s-yaml]# mkdir coredns
[root@hdss7-200 k8s-yaml]# cd coredns/
[root@hdss7-200 coredns]# vi /data/k8s-yaml/coredns/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
[root@hdss7-200 coredns]# vi /data/k8s-yaml/coredns/cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 10.254.0.0/16
        forward . 10.0.0.11
        cache 30
        loop
        reload
        loadbalance
    }
[root@hdss7-200 coredns]# vi /data/k8s-yaml/coredns/dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
[root@hdss7-200 coredns]# vi /data/k8s-yaml/coredns/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP
Apply the manifests in order
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
Check
[root@hdss7-21 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-6b6c4f9648-q9lr8 1/1 Running 0 111s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 10.254.0.2 <none> 53/UDP,53/TCP,9153/TCP 105s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 1/1 1 1 110s
NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-6b6c4f9648 1 1 1 111s
[root@hdss7-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.16 -n kube-public
deployment.apps/nginx-dp created
[root@hdss7-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed
[root@hdss7-21 ~]# kubectl get svc -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-dp ClusterIP 10.254.232.123 <none> 80/TCP 12s
[root@hdss7-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @10.254.0.2 +short
10.254.232.123
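The dig test above queries CoreDNS directly. To confirm that pods actually use it, inspect a pod's resolv.conf (the pod name is a placeholder; use one from kubectl get pods -n kube-public). It should list nameserver 10.254.0.2, provided the kubelet was started with --cluster-dns pointing at that service IP:
[root@hdss7-21 ~]# kubectl -n kube-public get pods
[root@hdss7-21 ~]# kubectl -n kube-public exec <nginx-dp-pod-name> -- cat /etc/resolv.conf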
Deploy traefik (ingress)
Prepare the traefik image
On the ops host HDSS7-200.host.com:
[root@hdss7-200 ~]# docker pull traefik:v1.7.2-alpine
v1.7.2-alpine: Pulling from library/traefik
4fe2ade4980c: Pull complete
8d9593d002f4: Pull complete
5d09ab10efbd: Pull complete
37b796c58adc: Pull complete
Digest: sha256:cf30141936f73599e1a46355592d08c88d74bd291f05104fe11a8bcce447c044
Status: Downloaded newer image for traefik:v1.7.2-alpine
docker.io/library/traefik:v1.7.2-alpine
[root@hdss7-200 ~]# docker tag traefik:v1.7.2-alpine harbor.od.com/public/traefik:v1.7.2
[root@hdss7-200 ~]# docker push harbor.od.com/public/traefik:v1.7.2
The push refers to repository [harbor.od.com/public/traefik]
a02beb48577f: Pushed
ca22117205f4: Pushed
3563c211d861: Pushed
df64d3292fd6: Pushed
v1.7.2: digest: sha256:6115155b261707b642341b065cd3fac2b546559ba035d0262650b3b3bbdd10ea size: 1157
Prepare the resource manifests
[root@hdss7-200 ~]# mkdir -p /data/k8s-yaml/traefik && cd /data/k8s-yaml/traefik
[root@hdss7-200 traefik]# vi /data/k8s-yaml/traefik/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
[root@hdss7-200 traefik]# vi /data/k8s-yaml/traefik/ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.0.0.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
[root@hdss7-200 traefik]# vi /data/k8s-yaml/traefik/svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
  - protocol: TCP
    port: 80
    name: controller
  - protocol: TCP
    port: 8080
    name: admin-web
[root@hdss7-200 traefik]# vi /data/k8s-yaml/traefik/ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
Add the DNS record
[root@hdss7-11 ~]# vi /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2019121004 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.0.0.11
harbor          A       10.0.0.200
k8s-yaml        A       10.0.0.200
traefik         A       10.0.0.10
[root@hdss7-11 ~]# systemctl restart named
Apply the manifests in order
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
daemonset.extensions/traefik-ingress created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
service/traefik-ingress-service created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml
ingress.extensions/traefik-web-ui created
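Before configuring the front-end nginx proxy, you can check that traefik answers on the node hostPort directly (a sketch; expect an HTTP response from the traefik web UI routed by the Ingress rule above, not a connection error):
[root@hdss7-21 ~]# kubectl -n kube-system get pods -o wide | grep traefik
[root@hdss7-21 ~]# curl -I -H 'Host: traefik.od.com' http://10.0.0.21:81/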
Configure the reverse proxy
nginx on both HDSS7-11.host.com and HDSS7-12.host.com must be configured.
[root@hdss7-11 ~]# vi /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
    server 10.0.0.21:81    max_fails=3 fail_timeout=10s;
    server 10.0.0.22:81    max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@hdss7-11 ~]# nginx -t
[root@hdss7-11 ~]# nginx -s reload
Visit http://traefik.od.com in a browser.
Deploy the dashboard
Prepare the dashboard image
[root@hdss7-200 ~]# docker pull k8scn/kubernetes-dashboard-amd64:v1.8.3
v1.8.3: Pulling from k8scn/kubernetes-dashboard-amd64
a4026007c47e: Pull complete
Digest: sha256:ebc993303f8a42c301592639770bd1944d80c88be8036e2d4d0aa116148264ff
Status: Downloaded newer image for k8scn/kubernetes-dashboard-amd64:v1.8.3
docker.io/k8scn/kubernetes-dashboard-amd64:v1.8.3
[root@hdss7-200 ~]# docker images | grep dashboard
k8scn/kubernetes-dashboard-amd64 v1.8.3 fcac9aa03fd6 18 months ago 102MB
[root@hdss7-200 ~]# docker tag fcac9aa03fd6 harbor.od.com/public/dashboard:v1.8.3
[root@hdss7-200 ~]# docker push harbor.od.com/public/dashboard:v1.8.3
The push refers to repository [harbor.od.com/public/dashboard]
23ddb8cbb75a: Pushed
v1.8.3: digest: sha256:ebc993303f8a42c301592639770bd1944d80c88be8036e2d4d0aa116148264ff size: 529
Prepare the resource manifests
On the ops host HDSS7-200.host.com:
[root@hdss7-200 ~]# mkdir -p /data/k8s-yaml/dashboard && cd /data/k8s-yaml/dashboard
[root@hdss7-200 dashboard]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
[root@hdss7-200 dashboard]# vi dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/dashboard:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        env:
        - name: ACCEPT_LANGUAGE
          value: english
        args:
          # PLATFORM-SPECIFIC ARGS HERE
        - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
[root@hdss7-200 dashboard]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
[root@hdss7-200 dashboard]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
Apply the manifests in order
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac.yaml
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-admin created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
deployment.apps/kubernetes-dashboard created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/svc.yaml
service/kubernetes-dashboard created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/ingress.yaml
ingress.extensions/kubernetes-dashboard created
Add the DNS record
[root@hdss7-11 ~]# vi /var/named/od.com.zone
dashboard A 10.0.0.10
[root@hdss7-11 ~]# systemctl restart named
Visit http://dashboard.od.com in a browser.
Configure authentication
- Issue a certificate
[root@hdss7-200 certs]# (umask 077;openssl genrsa -out dashboard.od.com.key 2048)
Generating RSA private key, 2048 bit long modulus
..........................................................................+++
..................+++
e is 65537 (0x10001)
[root@hdss7-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@hdss7-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops
Getting CA Private Key
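Optionally inspect the signed certificate before handing it to nginx:
[root@hdss7-200 certs]# openssl x509 -in dashboard.od.com.crt -noout -subject -dates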
- Update the nginx configuration to serve HTTPS
[root@hdss7-11 ~]# cd /etc/nginx/
[root@hdss7-11 nginx]# mkdir certs/
[root@hdss7-11 nginx]# cd certs/
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/dashboard.od.com.crt .
[root@hdss7-11 certs]# scp hdss7-200:/opt/certs/dashboard.od.com.key .
[root@hdss7-11 ~]# vi /etc/nginx/conf.d/dashboard.od.conf
server {
    listen       80;
    server_name  dashboard.od.com;

    rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
    listen       443 ssl;
    server_name  dashboard.od.com;

    ssl_certificate "certs/dashboard.od.com.crt";
    ssl_certificate_key "certs/dashboard.od.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
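As with the earlier virtual hosts, test and reload nginx after adding the server blocks (repeat on HDSS7-12.host.com):
[root@hdss7-11 ~]# nginx -t
[root@hdss7-11 ~]# nginx -s reload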
- Get the token
[root@hdss7-21 ~]# kubectl get secrets -n kube-system
[root@hdss7-21 ~]# kubectl describe secrets kubernetes-dashboard-admin-token-sxtjj -n kube-system
Name: kubernetes-dashboard-admin-token-sxtjj
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard-admin
kubernetes.io/service-account.uid: 22d44e85-7a2b-44c8-bc40-871f9de5b7d6
Type: kubernetes.io/service-account-token
Data
====
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi1zeHRqaiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjIyZDQ0ZTg1LTdhMmItNDRjOC1iYzQwLTg3MWY5ZGU1YjdkNiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.W_BueL9kxZXwSWhvmynaZp7L_QNcVkDgMYB6KYxisdMfQ50wxzVjj5gQFlTfkp67i4ZLE5ipegFt4z78GrotytM2H14TF8O6n_A4wV7LjCRS62_t9CvnOwlqS2tj4GVgFbH-4gsgHKri1BWwva1_aTaofxpmqZURhZI0g2ywDvWEC7jHbXc4rnl17GE7kjDkNUuyLTYZ74XvJNaZ2sRbkfFid_yzcgZ9jKJh3uH4kqwaByPIsWZOzyhgNIyTcuZjRmwTR6HWXf1cfa_6A-hbYZqVYNTSLt4zx2mBzpnrB0WGxC8uDF-X8rcetzMzxwmYBBV-yZfKQycsfhtC75iImg
ca.crt: 1346 bytes
namespace: 11 bytes
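Because the secret name carries a random suffix, a small helper (a sketch, not part of the original steps) saves looking it up by hand:
[root@hdss7-21 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin-token | awk '{print $1}') | grep ^token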
- Pull a newer dashboard image
[root@hdss7-200 ~]# docker pull hexun/kubernetes-dashboard-amd64:v1.10.1
[root@hdss7-200 ~]# docker tag f9aed6605b81 harbor.od.com/public/dashboard:v1.10.1
[root@hdss7-200 ~]# docker push harbor.od.com/public/dashboard:v1.10.1
- Apply the newer dashboard (update the image in dp.yaml to harbor.od.com/public/dashboard:v1.10.1 first, then re-apply)
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dp.yaml
- Example: scoped-down (least-privilege) RBAC for the dashboard
[root@hdss7-200 dashboard]# cat rbac-minimal.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
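Note that dp.yaml above already runs the pod as serviceAccountName: kubernetes-dashboard, which is the account this minimal manifest creates, so the pod itself holds only the permissions in the Role above; logging in with the kubernetes-dashboard-admin token still grants full access. A sketch of applying it (the URL assumes rbac-minimal.yaml was also copied under /data/k8s-yaml/dashboard/ on the ops host):
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/rbac-minimal.yaml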
Deploy heapster
Prepare the heapster image
[root@hdss7-200 ~]# docker pull quay.io/bitnami/heapster:1.5.4
[root@hdss7-200 ~]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@hdss7-200 ~]# docker push harbor.od.com/public/heapster:v1.5.4
Prepare the resource manifests (create the /data/k8s-yaml/heapster directory on the ops host first)
[root@hdss7-200 ~]# vi /data/k8s-yaml/heapster/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
[root@hdss7-200 ~]# vi /data/k8s-yaml/heapster/dp.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: harbor.od.com/public/heapster:v1.5.4
        imagePullPolicy: IfNotPresent
        command:
        - /opt/bitnami/heapster/bin/heapster
        - --source=kubernetes:https://kubernetes.default
[root@hdss7-200 ~]# vi /data/k8s-yaml/heapster/svc.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
Apply the resource manifests
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/rbac.yaml
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/dp.yaml
deployment.extensions/heapster created
[root@hdss7-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/svc.yaml
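After the apply, confirm the heapster pod comes up and check its logs (the deployment name is fixed by dp.yaml above):
[root@hdss7-21 ~]# kubectl -n kube-system get pods | grep heapster
[root@hdss7-21 ~]# kubectl -n kube-system logs deployment/heapster --tail=20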
Restart the dashboard
Visit in a browser: http://dashboard.od.com