7 部署kubelet
NAME STATUS ROLES AGE VERSION
6 部署kubelet
部署Node节点服务
部署kubelet
集群规划
主机名 角色 ip
rstx-203.rongbiz.cn kubelet 192.168.1.203
rstx-204.rongbiz.cn kubelet 192.168.1.204
注意:这里部署文档以rstx-203.rongbiz.cn主机为例,另外一台计算节点安装部署方法类似
安装其他节点,需要先拷贝
rstx-204上:
[root@rstx-204 conf]# cd /opt/kubernetes/server/bin/conf
[root@rstx-204 conf]# scp rstx-203:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
签发kubelet证书
运维主机 rstx-53.rongbiz.cn
创建生成证书签名请求(csr)的JSON配置文件
[root@rstx-53 etc]# cd /opt/certs/
[root@rstx-53 certs]# vi kubelet-csr.json
{
"CN": "k8s-kubelet",
"hosts": [
"127.0.0.1",
"192.168.1.200",
"192.168.1.201",
"192.168.1.202",
"192.168.1.203",
"192.168.1.204",
"192.168.1.205"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
# 添加node节点IP,多写一些可能安装使用的IP;如果新node的ip不在证书内,需要重新签发证书,并拷贝至所有主机
生成证书
[root@rstx-53 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server kubelet-csr.json | cfssl-json -bare kubelet
[root@rstx-53 certs]# ll
-rw-r--r-- 1 root root 1115 12月 11 16:16 kubelet.csr
-rw-r--r-- 1 root root 498 12月 11 16:16 kubelet-csr.json
-rw------- 1 root root 1679 12月 11 16:16 kubelet-key.pem
-rw-r--r-- 1 root root 1468 12月 11 16:16 kubelet.pem
拷贝证书、私钥,注意私钥文件属性600,拷贝至:/opt/kubernetes/server/bin/certs/
[root@rstx-203 ~]# cd /opt/kubernetes/server/bin/certs/
[root@rstx-203 certs]# scp rstx-53:/opt/certs/kubelet.pem .
[root@rstx-203 certs]# scp rstx-53:/opt/certs/kubelet-key.pem .
注意:后面的点代表当前目录
----------
set-context -- 只做一次,最后生成的 kubelet.kubeconfig 拷贝至其他节点
注意:在conf目录下
[root@rstx-203 cert]# cd /opt/kubernetes/server/bin/conf
# IP地址提前改好再粘贴复制,--server的IP为keepalived的VIP地址
[root@rstx-203 conf]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://192.168.1.200:7443 \
--kubeconfig=kubelet.kubeconfig
Cluster "myk8s" set.
----------
[root@rstx-203 conf]# kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
--client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
User "k8s-node" set.
----------
[root@rstx-203 conf]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig
Context "myk8s-context" created.
----------
[root@rstx-203 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
Switched to context "myk8s-context".
----------
授予权限,角色绑定 -- 只创建一次就好,存到etcd里,然后拷贝到各个node节点上
[root@rstx-203 conf]# vi k8s-node.yaml
# RBAC: bind the user "k8s-node" (the CN of the client certificate embedded
# in kubelet.kubeconfig) to the built-in system:node ClusterRole, so kubelets
# authenticating as that user may register and operate as cluster nodes.
# NOTE: the pasted original lost all YAML indentation, which makes the
# manifest invalid; the nesting below restores the required structure.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: k8s-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: k8s-node
[root@rstx-203 conf]# kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
[root@rstx-203 conf]# kubectl get clusterrolebinding k8s-node -o yaml
----------
rstx-204上:
[root@rstx-204 cert]# cd /opt/kubernetes/server/bin/conf
[root@rstx-204 conf]# scp rstx-203:/opt/kubernetes/server/bin/conf/kubelet.kubeconfig .
----------
准备pause基础镜像 -- 边车模式
运维主机rstx-53.rongbiz.cn上:
[root@rstx-53 ~]# docker pull kubernetes/pause
[root@rstx-53 ~]# docker tag f9d5de079539 harbor.rongbiz.cn/public/pause:latest
[root@rstx-53 ~]# docker push harbor.rongbiz.cn/public/pause:latest
----------
编写启动脚本 -- # 更改主机名
[root@rstx-203 conf]# vi /opt/kubernetes/server/bin/kubelet.sh
#!/bin/sh
# kubelet startup script, run by supervisord with cwd /opt/kubernetes/server/bin
# (see kube-kubelet.ini below), hence the relative ./kubelet and ./certs paths.
# NOTE(review): --hostname-override must be changed to each node's own FQDN
# when this script is copied to other nodes.
#   --anonymous-auth=false           reject unauthenticated requests to the kubelet API
#   --cgroup-driver systemd          assumes the container runtime also uses the
#                                    systemd cgroup driver — confirm on each host
#   --cluster-dns / --cluster-domain in-cluster DNS service IP and domain suffix
#   --fail-swap-on="false"           allow kubelet to start even with swap enabled
#   --client-ca-file                 CA for verifying clients of the kubelet API
#   --tls-cert-file / -key-file      serving certificate signed in the step above
#   --kubeconfig                     apiserver credentials (points at the VIP :7443)
#   --pod-infra-container-image      pause image from the private harbor registry
#   --root-dir                       kubelet state directory (created below)
./kubelet \
--anonymous-auth=false \
--cgroup-driver systemd \
--cluster-dns 10.254.0.2 \
--cluster-domain cluster.local \
--runtime-cgroups=/systemd/system.slice \
--kubelet-cgroups=/systemd/system.slice \
--fail-swap-on="false" \
--client-ca-file ./certs/ca.pem \
--tls-cert-file ./certs/kubelet.pem \
--tls-private-key-file ./certs/kubelet-key.pem \
--hostname-override rstx-203.rongbiz.cn \
--kubeconfig ./conf/kubelet.kubeconfig \
--log-dir /data/logs/kubernetes/kube-kubelet \
--pod-infra-container-image harbor.rongbiz.cn/public/pause:latest \
--root-dir /data/kubelet
[root@rstx-203 conf]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
[root@rstx-203 conf]# chmod +x /opt/kubernetes/server/bin/kubelet.sh
----------
[root@rstx-203 conf]# vi /etc/supervisord.d/kube-kubelet.ini
[program:kube-kubelet-203]
command=/opt/kubernetes/server/bin/kubelet.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
killasgroup=true
stopasgroup=true
[root@rstx-203 conf]# supervisorctl update
[root@rstx-203 conf]# supervisorctl status
etcd-server-7-121 RUNNING pid 6565, uptime 0:24:15
kube-apiserver-7-121 RUNNING pid 6566, uptime 0:24:15
kube-controller-manager-7-121 RUNNING pid 6551, uptime 0:24:15
kube-kubelet-203 RUNNING pid 16663, uptime 0:01:14
kube-scheduler-7-121 RUNNING pid 6552, uptime 0:24:15
[root@rstx-203 cert]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
rstx-203.rongbiz.cn Ready <none> 15h v1.15.4
rstx-204.rongbiz.cn Ready <none> 8m51s v1.15.4
# ROLES添加标签,设定节点角色,可同时加两个标签
[root@rstx-203 cert]# kubectl label node rstx-204.rongbiz.cn node-role.kubernetes.io/master=
[root@rstx-203 cert]# kubectl label node rstx-204.rongbiz.cn node-role.kubernetes.io/node=
[root@hdss7-122 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
rstx-203.rongbiz.cn Ready master,node 33m v1.15.4
rstx-204.rongbiz.cn Ready master,node 2m38s v1.15.4
rstx-205.rongbiz.cn Ready node 4m6s v1.15.4