7 Deploying kube-proxy
Cluster plan
Hostname              Role         IP
rstx-203.rongbiz.cn   kube-proxy   192.168.1.121
rstx-204.rongbiz.cn   kube-proxy   192.168.1.122
Note: this document uses the rstx-203.rongbiz.cn host as the example. The other compute node is installed and deployed the same way.
----------
Sign the kube-proxy certificate
On the ops host rstx-53.rongbiz.cn:
Create the JSON config file for the certificate signing request (CSR). The CN system:kube-proxy is the identity kube-proxy presents to the apiserver; Kubernetes binds this user to the built-in system:node-proxier ClusterRole, so no extra RBAC setup is needed.
[root@rstx-53 certs]# vi /opt/certs/kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
Generate the certificate
[root@rstx-53 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssl-json -bare kube-proxy-client
[root@rstx-53 certs]# ll
-rw-r--r-- 1 root root 1005 Dec 12 10:23 kube-proxy-client.csr
-rw------- 1 root root 1679 Dec 12 10:23 kube-proxy-client-key.pem
-rw-r--r-- 1 root root 1375 Dec 12 10:23 kube-proxy-client.pem
-rw-r--r-- 1 root root  267 Dec 12 10:22 kube-proxy-csr.json
----------
Distribute the certificates: copy them to each node, and make sure the private key file keeps mode 600.
[root@rstx-203 ~]# cd /opt/kubernetes/server/bin/certs/
[root@rstx-203 certs]# scp rstx-53.rongbiz.cn:/opt/certs/kube-proxy-client.pem .
[root@rstx-203 certs]# scp rstx-53.rongbiz.cn:/opt/certs/kube-proxy-client-key.pem .
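scp normally preserves the source file mode, but if the copied key ended up more permissive, tighten it to match the 600 note above (a precautionary step, not part of the original run):
[root@rstx-203 certs]# chmod 600 kube-proxy-client-key.pem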
----------
Create the config in the conf folder -- do this only once, then copy kube-proxy.kubeconfig to each node
[root@rstx-203 cert]# cd /opt/kubernetes/server/bin/conf
# --server=https://192.168.1.200:7443 is the keepalived VIP; adjust it for your environment
[root@rstx-203 conf]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://192.168.1.200:7443 \
--kubeconfig=kube-proxy.kubeconfig
[root@rstx-203 conf]# ls
audit.yaml k8s-node.yaml kubelet.kubeconfig kube-proxy.kubeconfig
[root@rstx-203 conf]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
[root@rstx-203 conf]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
[root@rstx-203 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
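Optionally, sanity-check the result; because --embed-certs=true was used, the CA and client certificates are inlined and this single file is all a node needs:
[root@rstx-203 conf]# kubectl config view --kubeconfig=kube-proxy.kubeconfig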
After the first node is deployed, copy the generated config file to each of the other Node hosts
[root@rstx-204 cert]# cd /opt/kubernetes/server/bin/conf
[root@rstx-204 conf]# scp rstx-203.rongbiz.cn:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig .
----------
* Load the ipvs kernel modules -- the script must be set to run automatically at boot (see below)
[root@rstx-203 conf]# vi /root/ipvs.sh
#!/bin/bash
# Load every ipvs module shipped with the running kernel.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*")
do
  # modprobe only the modules that modinfo can resolve for this kernel
  if /sbin/modinfo -F filename $i &>/dev/null; then
    /sbin/modprobe $i
  fi
done
[root@rstx-203 conf]# chmod +x /root/ipvs.sh
Run the script
[root@rstx-203 conf]# /root/ipvs.sh
Check that the kernel has loaded the ipvs modules
[root@rstx-203 conf]# lsmod | grep ip_vs
ip_vs_wrr 12697 0
ip_vs_wlc 12519 0
ip_vs_sh 12688 0
ip_vs_sed 12519 0
ip_vs_rr 12600 0
ip_vs_pe_sip 12740 0
nf_conntrack_sip 33860 1 ip_vs_pe_sip
ip_vs_nq 12516 0
ip_vs_lc 12516 0
ip_vs_lblcr 12922 0
ip_vs_lblc 12819 0
ip_vs_ftp 13079 0
ip_vs_dh 12688 0
ip_vs 145497 24 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_pe_sip,ip_vs_lblcr,ip_vs_lblc
nf_nat 26787 3 ip_vs_ftp,nf_nat_ipv4,nf_nat_masquerade_ipv4
nf_conntrack 133095 8 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_sip,nf_conntrack_ipv4
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
Set the script to run at boot
[root@rstx-203 ~]# vi /etc/rc.d/rc.local
/root/ipvs.sh
Enable the rc.local boot-script mechanism -- see the boot-script file in this folder for details
[root@rstx-203 ~]# chmod +x /etc/rc.d/rc.local
[root@rstx-203 ~]# mkdir -p /usr/lib/systemd/system/
[root@rstx-203 ~]# vim /usr/lib/systemd/system/rc-local.service
[Unit]
Description=/etc/rc.d/rc.local Compatibility
ConditionFileIsExecutable=/etc/rc.d/rc.local
After=network.target
[Service]
Type=forking
ExecStart=/etc/rc.d/rc.local start
TimeoutSec=0
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
[root@rstx-203 ~]# ln -s '/lib/systemd/system/rc-local.service' '/etc/systemd/system/multi-user.target.wants/rc-local.service'
Start the rc-local.service unit:
[root@rstx-203 ~]# systemctl --system daemon-reload && systemctl start rc-local.service
[root@rstx-203 ~]# systemctl enable rc-local.service
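As an alternative to the rc.local route (a sketch, not part of the original runbook): systemd hosts can load the modules at boot via systemd-modules-load, which reads one module name per line from /etc/modules-load.d/*.conf:
[root@rstx-203 ~]# ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs | grep -o "^[^.]*" > /etc/modules-load.d/ipvs.conf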
----------
Create the kube-proxy start script
On rstx-203.rongbiz.cn:
[root@rstx-203 ~]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
--cluster-cidr 172.7.0.0/16 \
--hostname-override rstx-203.rongbiz.cn \
--proxy-mode=ipvs \
--ipvs-scheduler=nq \
--kubeconfig ./conf/kube-proxy.kubeconfig
[root@rstx-203 ~]# chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@rstx-203 ~]# mkdir -p /data/logs/kubernetes/kube-proxy
[root@rstx-203 ~]# vi /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-203]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
killasgroup=true
stopasgroup=true
[root@rstx-203 ~]# supervisorctl update
[root@rstx-203 ~]# supervisorctl status
kube-proxy-203 RUNNING pid 6873, uptime 0:28:15
[root@rstx-203 ~]# netstat -luntp |grep kube-proxy
tcp 0 0 127.0.0.1:10249 0.0.0.0:* LISTEN 7310/./kube-proxy
tcp6 0 0 :::10256 :::* LISTEN 7310/./kube-proxy
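Of the two listening ports, 10249 serves kube-proxy metrics and 10256 the health check; a quick probe (assuming curl is installed on the node):
[root@rstx-203 ~]# curl -s http://127.0.0.1:10256/healthz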
----------
Check that ipvs took effect. The TCP 10.254.0.1:443 virtual server below is the kubernetes Service ClusterIP, scheduled (nq) across the apiserver endpoints.
[root@rstx-203 ~]# yum install -y ipvsadm # install the tool only; there is no service to start
[root@rstx-203 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.254.0.1:443 nq
-> 192.168.1.121:6443 Masq 1 0 0
-> 192.168.1.122:6443 Masq 1 0 0
# Note: the kube-proxy start script differs slightly on each host in the cluster; remember to adjust it when deploying the other nodes
[root@rstx-203 ~]# cat /data/logs/kubernetes/kube-proxy/proxy.stdout.log
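As the note above says, on rstx-204 only --hostname-override (and the supervisor program name, e.g. kube-proxy-204) changes; a sketch of its start script:
[root@rstx-204 ~]# vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
./kube-proxy \
  --cluster-cidr 172.7.0.0/16 \
  --hostname-override rstx-204.rongbiz.cn \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig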
Verify the Kubernetes cluster
On any compute node, create a resource manifest.
Here we pick the rstx-203.rongbiz.cn host.
[root@rstx-203 ~]# vi /root/nginx-ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.rongbiz.cn/public/nginx:v1.7.9
        ports:
        - containerPort: 80
Create it (we delete it again once testing is done; see the cleanup at the end):
[root@rstx-203 ~]# kubectl create -f nginx-ds.yaml
daemonset.extensions/nginx-ds created
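This manifest targets the v1.15 API used throughout this document; extensions/v1beta1 DaemonSets were removed in Kubernetes 1.16, so on a newer cluster the equivalent apps/v1 manifest (which additionally requires a selector) would be:
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.rongbiz.cn/public/nginx:v1.7.9
        ports:
        - containerPort: 80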
[root@rstx-203 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health": "true"}
etcd-0 Healthy {"health": "true"}
etcd-1 Healthy {"health": "true"}
[root@rstx-203 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
rstx-203.rongbiz.cn Ready master,node 94m v1.15.4
rstx-204.rongbiz.cn Ready master,node 86m v1.15.4
[root@rstx-203 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-ds-64pxp 1/1 Running 0 4m49s
nginx-ds-q4wd9 1/1 Running 0 4m50s
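Once a pod is Running on both nodes, remove the test resource as promised above:
[root@rstx-203 ~]# kubectl delete -f /root/nginx-ds.yaml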