Environment for this post:
[root@m-30-1 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"10", GitVersion:"v1.10.2", GitCommit:"81753b10df112992bf51bbc2c2f85208aad78335", GitTreeState:"clean", BuildDate:"2018-04-27T09:22:21Z", GoVersion:"go1.9.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"10", GitVersion:"v1.10.2", GitCommit:"81753b10df112992bf51bbc2c2f85208aad78335", GitTreeState:"clean", BuildDate:"2018-04-27T09:10:24Z", GoVersion:"go1.9.3", Compiler:"gc", Platform:"linux/amd64"}
[root@m-30-1 ~]# rpm -qf /etc/issue
centos-release-7-4.1708.el7.centos.x86_64
The Kubernetes cluster was installed with kubeadm.
A lot of the material here draws on 净超's website; it is well worth a read, O(∩_∩)O~
Contents:
Finding a pod's host-side virtual NIC via its MAC address
Replacing the default kube-dns with CoreDNS
Deploying ingress-nginx as a DaemonSet with Helm
Deploying heketi, the GlusterFS REST API server
Finding a pod's host-side virtual NIC via its MAC address
Create a pod from the centos image and install iproute:
kubectl -n wis run centos --image=centos --replicas=2 -- tail -f /dev/null
kubectl -n wis exec centos-7f55f898b4-7w69j -it -- bash
# inside the pod, install iproute
yum install -y iproute
# check the MAC address
ip a
3: eth0@if30: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 0a:58:0a:f4:00:65 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 10.244.0.101/24 scope global eth0
       valid_lft forever preferred_lft forever
Then run the following commands on the host to inspect the bridge:
yum install -y bridge-utils
brctl show
brctl showmacs kube-bridge
You can also ping baidu.com from inside the pod, or ping the pod IP from the host, and then use ip neigh to look up the MAC address and work out which host virtual NIC the pod is attached to. brctl showmacs kube-bridge shows that the pod's MAC address and the corresponding host NIC actually sit on the same bridge port, one marked is local? yes and the other no.
[root@node1 ~]# brctl showmacs kube-bridge | head -1; brctl showmacs kube-bridge | grep 21
port no  mac addr           is local?  ageing timer
 21      0a:58:0a:f4:00:62  no         0.39
 21      ca:46:f0:00:ec:1d  yes        0.00
 21      ca:46:f0:00:ec:1d  yes        0.00
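An even quicker route: eth0 in the pod is one end of a veth pair, and the @if30 suffix in the ip a output above is the interface index of the host-side peer. A minimal sketch, assuming the index is 30 as above:
# inside the pod: print the interface index of the host-side peer
cat /sys/class/net/eth0/iflink
# on the host: the interface with that index is the pod's veth
ip -o link | awk -F': ' '$1 == 30 {print $2}'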
Replacing the default kube-dns with CoreDNS
The reason for the switch: plenty of people report that kube-dns is unreliable, and in my kubeadm-installed cluster DNS stopped working for pods on the master node after it had been running for a while; switching to CoreDNS fixed it.
# back up the existing kube-dns deployment
kubectl get -n kube-system deployment kube-dns -o yaml > /tmp/kube-dns-deployment.yaml
# then do the swap
cd /opt
git clone https://github.com/coredns/deployment.git
cd /opt/deployment/kubernetes
./deploy.sh | kubectl apply -f -
kubectl delete --namespace=kube-system deployment kube-dns
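To confirm the swap, a quick check (a sketch: the deploy.sh manifests keep the k8s-app=kube-dns label for compatibility, and the busybox pod name here is arbitrary):
# the CoreDNS pods should be Running
kubectl -n kube-system get pods -l k8s-app=kube-dns
# resolve a cluster service from a throwaway pod
kubectl run -it --rm dnstest --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default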
Deploying ingress-nginx as a DaemonSet with Helm
First dump the chart's full default values, then edit the relevant parameters; the changes are shown in the diff below:
[root@m-30-1 deploy]# helm inspect values stable/nginx-ingress|egrep -v "^$" > value2.yaml
[root@m-30-1 deploy]# diff value.yaml value2.yaml
1d0
<
17c16
< hostNetwork: true
---
> hostNetwork: false
24c23
< useHostPort: true
---
> useHostPort: false
66c65
< kind: DaemonSet
---
> kind: Deployment
88,89c87
< nodeSelector:
< kubernetes.io/hostname: m-30-2
---
> nodeSelector: {}
This is still a test environment, so we pinned the controller to one node with a nodeSelector.
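With value.yaml edited as above, the install itself is a single command (the release name and namespace below are illustrative):
helm install stable/nginx-ingress --name nginx-ingress --namespace kube-system -f value.yaml
# the controller should come up as a DaemonSet pod on the selected node
kubectl -n kube-system get ds,pods -o wide | grep nginx-ingress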
Deploying heketi, the GlusterFS REST API server
The main gotcha: if heketi runs as a non-root user with the ssh executor, you must set "sudo": true in the sshexec block.
[root@m-30-2 kubernetes]# cat /etc/heketi/heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8090",
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,
  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "My Secret"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "My Secret"
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "  It will not send commands to any node.",
      "ssh: This setting will notify Heketi to ssh to the nodes.",
      "  It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "  Kubernetes exec api."
    ],
    "executor": "ssh",
    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/home/heketi/.ssh/id_rsa",
      "user": "heketi",
      "port": "20443",
      "sudo": true,
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },
    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },
    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",
    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
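With the ssh executor and a non-root user, every GlusterFS node needs that user with passwordless sudo, plus the public key matching the keyfile above. A sketch following the sshexec block (repeat the ssh-copy-id for each node):
# on every GlusterFS node: create the user and grant passwordless sudo
useradd -m heketi
echo 'heketi ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/heketi
chmod 0440 /etc/sudoers.d/heketi
# on the heketi server: generate the key pair referenced by keyfile and push it out
sudo -u heketi ssh-keygen -t rsa -f /home/heketi/.ssh/id_rsa -N ''
sudo -u heketi ssh-copy-id -i /home/heketi/.ssh/id_rsa.pub -p 20443 heketi@172.16.30.1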
[root@m-30-2 gluster]# pwd
/opt/gluster
[root@m-30-2 gluster]# cat topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.16.30.1"
              ],
              "storage": [
                "172.16.30.1"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.16.30.2"
              ],
              "storage": [
                "172.16.30.2"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.16.30.3"
              ],
              "storage": [
                "172.16.30.3"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        }
      ]
    }
  ]
}
export HEKETI_CLI_SERVER=http://172.16.30.2:8090
heketi-cli topology load --json=topology.json
Sometimes the disks carry leftover signatures that need to be cleared first: wipefs --all /dev/sdb
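Once the topology loads cleanly, a quick sanity check (the 1 GB volume size is arbitrary):
# nodes and devices should all show up
heketi-cli topology info
# create and list a small test volume
heketi-cli volume create --size=1
heketi-cli volume list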
Notes on installing Helm
# remove any existing tiller service and deployment first
kubectl delete svc tiller-deploy -n kube-system
kubectl -n kube-system delete deploy tiller-deploy
# recreate tiller with a dedicated service account bound to cluster-admin
kubectl create serviceaccount --namespace kube-system tiller
kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
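To verify tiller came back up (name=tiller is the label on the tiller-deploy pod):
kubectl -n kube-system get pods -l name=tiller
# client and server versions should both be reported
helm version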