Kubernetes Core Storage: Etcd High Availability
1、Base Environment
1. Install cfssl (only needed on the k8s-master01 node)
$ wget -O /bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
$ wget -O /bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
$ wget -O /bin/cfssl-certinfo https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
$ chmod +x /bin/cfssl*
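A quick way to confirm the binaries are on the PATH and executable:
$ cfssl version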
2. Configure the hosts file
cat >>/etc/hosts<< EOF
10.0.0.31 k8s-master01
10.0.0.32 k8s-master02
10.0.0.39 k8s-master03
EOF
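An optional sanity check that the new entries resolve to the expected addresses:
$ getent hosts k8s-master01 k8s-master02 k8s-master03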
2、Generating the etcd Certificates
1. Install etcd (on all three master nodes)
$ yum install -y etcd
2. Create the certificate signing request files
$ vim etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "10.0.0.31",
    "10.0.0.32",
    "10.0.0.39"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GD",
      "L": "shenzh",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
$ vim ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
$ vim ca-csr.json
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GD",
      "L": "shenzh",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
3. Create the CA certificate and private key
$ cfssl gencert -initca ca-csr.json | cfssljson -bare ca
4. Generate the etcd certificate and private key
$ cfssl gencert -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=www etcd-csr.json | cfssljson -bare etcd
# List the generated certificates
$ ls *.pem
ca-key.pem ca.pem etcd-key.pem etcd.pem
$ mkdir -pv /etc/etcd/ssl
$ cp -r ./{ca-key,ca,etcd-key,etcd}.pem /etc/etcd/ssl/
5. Copy the certificates to the other nodes
$ scp -r ./ root@10.0.0.32:/etc/etcd/ssl
$ scp -r ./ root@10.0.0.39:/etc/etcd/ssl
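On each node you can optionally confirm the copied certificate and its validity period with openssl:
$ openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -subject -dates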
6. Edit /etc/etcd/etcd.conf on each etcd node
k8s-master01
$ vim /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.31:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.31:2379"
ETCD_NAME="k8s-master01"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.31:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.31:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://10.0.0.31:2380,k8s-master02=https://10.0.0.32:2380,k8s-master03=https://10.0.0.39:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
k8s-master02
$ vim /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.32:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.32:2379"
ETCD_NAME="k8s-master02"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.32:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.32:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://10.0.0.31:2380,k8s-master02=https://10.0.0.32:2380,k8s-master03=https://10.0.0.39:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
k8s-master03
$ vim /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.0.0.39:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.39:2379"
ETCD_NAME="k8s-master03"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.0.39:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://10.0.0.39:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://10.0.0.31:2380,k8s-master02=https://10.0.0.32:2380,k8s-master03=https://10.0.0.39:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
Meaning of the configuration parameters:
· ETCD_NAME: node name
· ETCD_DATA_DIR: data directory
· ETCD_LISTEN_PEER_URLS: listen address for cluster (peer) traffic
· ETCD_LISTEN_CLIENT_URLS: listen address for client traffic
· ETCD_INITIAL_ADVERTISE_PEER_URLS: peer address advertised to the cluster
· ETCD_ADVERTISE_CLIENT_URLS: client address advertised to clients
· ETCD_INITIAL_CLUSTER: addresses of the initial cluster members
· ETCD_INITIAL_CLUSTER_TOKEN: cluster token
· ETCD_INITIAL_CLUSTER_STATE: state when joining the cluster; "new" for a new cluster, "existing" to join an existing one
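Since the three etcd.conf files differ only in the node name and IP, a minimal sketch like the following (assuming the node names and IPs used above) can generate the file; run it on each node with that node's own values instead of editing three files by hand:
$ NODE_NAME=k8s-master01    # this node's name, change per node
$ NODE_IP=10.0.0.31         # this node's IP, change per node
$ cat > /etc/etcd/etcd.conf <<EOF
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${NODE_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://127.0.0.1:2379,https://${NODE_IP}:2379"
ETCD_NAME="${NODE_NAME}"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${NODE_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://127.0.0.1:2379,https://${NODE_IP}:2379"
ETCD_INITIAL_CLUSTER="k8s-master01=https://10.0.0.31:2380,k8s-master02=https://10.0.0.32:2380,k8s-master03=https://10.0.0.39:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
EOF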
Run the following on every node to start etcd and enable it at boot
$ systemctl start etcd
$ systemctl enable etcd
Check that the etcd cluster is healthy
$ etcdctl --endpoints "https://10.0.0.31:2379,https://10.0.0.32:2379,https://10.0.0.39:2379" \
  --ca-file=/etc/etcd/ssl/ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  cluster-health
# Expected output
member 61105fb5ea81da2 is healthy: got healthy result from https://10.0.0.39:2379
member 1f46bee47a4f04aa is healthy: got healthy result from https://10.0.0.31:2379
member 6443b97f5544707b is healthy: got healthy result from https://10.0.0.32:2379
cluster is healthy
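The same check can also be run against the v3 API (assuming the etcdctl shipped in the yum package supports ETCDCTL_API=3, which etcd 3.x does):
$ ETCDCTL_API=3 etcdctl \
  --endpoints="https://10.0.0.31:2379,https://10.0.0.32:2379,https://10.0.0.39:2379" \
  --cacert=/etc/etcd/ssl/ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  endpoint health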
The etcd cluster is now up and running.
3、Master High Availability
1. Install haproxy on both LB (load balancer) machines
$ yum install haproxy
$ cat /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     10000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode            tcp
    log             global
    retries         3
    timeout connect 10s
    timeout client  1m
    timeout server  1m

frontend kube-apiserver
    bind *:6443                 # frontend port
    mode tcp
    default_backend master

backend master                  # backend servers and ports, round-robin load balancing
    balance roundrobin
    server k8s-master01 10.0.0.31:6443 check maxconn 2000
    server k8s-master02 10.0.0.32:6443 check maxconn 2000
    server k8s-master03 10.0.0.39:6443 check maxconn 2000
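Before starting the service, haproxy can check the configuration file for syntax errors:
$ haproxy -c -f /etc/haproxy/haproxy.cfg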
Start haproxy
$ systemctl enable haproxy
$ systemctl start haproxy
2. Install keepalived on both LB machines
$ yum install keepalived
Primary LB
$ vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email_from Alexandre.Cassen@firewall.loc
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.101    # floating (virtual) IP
    }
}
Backup LB
$ vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.101    # floating (virtual) IP
    }
}
Start keepalived
$ systemctl start keepalived
$ systemctl enable keepalived
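On the primary LB the floating IP should now be attached to eth0 (it moves to the backup LB if the primary fails); a quick check:
$ ip addr show eth0 | grep 10.0.0.101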
3. Copy the etcd certificates to /etc/kubernetes/pki/ (shown on k8s-master01; if the control-plane join step later complains about missing etcd certificates, copy them to the other masters the same way)
$ mkdir -p /etc/kubernetes/pki/
$ cp -r {ca,etcd,etcd-key}.pem /etc/kubernetes/pki/
4. Create the kubeadm-config.yaml file
$ cat /root/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.0.0.31
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.0.0.101:6443"   # must match the keepalived VIP
controllerManager: {}
dns:
  type: CoreDNS
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
etcd:
  external:
    endpoints:
    - https://10.0.0.31:2379
    - https://10.0.0.32:2379
    - https://10.0.0.39:2379
    caFile: /etc/kubernetes/pki/ca.pem
    certFile: /etc/kubernetes/pki/etcd.pem
    keyFile: /etc/kubernetes/pki/etcd-key.pem
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
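Optionally, the control-plane images this config needs (pulled from the imageRepository mirror set above) can be pre-pulled before running init:
$ kubeadm config images pull --config /root/kubeadm-config.yaml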
Initialize the first master (on kubeadm 1.15 the old --experimental-upload-certs flag is deprecated in favor of --upload-certs)
$ kubeadm init --config=kubeadm-config.yaml --upload-certs
After initialization completes, run the commands it prints:
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
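kubectl should now reach the API server through the VIP; note that the node stays NotReady until a pod network add-on matching podSubnet is installed (flannel's default matches the 10.244.0.0/16 used above):
$ kubectl get nodes
$ kubectl cluster-info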
5. Join the other master nodes
In earlier kubeadm versions, the join command could only be used for worker nodes; with the newer --control-plane flag, control-plane (master) nodes can also join the cluster via kubeadm join.
$ kubeadm join 10.0.0.101:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:699cdd59cfa20509cc25794c5b153678a8ff354c9401e215cb1e41d750cbeb54 \
--control-plane --certificate-key 87d0f654fd4d2d563969ce24fa226321a3fd098477e0528479476fce3bf404c3
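As on the first master, copy the admin kubeconfig on each newly joined master if you want to run kubectl there as well:
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config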
6. Join the worker nodes
$ kubeadm join 10.0.0.101:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:699cdd59cfa20509cc25794c5b153678a8ff354c9401e215cb1e41d750cbeb54
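Back on any master, verify that all control-plane and worker nodes have registered:
$ kubectl get nodes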