# 生成证书的CSR文件:证书签名请求文件,配置了一些域名、公司、单位
[root@k8s-master01 ~]# cd /root/k8s-ha-install/pki
# 生成etcd CA证书和CA证书的key
[root@k8s-master01 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
2020/12/21 01:58:02 [INFO] generating a new CA key and certificate from CSR
2020/12/21 01:58:02 [INFO] generate received request
2020/12/21 01:58:02 [INFO] received CSR
2020/12/21 01:58:02 [INFO] generating key: rsa-2048
2020/12/21 01:58:03 [INFO] encoded CSR
2020/12/21 01:58:03 [INFO] signed certificate with serial number 140198241947074029848239512164671290627608591138
# 可以在-hostname 参数后面预留几个ip,方便日后扩容
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.1.100,192.168.1.101,192.168.1.102 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
# 执行结果
2020/12/21 02:00:04 [INFO] generate received request
2020/12/21 02:00:04 [INFO] received CSR
2020/12/21 02:00:04 [INFO] generating key: rsa-2048
2020/12/21 02:00:05 [INFO] encoded CSR
2020/12/21 02:00:05 [INFO] signed certificate with serial number 470467884878418179395781489624244078991295464856
6.2.3、将证书复制到其他节点(Master01节点)
# Hosts that receive the etcd certificates. WorkNodes is declared here for
# later steps of the guide; only the masters get etcd certs in this loop.
MasterNodes='k8s-master02 k8s-master03'
WorkNodes='k8s-node01 k8s-node02'
# Copy the etcd CA pair and the etcd server cert/key to every master node.
# $MasterNodes is intentionally unquoted: word-splitting yields one host per
# iteration. Per-host values are quoted to survive unexpected characters.
for NODE in $MasterNodes; do
  ssh "$NODE" "mkdir -p /etc/etcd/ssl"
  for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
    scp "/etc/etcd/ssl/${FILE}" "$NODE:/etc/etcd/ssl/${FILE}"
  done
done
6.3、k8s组件证书
6.3.1、生成kubernetes证书(Master01节点)
[root@k8s-master01 ~]# cd /root/k8s-ha-install/pki
[root@k8s-master01 pki]# cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# 执行结果
2020/12/21 02:05:33 [INFO] generating a new CA key and certificate from CSR
2020/12/21 02:05:33 [INFO] generate received request
2020/12/21 02:05:33 [INFO] received CSR
2020/12/21 02:05:33 [INFO] generating key: rsa-2048
2020/12/21 02:05:34 [INFO] encoded CSR
2020/12/21 02:05:34 [INFO] signed certificate with serial number 41601140313910114593243737048758611445671732018
# 10.96.0.1 是 k8s service 网段的第一个 IP;如果需要更改 k8s service 网段,这里的 10.96.0.1 也要同步修改。
# 如果不是高可用集群,192.168.1.246 改为 Master01 的 IP。
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.1.246,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.1.100,192.168.1.101,192.168.1.102 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
# 执行结果
2020/12/21 02:07:26 [INFO] generate received request
2020/12/21 02:07:26 [INFO] received CSR
2020/12/21 02:07:26 [INFO] generating key: rsa-2048
2020/12/21 02:07:26 [INFO] encoded CSR
2020/12/21 02:07:26 [INFO] signed certificate with serial number 538625498609814572541825087295197801303230523180
6.3.2、生成apiserver的聚合证书(Master01节点)
[root@k8s-master01 pki]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
# 执行结果
2020/12/21 02:08:45 [INFO] generating a new CA key and certificate from CSR
2020/12/21 02:08:45 [INFO] generate received request
2020/12/21 02:08:45 [INFO] received CSR
2020/12/21 02:08:45 [INFO] generating key: rsa-2048
2020/12/21 02:08:46 [INFO] encoded CSR
2020/12/21 02:08:46 [INFO] signed certificate with serial number 614553480240998616305316696839282255811191572397
[root@k8s-master01 pki]# cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
# 返回结果(忽略警告)
2020/12/21 02:09:23 [INFO] generate received request
2020/12/21 02:09:23 [INFO] received CSR
2020/12/21 02:09:23 [INFO] generating key: rsa-2048
2020/12/21 02:09:23 [INFO] encoded CSR
2020/12/21 02:09:23 [INFO] signed certificate with serial number 525521597243375822253206665544676632452020336672
2020/12/21 02:09:23 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
6.3.3、生成controller-manage的证书(Master01节点)
[root@k8s-master01 pki]# cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
# 执行结果
2020/12/21 02:10:59 [INFO] generate received request
2020/12/21 02:10:59 [INFO] received CSR
2020/12/21 02:10:59 [INFO] generating key: rsa-2048
2020/12/21 02:10:59 [INFO] encoded CSR
2020/12/21 02:10:59 [INFO] signed certificate with serial number 90004917734039884153079426464391358123145661914
2020/12/21 02:10:59 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
# 注意:如果不是高可用集群,192.168.1.246:8443 改为 master01 的地址,8443 改为 apiserver 的端口(默认是 6443)。
# set-cluster:设置一个集群项,192.168.1.246 是 VIP
# set-cluster: register the cluster endpoint and embed the CA certificate
# into controller-manager.kubeconfig.
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.246:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# 执行结果
Cluster "kubernetes" set.
# 设置一个环境项,一个上下文
[root@k8s-master01 pki]# kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# 执行结果
Context "system:kube-controller-manager@kubernetes" created.
# set-credentials 设置一个用户项
[root@k8s-master01 pki]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# 执行结果
User "system:kube-controller-manager" set.
# 使用某个环境当做默认环境
[root@k8s-master01 pki]# kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# 执行结果
Switched to context "system:kube-controller-manager@kubernetes".
# Generate the kube-scheduler client certificate, signed by the cluster CA.
# Writes /etc/kubernetes/pki/scheduler.pem and scheduler-key.pem.
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
# 执行结果
2020/12/21 02:16:12 [INFO] generate received request
2020/12/21 02:16:12 [INFO] received CSR
2020/12/21 02:16:12 [INFO] generating key: rsa-2048
2020/12/21 02:16:12 [INFO] encoded CSR
2020/12/21 02:16:12 [INFO] signed certificate with serial number 74188665800103042050582037108256409976332653077
2020/12/21 02:16:12 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
# 注意,如果不是高可用集群,192.168.1.246:8443改为master01的地址,8443改为apiserver的端口,默认是6443
# set-cluster: register the cluster endpoint and embed the CA into scheduler.kubeconfig.
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.1.246:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# set-credentials: embed the scheduler client certificate and key as a user entry.
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# set-context: bind the cluster entry and the user entry into one context.
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# use-context: make that context the default for this kubeconfig file.
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
# Generate the kubernetes-admin client certificate, signed by the cluster CA.
# Writes /etc/kubernetes/pki/admin.pem and admin-key.pem.
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
# 注意,如果不是高可用集群,192.168.1.246:8443改为master01的地址,8443改为apiserver的端口,默认是6443
# Build admin.kubeconfig: cluster entry with embedded CA, admin client cert as
# the user entry, bound into context kubernetes-admin@kubernetes and selected
# as the default context.
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.1.246:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
6.3.4、创建ServiceAccount Key → secret
[root@k8s-master01 pki]# openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
# 执行结果
Generating RSA private key, 2048 bit long modulus
..............................+++
.............................................................+++
e is 65537 (0x10001)
[root@k8s-master01 pki]# openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
# 执行结果
writing RSA key
6.3.5、发送证书至其他节点
# Distribute the Kubernetes PKI (everything except the etcd certs, which were
# copied in the etcd section) plus the component kubeconfigs to the other
# master nodes. Globbing replaces the fragile `ls | grep -v etcd` parse.
for NODE in k8s-master02 k8s-master03; do
  for FILE in /etc/kubernetes/pki/*; do
    # Skip any file whose name contains "etcd" (same filter as grep -v etcd).
    [[ "${FILE##*/}" == *etcd* ]] && continue
    scp "$FILE" "$NODE:$FILE"
  done
  for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
    scp "/etc/kubernetes/${FILE}" "$NODE:/etc/kubernetes/${FILE}"
  done
done
注意:如果要修改 bootstrap.secret.yaml 的 token-id 和 token-secret,需要保证 token-id 与 metadata.name 中 bootstrap-token- 后面的字符串一致,并且位数相同;还要保证与 token 字符串 c8ad9c.2e4d610cf3e7426e 对应。示例:
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c  # 这个跟 metadata.name 后面那个一样
[root@k8s-master01 ~]# cd /root/k8s-ha-install/bootstrap
[root@k8s-master01 bootstrap]# mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
[root@k8s-master01 bootstrap]# kubectl create -f bootstrap.secret.yaml
secret/bootstrap-token-c8ad9c created
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
十一、Node节点配置
11.1、复制证书至Node节点
[root@k8s-master01 ~]# cd /etc/kubernetes/
# Copy the etcd client certs and the bootstrap kubeconfig to every other node.
for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do
  # The original listed /etc/etcd/ssl twice in mkdir; once is sufficient.
  ssh "$NODE" "mkdir -p /etc/kubernetes/pki /etc/etcd/ssl"
  for FILE in etcd-ca.pem etcd.pem etcd-key.pem; do
    scp "/etc/etcd/ssl/$FILE" "$NODE:/etc/etcd/ssl/"
  done
  for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
    scp "/etc/kubernetes/$FILE" "$NODE:/etc/kubernetes/${FILE}"
  done
done
[root@k8s-master01 calico]# kubectl apply -f calico-etcd.yaml
# 执行结果
secret/calico-etcd-secrets created
configmap/calico-config created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
12.3、查看容器状态
如果容器状态异常可以使用kubectl describe 或者logs查看容器的日志
[root@k8s-master01 calico]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-5f6d4b864b-pq2qw 0/1 Pending 0 45s
calico-node-75blv 0/1 Init:0/2 0 46s
calico-node-hw27b 0/1 Init:0/2 0 46s
calico-node-k2wdf 0/1 Init:0/2 0 46s
calico-node-l58lz 0/1 Init:0/2 0 46s
calico-node-v2qlq 0/1 Init:0/2 0 46s
coredns-867d46bfc6-8vzrk 0/1 Pending 0 10m
cd /root/k8s-ha-install/
# 如果更改了 k8s service 网段,需要把 coredns.yaml 中默认的 clusterDNS 地址 10.96.0.10 替换为新网段的第 10 个 IP(COREDNS_SERVICE_IP);网段未变则无需执行。
[root@k8s-master01 k8s-ha-install]# sed -i "s#10.96.0.10#${COREDNS_SERVICE_IP}#g" CoreDNS/coredns.yaml
13.2、安装coredns
[root@k8s-master01 k8s-ha-install]# kubectl create -f CoreDNS/coredns.yaml
# 执行结果
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
13.3、安装最新版CoreDNS(不建议)
git clone https://github.com/coredns/deployment.git
cd deployment/kubernetes
# ./deploy.sh -s -i 10.96.0.10 | kubectl apply -f -
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
查看状态
# kubectl get po -n kube-system -l k8s-app=kube-dns
NAME READY STATUS RESTARTS AGE
coredns-85b4878f78-h29kh 1/1 Running 0 8h
[root@k8s-master01 ~]# cd /root/k8s-ha-install/metrics-server-0.4.x/
[root@k8s-master01 metrics-server-0.4.x]# kubectl create -f .
# 执行结果
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
[root@k8s-master01 ~]# cd /root/k8s-ha-install/dashboard/
[root@k8s-master01 dashboard]# kubectl create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
# 角色名字更改(注意拼写是 master,不是 matser)
[root@k8s-master01 ~]# kubectl label node k8s-master01 node-role.kubernetes.io/master=''
node/k8s-master01 labeled
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 129m v1.20.0 # 成功更改
k8s-master02 Ready <none> 129m v1.20.0
k8s-master03 Ready <none> 129m v1.20.0
k8s-node01 Ready <none> 129m v1.20.0
k8s-node02 Ready <none> 129m v1.20.0
18、安装总结
1、 kubeadm
2、 二进制
3、 自动化安装
a) Ansible
i. Master节点安装不需要写自动化。
ii. 添加Node节点,playbook。
4、 安装需要注意的细节
a) 上面的细节配置
b) 生产环境中etcd一定要和系统盘分开,一定要用ssd硬盘。
c) Docker数据盘也要和系统盘分开,有条件的话可以使用ssd硬盘
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· Linux系列:如何用heaptrack跟踪.NET程序的非托管内存泄露
· 开发者必知的日志记录最佳实践
· SQL Server 2025 AI相关能力初探
· Linux系列:如何用 C#调用 C方法造成内存泄露
· AI与.NET技术实操系列(二):开始使用ML.NET
· 被坑几百块钱后,我竟然真的恢复了删除的微信聊天记录!
· 没有Manus邀请码?试试免邀请码的MGX或者开源的OpenManus吧
· 【自荐】一款简洁、开源的在线白板工具 Drawnix
· 园子的第一款AI主题卫衣上架——"HELLO! HOW CAN I ASSIST YOU TODAY
· Docker 太简单,K8s 太复杂?w7panel 让容器管理更轻松!