Kubernetes二进制安装
一、环境配置
1.1主机名设置
[root@localhost ~]# hostnamectl set-hostname master [root@localhost ~]# hostnamectl set-hostname node1 [root@localhost ~]# hostnamectl set-hostname node2
1.2关闭防火墙
systemctl stop firewalld
systemctl disable firewalld
1.3 关闭selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config setenforce 0
1.4关闭swap分区
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab swapoff -a
1.5将桥接的IPv4流量传递到iptables的链
cat > /etc/sysctl.d/kubernetes.conf << EOF net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward = 1 EOF
加载
modprobe br_netfilter
使配置生效
sysctl --system
1.6开启ipvs
cat > /etc/sysconfig/modules/ipvs.modules <<EOF #!/bin/bash modprobe -- ip_vs modprobe -- ip_vs_rr modprobe -- ip_vs_wrr modprobe -- ip_vs_sh modprobe -- nf_conntrack_ipv4 EOF
授权
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
是否加载成功
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
1.7安装依赖
cd /etc/yum.repos.d/ mkdir bak mv *.repo bak wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
安装依赖包
yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp yum install -y yum-utils device-mapper-persistent-data lvm2 yum install -y wget jq psmisc vim net-tools telnet
1.8添加主机名和地址对应关系
cat >> /etc/hosts << EOF 192.168.43.80 master 192.168.43.81 node1 192.168.43.82 node2 EOF
1.9设置时间同步
yum install ntpdate -y
ntpdate time.windows.com
设置定时任务
crontab -e */5 * * * * ntpdate time.windows.com
1.10设置时区
timedatectl set-timezone 'Asia/Shanghai'
重新加载
systemctl restart chronyd.service
1.11.重启主机
reboot
二、准备 cfssl 证书生成工具(master节点执行)
cfssl 是一个开源的证书管理工具,使用 json 文件生成证书,相比 openssl 更方便使用。 找任意一台服务器操作,这里用 Master 节点。
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64 mv cfssl_linux-amd64 /usr/local/bin/cfssl mv cfssljson_linux-amd64 /usr/local/bin/cfssljson mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo chmod +x /usr/bin/cfssl*
2.1生成 Etcd 证书
创建工作目录
mkdir -p ~/TLS/{etcd,k8s}
cd ~/TLS/etcd
2.2自签CA
cat > ca-config.json << EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "www": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } } EOF
cat > ca-csr.json << EOF { "CN": "etcd CA", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing" } ] } EOF
2.3生成证书
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
查看
[root@master etcd]# ls ca*pem
ca-key.pem ca.pem
2.4使用自签 CA 签发 Etcd HTTPS 证书
cat > server-csr.json << EOF { "CN": "etcd", "hosts": [ "192.168.43.80", "192.168.43.81", "192.168.43.82" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing" } ] } EOF
2.5生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
查看
ls server*pem
三、部署etcd集群
3.1下载
https://github.com/etcd-io/etcd/releases/tag/v3.4.14
3.2创建工作目录
mkdir /opt/etcd/{bin,cfg,ssl} -p tar -zxvf etcd-v3.4.14-linux-amd64.tar.gz mv etcd-v3.4.14-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
3.3创建etcd.conf
cat > /opt/etcd/cfg/etcd.conf << EOF #[Member] ETCD_NAME="etcd-1" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://192.168.43.80:2380" ETCD_LISTEN_CLIENT_URLS="https://192.168.43.80:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.43.80:2380" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.43.80:2379" ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.43.80:2380,etcd-2=https://192.168.43.81:2380,etcd-3=https://192.168.43.82:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new" EOF
- ETCD_NAME:节点名称,集群中唯一
- ETCD_DATA_DIR:数据目录
- ETCD_LISTEN_PEER_URLS:集群通信监听地址
- ETCD_LISTEN_CLIENT_URLS:客户端访问监听地址
- ETCD_INITIAL_ADVERTISE_PEER_URLS:集群通告地址
- ETCD_ADVERTISE_CLIENT_URLS:客户端通告地址
- ETCD_INITIAL_CLUSTER:集群节点地址
- ETCD_INITIAL_CLUSTER_TOKEN:集群 Token
- ETCD_INITIAL_CLUSTER_STATE:加入集群的当前状态,new 是新集群,existing 表示加入 已有集群
3.4创建etcd.service
cat > /usr/lib/systemd/system/etcd.service << EOF [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target [Service] Type=notify EnvironmentFile=/opt/etcd/cfg/etcd.conf ExecStart=/opt/etcd/bin/etcd \ --cert-file=/opt/etcd/ssl/server.pem \ --key-file=/opt/etcd/ssl/server-key.pem \ --peer-cert-file=/opt/etcd/ssl/server.pem \ --peer-key-file=/opt/etcd/ssl/server-key.pem \ --trusted-ca-file=/opt/etcd/ssl/ca.pem \ --peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \ --logger=zap Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
3.5拷贝证书
cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
3.6将上面节点 1 所有生成的文件拷贝到节点 2 和节点 3
scp -r /opt/etcd/ root@192.168.43.81:/opt/ scp /usr/lib/systemd/system/etcd.service root@192.168.43.81:/usr/lib/systemd/system/ scp -r /opt/etcd/ root@192.168.43.82:/opt/ scp /usr/lib/systemd/system/etcd.service root@192.168.43.82:/usr/lib/systemd/system/
分别在node1和node2上修改 etcd.conf 配置文件中的节点名称和当前服务器 IP:(node1改为 etcd-2
,node2 改为 etcd-3
)
[root@node1 ~]# vi /opt/etcd/cfg/etcd.conf #[Member] ETCD_NAME="etcd-2" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://192.168.43.81:2380" ETCD_LISTEN_CLIENT_URLS="https://192.168.43.81:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.43.81:2380" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.43.81:2379" ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.43.80:2380,etcd-2=https://192.168.43.81:2380,etcd-3=https://192.168.43.82:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new"
[root@node2 ~]# vi /opt/etcd/cfg/etcd.conf #[Member] ETCD_NAME="etcd-3" ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ETCD_LISTEN_PEER_URLS="https://192.168.43.82:2380" ETCD_LISTEN_CLIENT_URLS="https://192.168.43.82:2379" #[Clustering] ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.43.82:2380" ETCD_ADVERTISE_CLIENT_URLS="https://192.168.43.82:2379" ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.43.80:2380,etcd-2=https://192.168.43.81:2380,etcd-3=https://192.168.43.82:2380" ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ETCD_INITIAL_CLUSTER_STATE="new"
3.7启动并设置开机启动 (三台)
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
查看状态
/opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.43.80:2379,https://192.168.43.81:2379,https://192.168.43.82:2379" endpoint health
四、安装docker(三台执行)
4.1下载
下载地址:https://download.docker.com/linux/static/stable/x86_64/ 版本:19.03.9
4.2解压安装
tar -zxvf docker-19.03.9.tgz mv docker/* /usr/bin
4.3systemd 管理 docker
cat > /usr/lib/systemd/system/docker.service << EOF [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com After=network-online.target firewalld.service Wants=network-online.target [Service] Type=notify ExecStart=/usr/bin/dockerd ExecReload=/bin/kill -s HUP $MAINPID LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity TimeoutStartSec=0 Delegate=yes KillMode=process Restart=on-failure StartLimitBurst=3 StartLimitInterval=60s [Install] WantedBy=multi-user.target EOF
4.4创建镜像加速(先确保 /etc/docker 目录存在)
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF { "registry-mirrors": ["https://j75wwuc0.mirror.aliyuncs.com"], "exec-opts": ["native.cgroupdriver=systemd"] } EOF
4.5启动并设置开机启动
systemctl daemon-reload
systemctl start docker
systemctl enable docker
五、部署master节点
5.1生成 kube-apiserver 证书
cat > ca-config.json << EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } } EOF
cat > ca-csr.json << EOF { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System" } ] } EOF
5.2生成证书
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
[root@master k8s]# ls *pem
ca-key.pem ca.pem
5.3使用自签 CA 签发 kube-apiserver HTTPS 证书
cat > server-csr.json << EOF { "CN": "kubernetes", "hosts": [ "10.0.0.1", "127.0.0.1", "192.168.43.80", "192.168.43.81", "192.168.43.82", "192.168.43.83", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" } ] } EOF
5.4生成Server证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
查看
[root@master k8s]# ls server*pem
server-key.pem server.pem
5.5下载k8s并安装
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#server-binaries
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs} tar zxvf kubernetes-server-linux-amd64.tar.gz cd kubernetes/server/bin cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin cp kubectl /usr/bin/
5.6部署api-server
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF KUBE_APISERVER_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --etcd-servers=https://192.168.43.80:2379,https://192.168.43.81:2379,https://192.168.43.82:2379 \\ --bind-address=192.168.43.80 \\ --secure-port=6443 \\ --advertise-address=192.168.43.80 \\ --allow-privileged=true \\ --service-cluster-ip-range=10.0.0.0/24 \\ --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\ --authorization-mode=RBAC,Node \\ --enable-bootstrap-token-auth=true \\ --token-auth-file=/opt/kubernetes/cfg/token.csv \\ --service-node-port-range=30000-32767 \\ --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\ --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\ --tls-cert-file=/opt/kubernetes/ssl/server.pem \\ --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\ --client-ca-file=/opt/kubernetes/ssl/ca.pem \\ --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --etcd-cafile=/opt/etcd/ssl/ca.pem \\ --etcd-certfile=/opt/etcd/ssl/server.pem \\ --etcd-keyfile=/opt/etcd/ssl/server-key.pem \\ --audit-log-maxage=30 \\ --audit-log-maxbackup=3 \\ --audit-log-maxsize=100 \\ --audit-log-path=/opt/kubernetes/logs/k8s-audit.log" EOF
上面两个\ \ 第一个是转义符,第二个是换行符,使用转义符是为了使用 EOF 保留换行符。
- –logtostderr:启用日志
- —v:日志等级
- –log-dir:日志目录
- –etcd-servers:etcd 集群地址
- –bind-address:监听地址
- –secure-port:https 安全端口
- –advertise-address:集群通告地址
- –allow-privileged:启用授权
- –service-cluster-ip-range:Service 虚拟 IP 地址段
- –enable-admission-plugins:准入控制模块
- –authorization-mode:认证授权,启用 RBAC 授权和节点自管理
- –enable-bootstrap-token-auth:启用 TLS bootstrap 机制
- –token-auth-file:bootstrap token 文件
- –service-node-port-range:Service nodeport 类型默认分配端口范围
- –kubelet-client-xxx:apiserver 访问 kubelet 客户端证书
- –tls-xxx-file:apiserver https 证书
- –etcd-xxxfile:连接 Etcd 集群证书
- –audit-log-xxx:审计日志
5.7把生成的证书拷贝到配置文件中的路径
cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/
5.8创建上述配置文件中 token 文件
cat > /opt/kubernetes/cfg/token.csv << EOF c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper" EOF
5.9systemd 管理 apiserver
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF [Unit] Description=Kubernetes API Server Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
设置开机启动
systemctl daemon-reload systemctl start kube-apiserver systemctl enable kube-apiserver
5.10授权 kubelet-bootstrap 用户允许请求证书
kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper \ --user=kubelet-bootstrap
5.11部署 kube-controller-manager
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --leader-elect=true \\ --master=127.0.0.1:8080 \\ --bind-address=127.0.0.1 \\ --allocate-node-cidrs=true \\ --cluster-cidr=10.244.0.0/16 \\ --service-cluster-ip-range=10.0.0.0/24 \\ --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\ --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --root-ca-file=/opt/kubernetes/ssl/ca.pem \\ --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\ --experimental-cluster-signing-duration=87600h0m0s" EOF
-
–master:通过本地非安全本地端口 8080 连接 apiserver
-
–leader-elect:当该组件启动多个时,自动选举(HA)
-
–cluster-signing-cert-file/–cluster-signing-key-file:自动为 kubelet 颁发证书的 CA,与 apiserver 保持一致
5.12 systemd 管理 controller-manager
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
启动并设置开机启动
systemctl daemon-reload systemctl start kube-controller-manager systemctl enable kube-controller-manager
5.13部署 kube-scheduler
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF KUBE_SCHEDULER_OPTS="--logtostderr=false \ --v=2 \ --log-dir=/opt/kubernetes/logs \ --leader-elect \ --master=127.0.0.1:8080 \ --bind-address=127.0.0.1" EOF
–master:通过本地非安全本地端口 8080 连接 apiserver
–leader-elect:当该组件启动多个时,自动选举(HA)
systemd管理
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF [Unit] Description=Kubernetes Scheduler Documentation=https://github.com/kubernetes/kubernetes [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS Restart=on-failure [Install] WantedBy=multi-user.target EOF
启动并设置开机启动
systemctl daemon-reload systemctl start kube-scheduler systemctl enable kube-scheduler
5.14查看集群状态
[root@master bin]# kubectl get cs NAME STATUS MESSAGE ERROR scheduler Healthy ok controller-manager Healthy ok etcd-2 Healthy {"health":"true"} etcd-1 Healthy {"health":"true"} etcd-0 Healthy {"health":"true"}
六、部署Worker Node
6.1k8s安装包解压安装
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs} tar zxvf kubernetes-server-linux-amd64.tar.gz cd kubernetes/server/bin cp kubelet kube-proxy /opt/kubernetes/bin cp kubectl /usr/bin/
6.2配置kubelet(node不同,hostname-override不同)此处hostname若写错,重新认证可参考6.6节后的说明
cat > /opt/kubernetes/cfg/kubelet.conf << EOF KUBELET_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --hostname-override=node1 \\ --network-plugin=cni \\ --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\ --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\ --config=/opt/kubernetes/cfg/kubelet-config.yml \\ --cert-dir=/opt/kubernetes/ssl \\ --pod-infra-container-image=lizhenliang/pause-amd64:3.0" EOF
- –hostname-override:显示名称,集群中唯一
- –network-plugin:启用CNI
- –kubeconfig:空路径,会自动生成,后面用于连接apiserver
- –bootstrap-kubeconfig:首次启动向apiserver申请证书
- –config:配置参数文件
- –cert-dir:kubelet证书生成目录
- –pod-infra-container-image:管理Pod网络容器的镜像
配置参数文件(注意:cgroupDriver 需与 docker 的 native.cgroupdriver 保持一致,此处统一为 systemd,否则 kubelet 启动会报错)
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 address: 0.0.0.0 port: 10250 readOnlyPort: 10255 cgroupDriver: systemd clusterDNS: - 10.0.0.2 clusterDomain: cluster.local failSwapOn: false authentication: anonymous: enabled: false webhook: cacheTTL: 2m0s enabled: true x509: clientCAFile: /opt/kubernetes/ssl/ca.pem authorization: mode: Webhook webhook: cacheAuthorizedTTL: 5m0s cacheUnauthorizedTTL: 30s evictionHard: imagefs.available: 15% memory.available: 100Mi nodefs.available: 10% nodefs.inodesFree: 5% maxOpenFiles: 1000000 maxPods: 110 EOF
6.3将master文件拷贝到node节点(master节点执行)
scp -r /opt/kubernetes/ssl root@192.168.43.81:/opt/kubernetes scp -r /opt/kubernetes/ssl root@192.168.43.82:/opt/kubernetes
6.4 生成bootstrap.kubeconfig文件
KUBE_APISERVER="https://192.168.43.80:6443" # apiserver IP:PORT
TOKEN="c47ffb939f5ca36231d9e3121a252940" # 与token.csv里保持一致,在master节点的/opt/kubernetes/cfg目录下
kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=bootstrap.kubeconfig kubectl config set-credentials "kubelet-bootstrap" \ --token=${TOKEN} \ --kubeconfig=bootstrap.kubeconfig kubectl config set-context default \ --cluster=kubernetes \ --user="kubelet-bootstrap" \ --kubeconfig=bootstrap.kubeconfig kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
移动文件
mv bootstrap.kubeconfig /opt/kubernetes/cfg
6.5 systemd管理kubelet
cat > /usr/lib/systemd/system/kubelet.service << EOF [Unit] Description=Kubernetes Kubelet After=docker.service [Service] EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
启动并设置开机启动
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
发现报错
排查:查看docker的Cgroup
与kubelet的Cgroup不一致导致的
修改docker的配置
[root@node1 cfg]# vi /etc/docker/daemon.json
6.6 批准kubelet证书申请并加入集群(master执行)
[root@master ~]# kubectl get csr NAME AGE SIGNERNAME REQUESTOR CONDITION node-csr-H0Pnw1vsYsyJ5hCwMJpwDcSfzM4vj06q4vz07Qecr38 2m8s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending node-csr-PPfEomEAKqMcgtxq3GXNNlTcl7gYJ9zFe6IalcXEMNU 2m7s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
批准申请
[root@master ~]# kubectl certificate approve node-csr-H0Pnw1vsYsyJ5hCwMJpwDcSfzM4vj06q4vz07Qecr38 certificatesigningrequest.certificates.k8s.io/node-csr-H0Pnw1vsYsyJ5hCwMJpwDcSfzM4vj06q4vz07Qecr38 approved [root@master ~]# kubectl certificate approve node-csr-PPfEomEAKqMcgtxq3GXNNlTcl7gYJ9zFe6IalcXEMNU certificatesigningrequest.certificates.k8s.io/node-csr-PPfEomEAKqMcgtxq3GXNNlTcl7gYJ9zFe6IalcXEMNU approved
如果hostname错误,重新认证参考
k8s删除一个节点使用以下命令 删除一个节点前,先驱赶掉上面的pod kubectl drain $nodeIP --delete-local-data 因为我这里是测试环境还没有pod就直接删除了,没有做驱逐 1、删除node节点重新注册 master节点上: kubectl delete node $nodename 2、node节点上删除client文件 rm -f /etc/kubernetes/ssl/kubelet-client-* 3、node节点上重启kubelet服务 systemctl restart kubelet #重启后会自动生成kubelet-client-文件 4、master上查看注册请求 kubectl get csr #CONDITION是pending状态就是待审批,Approved,Issued是已审批 5、master上审批注册请求 kubectl certificate approve $NAME [root@master1 work]# kubectl certificate approve node-csr-nbczXKuKZpXVEwrEfplaF2WZcjaphB5_PNyCAUW46TU certificatesigningrequest.certificates.k8s.io/node-csr-nbczXKuKZpXVEwrEfplaF2WZcjaphB5_PNyCAUW46TU approved #$NAME是上一步get出来的请求 6、master上kubectl get csr显示已审批完 ----------------------------------- ©著作权归作者所有:来自51CTO博客作者夜尽天已明的原创作品,请联系作者获取转载授权,否则将追究法律责任 K8S二进制部署时候遇到的问题 https://blog.51cto.com/u_13522483/2878514
查看节点
6.7部署kube-proxy
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF KUBE_PROXY_OPTS="--logtostderr=false \\ --v=2 \\ --log-dir=/opt/kubernetes/logs \\ --config=/opt/kubernetes/cfg/kube-proxy-config.yml" EOF
node1
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF kind: KubeProxyConfiguration apiVersion: kubeproxy.config.k8s.io/v1alpha1 bindAddress: 0.0.0.0 metricsBindAddress: 0.0.0.0:10249 clientConnection: kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig hostnameOverride: node1 clusterCIDR: 10.0.0.0/24 EOF
node2
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF kind: KubeProxyConfiguration apiVersion: kubeproxy.config.k8s.io/v1alpha1 bindAddress: 0.0.0.0 metricsBindAddress: 0.0.0.0:10249 clientConnection: kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig hostnameOverride: node2 clusterCIDR: 10.0.0.0/24 EOF
6.8 生成kube-proxy.kubeconfig文件(master生成传到node)
# 切换工作目录
cd ~/TLS/k8s/
# 创建证书请求文件 cat > kube-proxy-csr.json << EOF { "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" } ] } EOF
生成证书
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
查看证书
[root@master k8s]# ls kube-proxy*pem
kube-proxy-key.pem kube-proxy.pem
拷贝
scp /root/TLS/k8s/kube-proxy*pem root@192.168.43.81:/opt/kubernetes/ssl scp /root/TLS/k8s/kube-proxy*pem root@192.168.43.82:/opt/kubernetes/ssl
6.9生成kubeconfig文件
KUBE_APISERVER="https://192.168.43.80:6443" # apiserver IP:PORT
kubectl config set-cluster kubernetes \ --certificate-authority=/opt/kubernetes/ssl/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=kube-proxy.kubeconfig kubectl config set-credentials kube-proxy \ --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \ --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \ --embed-certs=true \ --kubeconfig=kube-proxy.kubeconfig kubectl config set-context default \ --cluster=kubernetes \ --user=kube-proxy \ --kubeconfig=kube-proxy.kubeconfig kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
6.10systemd管理kube-proxy
cat > /usr/lib/systemd/system/kube-proxy.service << EOF [Unit] Description=Kubernetes Proxy After=network.target [Service] EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS Restart=on-failure LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
开机启动
systemctl daemon-reload systemctl start kube-proxy systemctl enable kube-proxy
7、部署CNI网络
下载
https://github.com/containernetworking/plugins/releases/tag/v0.8.6
node节点操作
mkdir -p /opt/cni/bin
tar zxvf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
master节点操作:
wget --no-check-certificate https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml kubectl apply -f kube-flannel.yml
https://www.cnblogs.com/zjfjava/p/15982106.html