1. Building a single-master, two-node Kubernetes cluster

Install the basic software packages first (all)
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
 
1. Initialize the lab environment for the k8s cluster
1.1 Change each machine's IP to a static IP
master: 192.168.1.160
node1: 192.168.1.162
node2: 192.168.1.163
1.2 Set the hostname on each corresponding host
hostnamectl set-hostname master1 && bash 
hostnamectl set-hostname node1 && bash 
hostnamectl set-hostname node2 && bash
1.3 Configure the hosts file so hosts can reach each other by hostname. Add the following three lines to /etc/hosts on every machine (all)
192.168.1.160 master1
192.168.1.162 node1
192.168.1.163 node2
1.4 Configure passwordless SSH login between hosts (all)
ssh-keygen
ssh-copy-id root@master1
ssh-copy-id root@node1
ssh-copy-id root@node2
1.5 Disable the swap partition to improve performance (all)
# Disable temporarily
swapoff -a

# Disable permanently
vi /etc/fstab
Delete (or comment out) the swap line

# Why swap must be disabled
Swap is the swap partition: when the machine runs low on memory it spills over to swap, but swap is slow, so Kubernetes disallows it by default for performance reasons. kubeadm checks during initialization whether swap is disabled and fails if it is not. If you really want to keep swap, you can pass --ignore-preflight-errors=Swap when installing k8s.
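For reference, instead of editing /etc/fstab by hand you can comment the swap entry out with sed and then confirm swap is off; a minimal sketch (all):
# comment out the swap entry in /etc/fstab (same effect as deleting the line)
sed -ri 's/.*swap.*/#&/' /etc/fstab
# verify: the Swap row should show 0 total/used/free
free -m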
1.6 Adjust kernel parameters (all)
# Load the br_netfilter module and enable kernel packet forwarding
modprobe br_netfilter
echo "modprobe br_netfilter" >> /etc/profile
cat > /etc/sysctl.d/k8s.conf <<EOF 
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
net.ipv4.ip_forward = 1 
EOF
sysctl -p /etc/sysctl.d/k8s.conf


# net.ipv4.ip_forward controls packet forwarding:
For security reasons Linux disables packet forwarding by default. Forwarding means that when a host has more than one NIC and one of them receives a packet, the host passes the packet to another NIC based on the destination IP, and that NIC sends it on according to the routing table. This is normally what a router does.
To give Linux this routing/forwarding capability, the kernel parameter net.ipv4.ip_forward must be set. It reflects whether forwarding is currently enabled: 0 means IP forwarding is disabled, 1 means it is enabled.
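For reference, a quick way to confirm the parameters took effect (all, optional):
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables
# expected:
# net.ipv4.ip_forward = 1
# net.bridge.bridge-nf-call-iptables = 1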
1.7 Disable the firewalld firewall and SELinux (all)
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
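The sed change only takes effect after a reboot; to put SELinux into permissive mode immediately in the current session you can also run (optional):
setenforce 0
getenforce   # should print Permissive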
1.8 Install the repo sources (all)
# Base package repo
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# Docker repo
wget -O /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Kubernetes repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
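For reference, you can rebuild the yum cache and confirm that the 1.20.10 packages are visible from the new repos before installing (optional):
yum clean all && yum makecache fast
yum list kubeadm --showduplicates | grep 1.20.10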
1.9 Configure time synchronization (all)
crontab -e
# sync the clock once an hour
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
systemctl restart crond
1.10 Enable ipvs (all)
# Load these modules on every boot (the backslash before EOF prevents variable substitution)
cat > /etc/sysconfig/modules/ipvs.modules << \EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
 /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
 if [ $? -eq 0 ]; then
 /sbin/modprobe ${kernel_module}
 fi
done
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules 

# Check that the modules have been loaded
lsmod | grep ip_vs
ip_vs_ftp 13079 0 
nf_nat 26583 1 ip_vs_ftp 
ip_vs_sed 12519 0 
ip_vs_nq 12516 0 
ip_vs_sh 12688 0 
ip_vs_dh 12688 0

# Question 1: What is ipvs?
ipvs (IP Virtual Server) implements transport-layer (layer 4) load balancing as part of the Linux kernel. ipvs runs on a host and acts as a load balancer in front of a cluster of real servers: it forwards TCP- and UDP-based service requests to the real servers and makes their services appear as a virtual service on a single IP address.

# Question 2: ipvs vs. iptables
kube-proxy supports both iptables and ipvs modes. The ipvs mode was introduced in Kubernetes v1.8, went beta in v1.9, and became GA in v1.11. The iptables mode was added back in v1.1 and has been kube-proxy's default mode since v1.2. Both ipvs and iptables are built on netfilter, but ipvs uses a hash table, so once the number of Services grows large the speed advantage of hash lookups shows up and Service performance improves. The main differences between the two modes are:
1. ipvs offers better scalability and performance for large clusters
2. ipvs supports more sophisticated load-balancing algorithms than iptables (least load, least connections, weighted, and so on)
3. ipvs supports server health checking, connection retries, and similar features (see the kube-proxy ipvs example below)
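For reference, kubeadm deploys kube-proxy in iptables mode by default even with these modules loaded; one common way to switch it to ipvs mode after the cluster is up is to edit the kube-proxy ConfigMap, roughly as follows (a sketch, not required for this setup):
kubectl edit configmap kube-proxy -n kube-system
# in the config.conf section set:  mode: "ipvs"
# then recreate the kube-proxy pods so they pick up the new mode
kubectl delete pods -n kube-system -l k8s-app=kube-proxy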
 

1.11 Install the iptables service so it can be stopped and disabled at boot (all)

yum install iptables-services -y
service iptables stop && systemctl disable iptables
iptables -F

  

2. Install the cluster software

2.1 Install Docker and enable it at boot (all)

yum install docker-ce -y
systemctl start docker && systemctl enable docker

2.2 Configure Docker registry mirrors and the cgroup driver (all)

# Change Docker's cgroup driver to systemd (the default is cgroupfs). The kubelet uses systemd by default, and the two must match.
cat > /etc/docker/daemon.json << \EOF
{
 "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
} 
EOF

systemctl restart docker
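After restarting Docker you can confirm the cgroup driver actually switched to systemd (optional):
docker info | grep -i "cgroup driver"
# expected: Cgroup Driver: systemd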

2.3 Install the Kubernetes packages

# master node
yum install kubelet-1.20.10 kubeadm-1.20.10 kubectl-1.20.10 -y
systemctl start kubelet && systemctl enable kubelet

# worker nodes
yum install kubeadm-1.20.10 kubelet-1.20.10 -y
systemctl start kubelet && systemctl enable kubelet

3. Initialize the cluster

3.1 Initialize the master node (master)

kubeadm init --kubernetes-version=1.20.10 --apiserver-advertise-address=192.168.1.160 --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification

# --image-repository registry.aliyuncs.com/google_containers: manually sets the image repository to registry.aliyuncs.com/google_containers. kubeadm pulls images from k8s.gcr.io by default, but k8s.gcr.io is not reachable here, so the images are pulled from the registry.aliyuncs.com/google_containers mirror instead.
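For reference, the control-plane images can also be pre-pulled from the mirror before (or after) kubeadm init, which makes initialization faster and easier to troubleshoot (optional):
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.10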

3.2 Configure kubectl for the current user (master)

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

3.3 Generate the join command on the master node (master)

kubeadm token create --print-join-command

# Output:
kubeadm join 192.168.1.160:6443 --token n7ol4u.2e1gwjggvktuqrb6     --discovery-token-ca-cert-hash sha256:777010ff2f82ffe39430a40dc9cf0f7e6aa584bddcbda2112a30a0a4b87309c0 
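Note that a bootstrap token created this way is valid for 24 hours by default; if the nodes join later, list the existing tokens or simply rerun the command above to get a fresh join command:
kubeadm token list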

3.4 Join the worker nodes to the cluster with the command generated above (node)

kubeadm join 192.168.1.160:6443 --token n7ol4u.2e1gwjggvktuqrb6     --discovery-token-ca-cert-hash sha256:777010ff2f82ffe39430a40dc9cf0f7e6aa584bddcbda2112a30a0a4b87309c0

# Output:
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.12. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

3.5 Check the cluster status on the master node (master)

kubectl get nodes

# Output:
NAME      STATUS     ROLES                  AGE     VERSION
master1   NotReady   control-plane,master   18h     v1.20.10
node1     NotReady   <none>                 3m46s   v1.20.10
node2     NotReady   <none>                 3m24s   v1.20.10

3.6 Change the worker nodes' role to worker (master)

kubectl label node node1  node-role.kubernetes.io/worker=worker
kubectl label node node2  node-role.kubernetes.io/worker=worker

# Check the node roles
kubectl get node
NAME      STATUS     ROLES                  AGE     VERSION
master1   NotReady   control-plane,master   18h     v1.20.10
node1     NotReady   worker                 7m13s   v1.20.10
node2     NotReady   worker                 6m51s   v1.20.10

3.7 Install the Calico network plugin (master)

# Download the Calico manifest
wget  https://docs.projectcalico.org/manifests/calico.yaml

# Deploy Calico from the manifest
kubectl apply -f calico.yaml

# Wait until all Calico components are running, then check:
kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE      NOMINATED NODE   READINESS GATES
calico-kube-controllers-547686d897-v2qjj   1/1     Running   0          5m53s   10.244.137.66   master1   <none>           <none>
calico-node-7n9kx                          1/1     Running   0          5m53s   192.168.1.163   node2     <none>           <none>
calico-node-m4lh8                          1/1     Running   0          5m53s   192.168.1.160   master1   <none>           <none>
calico-node-zkb2b                          1/1     Running   0          5m53s   192.168.1.162   node1     <none>           <none>

# Check the cluster status; all nodes are now Ready
kubectl get nodes
NAME      STATUS   ROLES                  AGE   VERSION
master1   Ready    control-plane,master   18h   v1.20.10
node1     Ready    worker                 17m   v1.20.10
node2     Ready    worker                 17m   v1.20.10

3.8 Test the cluster network (master)

kubectl run busybox --image=busybox --restart=Never --rm -it -- ping www.baidu.com

# Output:
If you don't see a command prompt, try pressing enter.

64 bytes from 39.156.66.14: seq=1 ttl=52 time=8.916 ms
64 bytes from 39.156.66.14: seq=2 ttl=52 time=9.066 ms
64 bytes from 39.156.66.14: seq=3 ttl=52 time=9.299 ms
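Besides external connectivity it is worth checking in-cluster DNS as well; a minimal sketch (the pod name dns-test is arbitrary, and busybox:1.28 is used because nslookup in newer busybox images is unreliable):
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local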

3.9 Install the metrics-server component (master)

# metrics-server is a cluster-wide aggregator of resource usage data. Like other add-ons it only exposes the data and does not store it; its focus is implementing the resource metrics API for metrics such as CPU, file descriptors, memory, and request latency. The data it collects is consumed inside the cluster, for example by kubectl, the HPA, and the scheduler.

# First enable API aggregation, which allows the Kubernetes API to be extended without modifying the Kubernetes core code
vim /etc/kubernetes/manifests/kube-apiserver.yaml
  containers:
  - command:
    - kube-apiserver
    - --enable-aggregator-routing=true  # add this line
    - --advertise-address=192.168.1.160

kubectl apply -f /etc/kubernetes/manifests/kube-apiserver.yaml

# Delete the pod that is stuck in CrashLoopBackOff
kubectl delete pods kube-apiserver -n kube-system

# Deploy metrics-server with the yaml file below
kubectl apply -f metrics.yaml

metrics.yaml (the image registry has already been switched to the Aliyun mirror)

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - update
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: metrics-server-config
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  NannyConfiguration: |-
    apiVersion: nannyconfig/v1alpha1
    kind: NannyConfiguration
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.3.6
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
      version: v0.3.6
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
        version: v0.3.6
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        image: registry.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        ports:
        - containerPort: 443
          name: https
          protocol: TCP
      - name: metrics-server-nanny
        image: registry.aliyuncs.com/google_containers/addon-resizer:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 5m
            memory: 50Mi
        env:
          - name: MY_POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: MY_POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
        volumeMounts:
        - name: metrics-server-config-volume
          mountPath: /etc/config
        command:
          - /pod_nanny
          - --config-dir=/etc/config
          - --cpu=300m
          - --extra-cpu=20m
          - --memory=200Mi
          - --extra-memory=10Mi
          - --threshold=5
          - --deployment=metrics-server
          - --container=metrics-server
          - --poll-period=300000
          - --estimator=exponential
          - --minClusterSize=2
      volumes:
        - name: metrics-server-config-volume
          configMap:
            name: metrics-server-config
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: https
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
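After applying the manifest you can confirm that the metrics API is registered and the pod is running before trying kubectl top (optional):
kubectl get apiservice v1beta1.metrics.k8s.io
kubectl get pods -n kube-system -l k8s-app=metrics-server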

3.10 Check node and pod resource usage (master)

[root@master1 ~]# kubectl top node
NAME      CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
master1   309m         7%     2219Mi          28%       
node1     166m         2%     1974Mi          12%       
node2     151m         1%     2091Mi          13%       

[root@master1 ~]# kubectl top pods -n kube-system
NAME                                       CPU(cores)   MEMORY(bytes)   
calico-kube-controllers-547686d897-v2qjj   3m           21Mi            
calico-node-7n9kx                          48m          156Mi           
calico-node-m4lh8                          42m          134Mi           
calico-node-zkb2b                          58m          163Mi           
coredns-7f89b7bc75-bghsq                   3m           15Mi            
coredns-7f89b7bc75-rfjp7                   3m           12Mi            
etcd-master1                               23m          114Mi           
kube-apiserver-master1                     73m          318Mi           
kube-controller-manager-master1            15m          55Mi            
kube-proxy-8rkwg                           1m           15Mi            
kube-proxy-8w4fh                           1m           19Mi            
kube-proxy-q52rk                           1m           19Mi            
kube-scheduler-master1                     4m           24Mi            
metrics-server-5c5bfcb9f6-ln66r            2m           17Mi        

  

4. Bind the scheduler and controller-manager ports to the host address

[root@master1]# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                       ERROR
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}
Since v1.19, ports 10251 and 10252 are bound to 127.0.0.1 by default. If you want to monitor these components with Prometheus, no data can be scraped, so the ports can be rebound to the host's address.

Fix it as follows:
vim /etc/kubernetes/manifests/kube-scheduler.yaml
Make these changes:
Change --bind-address=127.0.0.1 to --bind-address=192.168.1.160
Change the host under the httpGet: fields from 127.0.0.1 to 192.168.1.160
Delete the --port=0 line
Note: 192.168.1.160 is the IP of master1, the k8s control-plane node

vim /etc/kubernetes/manifests/kube-controller-manager.yaml
Change --bind-address=127.0.0.1 to --bind-address=192.168.1.160
Change the host under the httpGet: fields from 127.0.0.1 to 192.168.1.160
Delete the --port=0 line
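For reference, the same three edits can be scripted with sed instead of vim; a sketch, run on master1 (back up the manifests first, since kubelet applies changes to this directory immediately):
cd /etc/kubernetes/manifests
cp kube-scheduler.yaml kube-controller-manager.yaml /tmp/
sed -i 's/--bind-address=127.0.0.1/--bind-address=192.168.1.160/' kube-scheduler.yaml kube-controller-manager.yaml
sed -i 's/host: 127.0.0.1/host: 192.168.1.160/' kube-scheduler.yaml kube-controller-manager.yaml
sed -i '/--port=0/d' kube-scheduler.yaml kube-controller-manager.yaml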

After the changes, restart kubelet on every k8s node:
systemctl restart kubelet

[root@master1]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
ss -antulp | grep :10251
ss -antulp | grep :10252

You can see that the ports are now being listened on at the host address.
