Kubernetes: setting up a cluster with kubeadm

Kubernetes official documentation

https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

System requirements
2 CPUs / 2 GB RAM (2C2G) minimum

Kubernetes basics (learning resources)

K8s training camp: https://www.qikqiak.com/k8strain/

Deployment steps

Install required packages

yum install -y chrony conntrack ipvsadm ipset iptables curl sysstat libseccomp wget socat git

Tune kernel parameters

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.vs.ignore_no_rs_error=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_timestamps=1
net.ipv4.neigh.default.gc_thresh1=1024
net.ipv4.neigh.default.gc_thresh2=2048
net.ipv4.neigh.default.gc_thresh3=4096
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
# reload the parameters
sysctl --system
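
The net.bridge.bridge-nf-call-* parameters above only exist once the br_netfilter module is loaded. A minimal sketch to load and persist it (the file name under /etc/modules-load.d/ is my choice, not part of the original notes):

# load the overlay and br_netfilter modules required by the bridge sysctls above
modprobe overlay
modprobe br_netfilter
# persist them across reboots
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# verify
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables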

Load kernel modules

## https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md
# modprobe -- ip_vs
# modprobe -- ip_vs_rr
# modprobe -- ip_vs_wrr
# modprobe -- ip_vs_sh
# modprobe -- nf_conntrack_ipv4
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
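
The script above is only written to disk, so the check below would find nothing until it is made executable and run; note that on kernels 4.19 and newer the conntrack module is named nf_conntrack instead of nf_conntrack_ipv4:

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# on kernel >= 4.19, load nf_conntrack instead of nf_conntrack_ipv4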

Check the loaded modules

lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Disable SELinux

setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Disable swap

swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
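
A quick check that swap is really off (verification only, not in the original notes):

# both should show no swap devices in use
free -h
cat /proc/swaps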

Firewall

Set the default iptables FORWARD policy to ACCEPT

systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT

Stop unneeded services

systemctl stop postfix && systemctl disable postfix

Configure the Aliyun YUM repository

## https://developer.aliyun.com/mirror/
### k8s
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet and kubectl

# list every version that yum can install
yum list kubeadm --showduplicates | sort -r
# install a specific version
yum install kubeadm-1.27.6 kubectl-1.27.6 kubelet-1.27.6 -y
systemctl enable kubelet && systemctl start kubelet
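
Optionally confirm the installed versions and pin the packages so a later yum update does not upgrade them; the versionlock plugin is an extra package and my suggestion, not part of the original notes:

kubeadm version
kubelet --version
kubectl version --client
# optional: pin the packages against accidental upgrades
yum install -y yum-plugin-versionlock
yum versionlock kubeadm kubelet kubectl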

Install Docker or containerd

  • Note: since Kubernetes 1.24 the dockershim has been removed, so Docker is no longer used directly and containerd is used as the container runtime.

Install Docker and configure a registry mirror

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# https://docs.docker.com/engine/install/centos/
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
# install a specific version
yum install -y docker-ce-18.09.9 docker-ce-cli-18.09.9
# or install the latest version
yum install docker-ce docker-ce-cli
# configure the registry mirror and logging options
# (JSON does not allow inline comments, so keep daemon.json comment-free)
mkdir -p /etc/docker && cat <<EOF>/etc/docker/daemon.json
{
"registry-mirrors": [
"https://xxxx.mirror.aliyuncs.com"
],
"log-driver": "json-file",
"log-opts": {"max-size": "100m", "max-file": "3"}
}
EOF
# "registry-mirrors": your Aliyun registry mirror address
# "log-driver"/"log-opts": Docker log type, size limit and number of rotated files
systemctl start docker.service && systemctl enable docker.service
systemctl status docker.service
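
When Docker is the runtime (Kubernetes before 1.24), the Kubernetes docs recommend the systemd cgroup driver on systemd hosts, and kubelet and Docker must agree on it. A hedged sketch that extends the daemon.json above (merge it with, rather than duplicate, the existing file):

# set Docker's cgroup driver to systemd
cat <<EOF > /etc/docker/daemon.json
{
"registry-mirrors": ["https://xxxx.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {"max-size": "100m", "max-file": "3"}
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup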

Install containerd & crictl

containerd is the container runtime. crictl (from cri-tools) is a Docker-compatible CLI for CRI-compatible container runtimes; for example, crictl pull imagename pulls an image. You can use it to inspect and debug container runtimes and applications on a Kubernetes node. crictl and its source code live in the cri-tools repository. By default crictl connects to unix:///var/run/dockershim.sock; for other runtimes you can set the endpoint in several different ways:

by passing the --runtime-endpoint and --image-endpoint flags
by setting the CONTAINER_RUNTIME_ENDPOINT and IMAGE_SERVICE_ENDPOINT environment variables
by setting the endpoint in the configuration file, --config=/etc/crictl.yaml

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y containerd.io cri-tools
# Note: if /etc/containerd/config.toml is not configured, the kubelet service will fail to start during kubeadm init
# Create the containerd configuration file at /etc/containerd/config.toml
# Generate a default config file manually:
containerd config default > /etc/containerd/config.toml
# Replace the default pause (sandbox) image, which is registry.k8s.io/pause:3.6, by editing this section of /etc/containerd/config.toml:
[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9" # adjust the version as needed
# Configure crictl; without this, pulling images with crictl will not work properly:
cat <<EOF> /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
# start the service
systemctl start containerd.service && systemctl enable containerd.service
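
The config.toml edit above is manual; a non-interactive sketch with sed is below. The SystemdCgroup line is my addition rather than part of the original notes, but kubeadm clusters on systemd hosts generally need it so that containerd and the kubelet agree on the cgroup driver:

# point the sandbox (pause) image at the Aliyun mirror
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
# enable the systemd cgroup driver for the runc runtime (assumption: systemd-based host)
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
systemctl restart containerd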

Note: image pulls through containerd frequently time out; one workaround is to pull the image with ctr, as shown below.

crictl images is mainly for the Kubernetes CRI (Container Runtime Interface) and reads the k8s.io namespace by default.

ctr images ls reads the default namespace unless another namespace is specified explicitly.

For the difference between ctr and crictl when pulling and storing images, see the post "crictl和ctr拉取镜像和存储的区别" (蒲公英PGY, 博客园).

# pull an image
ctr -n k8s.io images pull docker.io/calico/cni:v3.25.0
# list images with crictl
crictl images
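
Because ctr and crictl look at different containerd namespaces, an image pulled without -n k8s.io is not visible to the kubelet. A quick demonstration, reusing the image name from above:

# image pulled into the default namespace: visible to 'ctr images ls' but not to crictl
ctr images pull docker.io/calico/cni:v3.25.0
ctr images ls
crictl images | grep calico || echo "not visible to CRI"
# image pulled into the k8s.io namespace: visible to both
ctr -n k8s.io images pull docker.io/calico/cni:v3.25.0
ctr -n k8s.io images ls | grep calico
crictl images | grep calico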

Prepare the kubeadm configuration file

# kubectl autocompletion
# https://kubernetes.io/zh-cn/docs/tasks/tools/#enabling-shell-autocompletion
kubeadm config
vim kubeadm-config.yaml
# kubeadm init --config kubeadm-config.yaml --upload-certs
# kubeadm config images pull --config kubeadm-config.yaml # pull the images first
# kubeadm config print init-defaults
# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# https://storage.googleapis.com/kubernetes-release/release/stable.txt
# kubernetesVersion: stable
# note: match the kubeadm/kubelet version installed above; newer kubeadm releases (e.g. 1.27.x) expect apiVersion kubeadm.k8s.io/v1beta3
kubernetesVersion: v1.22.5
controlPlaneEndpoint: <your-lb-ip>:<port> # apiserver address and port (or the IP and port of a front-end load balancer)
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# apiServer:
#   timeoutForControlPlane: 4m0s
# controllerManager: {}
# scheduler: {}
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: 172.30.0.0/16
  serviceSubnet: 10.96.0.0/12
dns: # on Kubernetes 1.24 and later CoreDNS is already the default; do not add this block there, or init will report an error
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
## Custom CA
# https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/
# This means you can, for example, copy an existing CA into /etc/kubernetes/pki/ca.crt and /etc/kubernetes/pki/ca.key, and kubeadm will use this CA for signing the rest of the certificates
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
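
The autocompletion link at the top of this block gives no commands; a minimal bash sketch following the linked Kubernetes docs (the bash-completion package is required):

yum install -y bash-completion
# load completion for the current shell and persist it
source <(kubectl completion bash)
echo 'source <(kubectl completion bash)' >> ~/.bashrc
# optional alias with completion
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc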

Join additional master (control-plane) nodes with the token

kubeadm join 192.168.1.220:6443 --token ayixkv.zla8vgb997qzisbw \
--discovery-token-ca-cert-hash sha256:a00b5f0018906bdb4f4b169e12eb91fd1b091cc1c9bd8eb9adb40bdc7e96411b \
--control-plane
kubeadm join 192.168.1.220:6443 --token m8epvp.7wr184qymryb4uxj \
--discovery-token-ca-cert-hash sha256:93fb325362fb9509879f103836b295c103924c962df0babfcab8d460eee23598 \
--control-plane --certificate-key a00b5f0018906bdb4f4b169e12eb91fd1b091cc1c9bd8eb9adb40bdc7e96411b

Join worker nodes with the token

kubeadm join 192.168.1.220:6443 --token ayixkv.zla8vgb997qzisbw \
--discovery-token-ca-cert-hash sha256:1b1650eff0bfcc738226d68315e25292a4a7846b49808dbd96598b6bb69f7a36

Install the Calico network plugin

curl -LO https://docs.projectcalico.org/v3.14/manifests/calico.yaml
# change CALICO_IPV4POOL_CIDR to match podSubnet: 172.30.0.0/16
- name: CALICO_IPV4POOL_CIDR
  value: "172.30.0.0/16"
# switch to BGP mode by disabling IPIP encapsulation: https://docs.projectcalico.org/reference/node/configuration
- name: CALICO_IPV4POOL_IPIP
  value: "Never"
kubectl apply -f calico.yaml
# the coredns pods should no longer be Pending
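
A quick verification step (not in the original notes) after applying the manifest:

# nodes should move to Ready and the coredns pods to Running once Calico is up
kubectl get pods -n kube-system -o wide
kubectl get nodes -o wide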

Notes:

If something goes wrong, reset and rebuild the cluster

kubeadm reset
kubectl delete node <node-name>
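
kubeadm reset leaves some state behind; its own output notes that CNI configuration, iptables/IPVS rules and kubeconfig files must be removed manually. A hedged cleanup sketch for a node being rebuilt:

# remove leftover CNI config and flush rules created by kube-proxy / the CNI plugin
rm -rf /etc/cni/net.d
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
ipvsadm --clear
# remove the old kubeconfig on the node that was reset
rm -rf $HOME/.kube/config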

Switch the default namespace

kubectl config set-context $(kubectl config current-context) --namespace=<insert-namespace-name-here>
# Validate it
kubectl config view | grep namespace

Create a registry secret

# create registry secret
kubectl create secret docker-registry harbor --docker-server=harbor.test.local --docker-username=admin --docker-password=Admin@123 --docker-email=admin@gmail.com --namespace=boer-public
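
For the secret to take effect, pods (or the namespace's default ServiceAccount) must reference it through imagePullSecrets. A minimal sketch; the pod name and image path are placeholders:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: demo-private-image   # placeholder name
  namespace: boer-public
spec:
  imagePullSecrets:
  - name: harbor             # the secret created above
  containers:
  - name: app
    image: harbor.test.local/library/yourimage:tag   # placeholder image
EOF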

Join a node

kubeadm token list
kubeadm token create --print-join-command
kubeadm join k8s.meitianiot.lom:6443 --token youtoken

Stop (drain) a node

kubectl drain $NODENAME
kubectl uncordon $NODENAME
# https://docs.docker.com/engine/reference/commandline/ps/#filtering
docker rm $(docker ps -a -f status=exited -q)
docker ps --format "{{.ID}}\t{{.Command}}\t{{.Status}}\t{{.Ports}}"
docker ps --filter "status=exited"
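
In practice kubectl drain refuses to evict DaemonSet pods or pods using emptyDir volumes unless extra flags are passed; a typical sequence (flag names as in recent kubectl releases) looks like:

# evict workloads, ignoring DaemonSet-managed pods
kubectl drain $NODENAME --ignore-daemonsets --delete-emptydir-data
# ... perform maintenance, then either bring the node back:
kubectl uncordon $NODENAME
# ... or remove it from the cluster entirely:
kubectl delete node $NODENAME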

etcdctl operations

etcdctl
# https://jimmysong.io/kubernetes-handbook/guide/using-etcdctl-to-access-kubernetes-data.html
curl -LO https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz
alias etcdctl='etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key'
etcdctl get /registry/namespaces/kube-system -w=json | jq .
etcdctl member list
etcdctl help
# backup / restore
etcdctl snapshot save
etcdctl snapshot status
etcdctl snapshot restore
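
The snapshot subcommands above are listed without arguments; a minimal backup sketch using the alias defined above (the backup directory is my choice):

mkdir -p /var/backups
# take a snapshot of the local etcd member and verify it
etcdctl --endpoints=https://127.0.0.1:2379 snapshot save /var/backups/etcd-$(date +%F).db
etcdctl snapshot status /var/backups/etcd-$(date +%F).db -w table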

calicoctl operations

# https://docs.projectcalico.org/archive/v3.14/getting-started/clis/calicoctl/
curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v3.14.1/calicoctl
mv calicoctl /usr/local/bin/
chmod a+x /usr/local/bin/calicoctl
vim /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"
calicoctl get nodes
calicoctl node status # shows which mode Calico is running in
calicoctl get ipPool -o yaml

Install Helm v2

# https://qhh.me/2019/08/08/Helm-%E5%AE%89%E8%A3%85%E4%BD%BF%E7%94%A8/
curl -LO https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz
tar -zxvf helm-v2.16.6-linux-amd64.tar.gz && cp linux-amd64/helm /usr/local/bin/
vim rbac-config.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
kubectl apply -f rbac-config.yaml
helm init --service-account tiller -i registry.aliyuncs.com/google_containers/tiller:v2.16.6
helm repo list
helm repo add stable https://mirror.azure.cn/kubernetes/charts
helm repo add incubator https://mirror.azure.cn/kubernetes/charts-incubator
helm repo update
helm fetch stable/mysql # downloads xxx.tgz into the current directory
helm install stable/mysql

MetalLB

# so that private-cloud / bare-metal users are not second-class citizens in the Kubernetes world
# https://metallb.universe.tf/installation/
# usage demo
apiVersion: v1
kind: Service
metadata:
  name: theapp-service
  annotations:
    metallb.universe.tf/address-pool: default
  labels:
    app: theapp
spec:
  type: LoadBalancer
  # type: NodePort
  # type: ClusterIP
  ports:
  - port: 5000
    targetPort: 5000
    # nodePort: 31090
  selector:
    app: theapp
kubectl get svc # curl -v EXTERNAL-IP
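
The Service above expects an address pool named default, which has to be defined after MetalLB is installed. A hedged sketch using the legacy ConfigMap format (MetalLB versions before 0.13; newer releases use IPAddressPool CRDs instead), with a placeholder address range that must match your LAN:

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250   # placeholder range on the node LAN
EOF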
# if in-cluster DNS cannot resolve a name, edit the global coredns ConfigMap (or the pod's hosts entries)
kubectl -n kube-system edit cm coredns
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        hosts {
           10.10.253.36 gitea.meitianiot.lo
           10.10.253.36 harbor.meitianiot.lo
           10.10.253.31 k8s.meitianiot.lom
           fallthrough
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2020-06-29T07:54:44Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "12239265"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: 3d0c9b9d-fa27-4a11-8ea2-12b3db11b848
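
A quick in-cluster check that the added hosts entries resolve (the busybox image tag follows the Kubernetes DNS debugging docs):

# one-off pod that is removed when the command exits
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup harbor.meitianiot.lo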

Remove Helm

# remove Helm (Tiller) from the cluster
helm reset
helm reset --force
kubectl delete deployment tiller-deploy --namespace kube-system

External storage: OpenEBS

# https://github.com/openebs/openebs
# https://zhuanlan.zhihu.com/p/519172233
# install OpenEBS with Helm v2
helm install --namespace openebs --name openebs stable/openebs --version 1.11.0
# then create a StorageClass and a PVC (a sketch follows below)
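
The StorageClass and PVC steps are only named above; a hedged sketch assuming the openebs-hostpath StorageClass that the OpenEBS chart creates by default (claim name and size are illustrative):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pvc                        # placeholder name
spec:
  storageClassName: openebs-hostpath    # assumption: default hostpath StorageClass from the OpenEBS chart
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF
kubectl get sc
# the PVC may stay Pending until a pod mounts it if the StorageClass uses WaitForFirstConsumer
kubectl get pvc demo-pvc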