* References
https://docs.rancher.cn/docs/k3s/quick-start/_index
https://docs.k3s.io/zh/installation/configuration
https://picluster.ricsanfre.com/docs/home
https://www.fullstaq.com/knowledge-hub/blogs/setting-up-your-own-k3s-home-cluster
https://traefik.io/blog/secure-web-applications-with-traefik-proxy-cert-manager-and-lets-encrypt/
Architecture
Installed components
* K3S as Kubernetes distribution
* MetalLB as load balancer
* Cert-Manager as certificate manager
* Rancher as cluster management
* KubeSphere as cluster management
* Traefik as ingress controller
k3s
Cluster load balancing: https://docs.k3s.io/zh/datastore/cluster-loadbalancer
1. Install 3 masters
* master-1
# curl -sfL https://rancher-mirror.rancher.cn/k3s/k3s-install.sh | INSTALL_K3S_VERSION="v1.26.7+k3s1" \
INSTALL_K3S_MIRROR=cn INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true \
K3S_KUBECONFIG_OUTPUT=~/.kube/config \
sh -s - server --cluster-init --disable traefik --disable servicelb
* cluster token
# cat /var/lib/rancher/k3s/server/token
K10436907fd6f93d46fc66f43dfb6ba84a903bf70ed1a375b032aaeba40641b1484::server:e19a070fbdb7ff553d665a45e0bb46f4
* master-2 & master-3
# curl -sfL https://rancher-mirror.rancher.cn/k3s/k3s-install.sh | INSTALL_K3S_VERSION="v1.26.7+k3s1" \
INSTALL_K3S_MIRROR=cn INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true \
K3S_KUBECONFIG_OUTPUT=~/.kube/config \
K3S_TOKEN="K10436907fd6f93d46fc66f43dfb6ba84a903bf70ed1a375b032aaeba40641b1484::server:e19a070fbdb7ff553d665a45e0bb46f4" \
sh -s - server --server https://192.168.10.35:6443 --disable traefik --disable servicelb
* Enable the k3s service at boot on all 3 masters
# systemctl enable k3s
2. Install worker-1 & worker-2
* Install
# curl -sfL https://rancher-mirror.rancher.cn/k3s/k3s-install.sh | INSTALL_K3S_VERSION="v1.26.7+k3s1" \
INSTALL_K3S_MIRROR=cn INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true \
K3S_KUBECONFIG_OUTPUT=~/.kube/config K3S_URL=https://192.168.10.35:6443 \
K3S_TOKEN="K10436907fd6f93d46fc66f43dfb6ba84a903bf70ed1a375b032aaeba40641b1484::server:e19a070fbdb7ff553d665a45e0bb46f4" \
sh -
* Enable the k3s-agent service at boot on both workers
# systemctl enable k3s-agent
3. Verify (nodes are cluster-scoped, so -A is not needed)
# kubectl get nodes
NAME          STATUS   ROLES                       AGE     VERSION
vm-33.local   Ready    <none>                      96s     v1.26.7+k3s1
vm-34.local   Ready    <none>                      88s     v1.26.7+k3s1
vm-35.local   Ready    control-plane,etcd,master   3m56s   v1.26.7+k3s1
vm-36.local   Ready    control-plane,etcd,master   2m52s   v1.26.7+k3s1
vm-37.local   Ready    control-plane,etcd,master   2m36s   v1.26.7+k3s1
4. Uninstall
* master
# k3s-killall.sh && k3s-uninstall.sh
* worker
# k3s-killall.sh && k3s-agent-uninstall.sh
helm
* kubeconfig
# cat /etc/rancher/k3s/k3s.yaml && cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
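To drive the cluster from a remote workstation instead, a sketch: copy the kubeconfig off master-1 and point it at the server's real address (k3s writes server: https://127.0.0.1:6443 by default; 192.168.10.35 is master-1 from above):
# scp root@192.168.10.35:/etc/rancher/k3s/k3s.yaml ~/.kube/config
# sed -i 's/127.0.0.1/192.168.10.35/' ~/.kube/config
# kubectl get nodes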
* install
# curl -L https://get.helm.sh/helm-v3.12.3-linux-amd64.tar.gz -o helm-v3.12.3-linux-amd64.tar.gz
# tar xvfz helm-v3.12.3-linux-amd64.tar.gz
# cp linux-amd64/helm /usr/local/bin/helm && chmod 755 /usr/local/bin/helm
# helm version
* repo
# helm repo add azure http://mirror.azure.cn/kubernetes/charts
# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
# helm repo update
# helm repo list
# helm search repo azure
# helm search repo aliyun
metallb
* repo
# helm repo add metallb https://metallb.github.io/metallb
# helm repo update
# helm search repo metallb
* install
# https_proxy=http://192.168.10.13:10809 \
helm install metallb metallb/metallb \
-n metallb-system --create-namespace
* show values
# https_proxy=http://192.168.10.13:10809 \
helm show values metallb/metallb > value.yml
* configure
# cat > example.yml << EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: example-pool
namespace: metallb-system
spec:
addresses:
- 192.168.10.241-192.168.10.250
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: example-l2
namespace: metallb-system
spec:
ipAddressPools:
- example-pool
EOF
# kubectl apply -f example.yml
* Verify
# kubectl get service -n metallb-system
NAME                      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
metallb-webhook-service   ClusterIP   10.43.143.197   <none>        443/TCP   3h12m
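The webhook service above only shows the controller is running; to confirm address assignment actually works, a quick throwaway test (the lb-test names are illustrative):
# kubectl create deployment lb-test --image=nginx
# kubectl expose deployment lb-test --port 80 --type LoadBalancer
# kubectl get service lb-test
EXTERNAL-IP should be assigned from 192.168.10.241-192.168.10.250
# kubectl delete service,deployment lb-test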
* uninstall
# https_proxy=http://192.168.10.13:10809 \
helm uninstall metallb -n metallb-system
cert-manager
* repo
# helm repo add jetstack https://charts.jetstack.io
# helm repo update
* install
# helm install cert-manager jetstack/cert-manager \
-n cert-manager --create-namespace \
--set installCRDs=true
* configure
# cat > cert-manager.yml << EOF
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned
spec:
  selfSigned: {}
EOF
# kubectl apply -f cert-manager.yml
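A quick check that the issuer registered (READY should be True):
# kubectl get clusterissuer selfsigned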
* Verify
# kubectl get service -n cert-manager
NAME                   TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
cert-manager           ClusterIP   10.43.185.68    <none>        9402/TCP   176m
cert-manager-webhook   ClusterIP   10.43.191.203   <none>        443/TCP    176m
* uninstall
# helm uninstall cert-manager -n cert-manager
rancher
* repo
# helm repo add rancher https://releases.rancher.com/server-charts/latest
# helm repo update
* install
# helm install rancher rancher/rancher \
-n cattle-system --create-namespace \
--set hostname=rancher.example.com --set bootstrapPassword=rancher
* configure
# kubectl edit service rancher -n cattle-system
change type: ClusterIP to type: LoadBalancer
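The same change non-interactively (this pattern also works for the kubernetes-dashboard and ks-console edits below):
# kubectl patch service rancher -n cattle-system -p '{"spec": {"type": "LoadBalancer"}}'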
* Verify
# kubectl get service -n cattle-system
NAME              TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
rancher           LoadBalancer   10.43.77.105    192.168.10.241   80:30516/TCP,443:32124/TCP   142m
rancher-webhook   ClusterIP      10.43.119.248   <none>           443/TCP                      140m
webhook-service   ClusterIP      10.43.243.214   <none>           443/TCP                      140m
* Access
Console: https://192.168.10.241
Account: admin
Password: rancher
* uninstall
# helm uninstall rancher -n cattle-system
kubernetes-dashboard
* repo
# helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
# helm repo update
* install
# helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard \
-n kubernetes-dashboard --create-namespace
* configure
# kubectl edit service kubernetes-dashboard -n kubernetes-dashboard
change type: ClusterIP to type: LoadBalancer
* Verify
# kubectl get service -n kubernetes-dashboard
NAME                   TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)         AGE
kubernetes-dashboard   LoadBalancer   10.43.250.121   192.168.10.244   443:32370/TCP   2m38s
* Access
Console: https://192.168.10.244
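The dashboard login screen asks for a bearer token, which the chart does not create for you; a minimal sketch (the dashboard-admin account is illustrative, and cluster-admin is far too broad for anything beyond a lab):
# kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
# kubectl -n kubernetes-dashboard create token dashboard-admin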
* uninstall
# helm uninstall kubernetes-dashboard -n kubernetes-dashboard
kubesphere
* install
# https_proxy=http://192.168.10.13:10809 \
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.0/kubesphere-installer.yaml
# https_proxy=http://192.168.10.13:10809 \
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.4.0/cluster-configuration.yaml
# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
* configure
# kubectl edit service ks-console -n kubesphere-system
change type: NodePort to type: LoadBalancer
* Verify
# kubectl get service -n kubesphere-system
NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)        AGE
ks-apiserver            ClusterIP      10.43.34.184    <none>           80/TCP         99m
ks-console              LoadBalancer   10.43.72.247    192.168.10.243   80:30880/TCP   99m
ks-controller-manager   ClusterIP      10.43.255.213   <none>           443/TCP        99m
redis                   ClusterIP      10.43.182.93    <none>           6379/TCP       101m
* Access
Console: http://192.168.10.243
Account: admin
Password: P@88w0rd
change the default password after first login (here: Abcd@1234)
* uninstall
# https_proxy=http://192.168.10.13:10809 \
curl -L https://raw.githubusercontent.com/kubesphere/ks-installer/release-3.4.0/scripts/kubesphere-delete.sh -o kubesphere-delete.sh
# sh kubesphere-delete.sh
traefik
* repo
# helm repo add traefik https://helm.traefik.io/traefik
# helm repo update
* install
# helm install traefik traefik/traefik -n traefik-system --create-namespace
* configure
# cat > traefik.yml << EOF
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
  name: dashboard
spec:
  entryPoints:
  - web
  routes:
  - match: Host(\`traefik.example.com\`) && (PathPrefix(\`/dashboard\`) || PathPrefix(\`/api\`))
    kind: Rule
    services:
    - name: api@internal
      kind: TraefikService
EOF
# kubectl apply -f traefik.yml
* Verify
# kubectl get service -n traefik-system
NAME      TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
traefik   LoadBalancer   10.43.193.170   192.168.10.242   80:32748/TCP,443:32090/TCP   2m22s
# curl -H 'Host: traefik.example.com' http://192.168.10.242/dashboard/
* Access
dns: traefik.example.com -> 192.168.10.242
Console: http://traefik.example.com/dashboard/ (the IngressRoute above only binds the web entrypoint)
* uninstall
# helm uninstall traefik -n traefik-system
argo cd
* install
# kubectl create namespace argocd
# https_proxy=http://192.168.10.13:10809 \
curl -L https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml -o argocd.yaml
# kubectl apply -n argocd -f argocd.yaml
* configure
# kubectl patch service argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
* Verify
# kubectl get service -n argocd
NAME                                      TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
argocd-applicationset-controller          ClusterIP      10.43.92.10     <none>           7000/TCP,8080/TCP            103s
argocd-dex-server                         ClusterIP      10.43.140.25    <none>           5556/TCP,5557/TCP,5558/TCP   102s
argocd-metrics                            ClusterIP      10.43.156.139   <none>           8082/TCP                     102s
argocd-notifications-controller-metrics   ClusterIP      10.43.33.241    <none>           9001/TCP                     102s
argocd-redis                              ClusterIP      10.43.12.142    <none>           6379/TCP                     102s
argocd-repo-server                        ClusterIP      10.43.206.70    <none>           8081/TCP,8084/TCP            102s
argocd-server                             LoadBalancer   10.43.41.160    192.168.10.243   80:31013/TCP,443:31707/TCP   102s
argocd-server-metrics                     ClusterIP      10.43.100.137   <none>           8083/TCP                     102s
* uninstall
# kubectl delete -n argocd -f argocd.yaml
* ui
Get the admin password
# kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
(older Argo CD releases used the server pod name as the initial password)
or
# kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo
altxfqNJ5PIXK1e8
console: http://192.168.10.243
user: admin
password: altxfqNJ5PIXK1e8 -> change to Abcd@1234
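With the argocd CLI installed, login and the password change can also be scripted (a sketch; --insecure because the server still uses its self-signed certificate):
# argocd login 192.168.10.243 --username admin --password altxfqNJ5PIXK1e8 --insecure
# argocd account update-password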
private registry
* configure
# cat > /etc/rancher/k3s/registries.yaml << EOF
mirrors:
  docker.io:
    endpoint:
      - "https://192.168.10.32:5000"
configs:
  "192.168.10.32:5000":
    auth:
      username: docker
      password: docker
    tls:
      cert_file: /etc/rancher/auth/domain.crt
      key_file: /etc/rancher/auth/domain.key
      ca_file: /etc/rancher/auth/ca.crt
EOF
* Copy the certificates to every cluster node
# ssh root@192.168.10.x mkdir -p /etc/rancher/{k3s,auth}
# scp domain.crt domain.key ca.crt root@192.168.10.x:/etc/rancher/auth
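registries.yaml is read locally by each node's containerd, so it must exist on every node as well, not only where it was written; a sketch:
# scp /etc/rancher/k3s/registries.yaml root@192.168.10.x:/etc/rancher/k3s/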
* Restart the service
# systemctl restart k3s
or
# systemctl restart k3s-agent
* Test on every cluster node
# crictl pull 192.168.10.32:5000/project_ci/app
demo
* configure
# cat > namespace.yml << EOF
apiVersion: v1
kind: Namespace
metadata:
  name: demo
EOF
# cat > deployment.yml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: demo
spec:
  replicas: 3
  selector:
    matchLabels:
      # manage pods with the label app: nginx
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
EOF
# cat > service.yml << EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: demo
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
  - name: https
    port: 443
    targetPort: 443
  selector:
    app: nginx
  type: LoadBalancer
EOF
# cat > ingress.yml << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx-ingress
  namespace: demo
  annotations:
    kubernetes.io/ingress.class: traefik
    cert-manager.io/cluster-issuer: selfsigned
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  rules:
  - host: demo.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx-svc
            port:
              number: 80
  tls:
  - hosts:
    - demo.example.com
    secretName: demo.example.com
EOF
# cat > cert.yml << EOF
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: demo.example.com
  namespace: demo
spec:
  commonName: demo.example.com
  secretName: demo.example.com
  dnsNames:
  - demo.example.com
  issuerRef:
    name: selfsigned
    kind: ClusterIssuer
EOF
# kubectl apply -f namespace.yml -f deployment.yml -f service.yml -f ingress.yml -f cert.yml
* Verify
# kubectl get service -n demo
NAME        TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
nginx-svc   LoadBalancer   10.43.197.171   192.168.10.244   80:32750/TCP,443:32669/TCP   9m50s
# kubectl get pod -n demo
NAME                     READY   STATUS    RESTARTS   AGE
nginx-7f456874f4-4rn5b   1/1     Running   0          10m
nginx-7f456874f4-5b49g   1/1     Running   0          10m
nginx-7f456874f4-87d5x   1/1     Running   0          10m
# curl -I -H 'Host: demo.example.com' http://192.168.10.244
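The curl above hits the service's own LoadBalancer IP; the cert-manager certificate is actually served by the Traefik ingress (192.168.10.242 from the traefik section). A sketch to exercise the TLS path without touching DNS (-k because the issuer is self-signed):
# curl -vk --resolve demo.example.com:443:192.168.10.242 https://demo.example.com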
* Access
dns: demo.example.com -> 192.168.10.244
Console: http://demo.example.com
pod scheduling
To control where pods land, use either 'nodeSelector & nodeAffinity' or 'taints & tolerations'.
1. nodeSelector & nodeAffinity: run pods on specific nodes
# kubectl label nodes vm-33.local node=worker
# kubectl label nodes vm-34.local node=worker
deployment.yml (under spec.template.spec)
spec:
  ......
  nodeSelector:
    node: worker
2. taints & tolerations: keep pods off specific nodes
# kubectl taint nodes vm-35.local dedicated=control:NoSchedule
# kubectl taint nodes vm-36.local dedicated=control:NoSchedule
# kubectl taint nodes vm-37.local dedicated=control:NoSchedule
# kubectl taint nodes vm-33.local deploy=dev:NoSchedule
# kubectl taint nodes vm-34.local deploy=dev:NoSchedule
deployment.yml (under spec.template.spec)
spec:
  ......
  tolerations:
  - key: "deploy"
    operator: "Equal"
    value: "dev"
    effect: "NoSchedule"
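To confirm scheduling took effect, check which node each replica landed on; with the labels and taints above, all demo pods should sit on vm-33.local/vm-34.local:
# kubectl get pods -n demo -o wide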
ha
k3s auto-deploy manifest directory: /var/lib/rancher/k3s/server/manifests (any manifest placed here is applied to the cluster automatically)
each k3s control node runs keepalived + haproxy, giving the apiserver a floating VIP
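A minimal haproxy.cfg sketch for this layout (the VIP 192.168.10.240 and frontend port 8443 are assumptions; keepalived floats the VIP between the control nodes, and a separate port avoids colliding with the local apiserver already bound to 6443):
frontend k3s-apiserver
    bind *:8443
    mode tcp
    default_backend k3s-servers

backend k3s-servers
    mode tcp
    option tcp-check
    balance roundrobin
    server master-1 192.168.10.35:6443 check
    server master-2 192.168.10.36:6443 check
    server master-3 192.168.10.37:6443 check
Agents would then join with --server https://192.168.10.240:8443 instead of a fixed master IP.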