Kubernetes Init Containers, Static Pods, Labels, and Annotations
Init Containers
Kubernetes introduced the init container feature in version 1.3. Init containers are one or more containers that run and complete before the application containers (app containers) start, preparing the ground the app containers depend on.
init 1 --> init 2 --> only after all init containers have run to completion can the application container start --> app container
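A classic real-world use is an init container that blocks until a dependency is reachable, so the app container never starts against a missing backend. The manifest below is only an illustrative sketch: myapp, wait-for-service, and the Service name myservice are hypothetical placeholders.

apiVersion: v1
kind: Pod
metadata:
  name: myapp
spec:
  initContainers:
  - name: wait-for-service            # runs first and must exit successfully
    image: busybox
    command: ["sh", "-c", "until nslookup myservice; do echo waiting for myservice; sleep 2; done"]
  containers:
  - name: myapp                       # started only after every init container has completed
    image: nginx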
Hands-on Example
# Check the kernel parameter to be modified
[root@kmaster ~]# sysctl -a|grep vm.overcommit_ratio
vm.overcommit_ratio = 50
# Generate the YAML file
[root@kmaster ~]# kubectl run initpod --image centos --image-pull-policy IfNotPresent --dry-run=client -o yaml -- sleep 3600 > /root/pod_yaml/initpod.yaml
[root@kmaster ~]# cd pod_yaml/
[root@kmaster pod_yaml]# ls
initpod.yaml
# Edit the YAML file
[root@kmaster pod_yaml]# vim initpod.yaml
[root@kmaster pod_yaml]# cat initpod.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: initpod
  name: initpod
spec:
  containers:
  - args:
    - sleep
    - "3600"
    image: centos
    imagePullPolicy: IfNotPresent
    name: initpod
    resources: {}
  initContainers:
  - name: initpod1
    image: alpine
    imagePullPolicy: IfNotPresent
    command: ["/sbin/sysctl","-w","vm.overcommit_ratio=55"]
    securityContext:
      privileged: true
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# Create the Pod
[root@kmaster pod_yaml]# kubectl apply -f initpod.yaml
pod/initpod created
# Check the Pod
[root@kmaster pod_yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
initpod 1/1 Running 0 14s 10.244.69.210 knode2 <none> <none>
# Verify on knode2 that the kernel parameter has been changed
[root@knode2 ~]# sysctl -a|grep vm.overcommit_ratio
vm.overcommit_ratio = 55
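Besides checking the node directly, the completion of the init container can also be confirmed from the Pod object itself; a small sketch (output omitted):

# show the termination state of the first (and only) init container
kubectl get pod initpod -o jsonpath='{.status.initContainerStatuses[0].state}'
# the Init Containers section of describe shows the same information
kubectl describe pod initpod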
# Generate the YAML file
[root@kmaster pod_yaml]# kubectl run initpod2 --image centos --image-pull-policy IfNotPresent --dry-run=client -o yaml -- sleep 3600 > /root/pod_yaml/initpod2.yaml
[root@kmaster pod_yaml]# ls
initpod2.yaml initpod.yaml
# Edit the YAML file
[root@kmaster pod_yaml]# vim initpod2.yaml
[root@kmaster pod_yaml]# cat initpod2.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: initpod2
  name: initpod2
spec:
  containers:
  - args:
    - sleep
    - "3600"
    image: centos
    imagePullPolicy: IfNotPresent
    name: initpod2
    resources: {}
    volumeMounts:
    - name: testdir
      mountPath: /test
  initContainers:
  - name: alpine
    image: alpine
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","echo 123 > /initest/test.txt"]
    volumeMounts:
    - name: testdir
      mountPath: /initest
  volumes:
  - name: testdir
    emptyDir: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
# Create the Pod
[root@kmaster pod_yaml]# kubectl apply -f initpod2.yaml
pod/initpod2 created
# Check status
[root@kmaster pod_yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
initpod 1/1 Running 0 32m 10.244.69.210 knode2 <none> <none>
initpod2 1/1 Running 0 2m25s 10.244.195.149 knode1 <none> <none>
# Exec into the container to verify the volume mount
[root@kmaster pod_yaml]# kubectl exec -it pods/initpod2 -- bash
Defaulted container "initpod2" out of: initpod2, alpine (init)
[root@initpod2 /]# cd /test/
[root@initpod2 test]# ls
test.txt
[root@initpod2 test]# cat test.txt
123
[root@initpod2 test]#
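The init container has already exited by now, but its logs are still retrievable with the -c flag; a sketch:

# read the logs of the init container named "alpine" inside initpod2
kubectl logs initpod2 -c alpine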
Static Pods
Note: do not do this on the master node, because the cluster's core components run there as static Pods; perform these steps on a worker node.
# Create a directory
[root@knode1 ~]# mkdir /etc/kubernetes/test
# Edit the kubelet configuration
[root@knode1 ~]# vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
[root@knode1 ~]# cat /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
# Add --pod-manifest-path=/etc/kubernetes/test to the kubelet environment variable arguments, pointing at the directory created above
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml --pod-manifest-path=/etc/kubernetes/test"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/sysconfig/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
# Reload systemd and restart kubelet
[root@knode1 ~]# systemctl daemon-reload
[root@knode1 ~]# systemctl restart kubelet.service
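Instead of the --pod-manifest-path flag, the same setting can normally be expressed as staticPodPath in the kubelet configuration file, which on this kubeadm-style setup is the /var/lib/kubelet/config.yaml referenced by --config above; a sketch of that alternative:

# /var/lib/kubelet/config.yaml (excerpt)
staticPodPath: /etc/kubernetes/test

# restart kubelet so the new path is picked up
systemctl restart kubelet.service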
# Write a YAML file in the test directory
[root@knode1 ~]# cd /etc/kubernetes/test
[root@knode1 test]# kubectl run pod1 --image nginx --image-pull-policy IfNotPresent --dry-run=client -o yaml > pod1.yaml
[root@knode1 test]# cat pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: pod1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@knode1 test]#
[root@knode1 test]# ls
pod1.yaml
# Query from the master; the static Pod appears, with its name suffixed by the node name
[root@kmaster ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
pod1-knode1 0/1 ContainerCreating 0 14s
# Once the YAML file is moved away, the static Pod is deleted as well
[root@knode1 test]# mv pod1.yaml /tmp/
[root@knode1 test]# ls
[root@kmaster ~]# kubectl get pod -o wide
No resources found in kongshuo namespace.
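Keep in mind that the object visible through the API server is only a mirror Pod maintained by the kubelet. Deleting that mirror Pod with kubectl does not stop the static Pod: as long as the manifest file is still in the directory, the kubelet recreates it; only removing or moving the manifest, as above, really deletes it. A sketch:

# while pod1.yaml is still in /etc/kubernetes/test, the mirror Pod comes right back
kubectl delete pod pod1-knode1
kubectl get pod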
Node Labels
Create labels and schedule Pods to specific nodes
# Label the nodes
[root@kmaster ~]# kubectl label nodes knode1 aaa=knode1
node/knode1 labeled
[root@kmaster ~]# kubectl get nodes knode1 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode1 Ready <none> 4d v1.26.0 aaa=knode1,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux
[root@kmaster ~]# kubectl label nodes knode2 bbb=knode2
node/knode2 labeled
[root@kmaster ~]# kubectl get nodes knode2 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode2 Ready <none> 4d v1.26.0 bbb=knode2,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux
# Test: schedule a Pod to knode1
[root@kmaster ~]# kubectl run pod1 --image nginx --image-pull-policy IfNotPresent --dry-run=client -o yaml > pod1.yaml
[root@kmaster ~]# vim pod1.yaml
[root@kmaster ~]# cat pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  nodeSelector:
    aaa: knode1
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: pod1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@kmaster ~]# kubectl apply -f pod1.yaml
pod/pod1 created
[root@kmaster ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 31s 10.244.195.150 knode1 <none> <none>
# Test: schedule a Pod to knode2
[root@kmaster ~]# kubectl run pod2 --image nginx --image-pull-policy IfNotPresent --dry-run=client -o yaml > pod2.yaml
[root@kmaster ~]# vim pod2.yaml
[root@kmaster ~]# cat pod2.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod2
  name: pod2
spec:
  nodeSelector:
    bbb: knode2
  containers:
  - image: nginx
    imagePullPolicy: IfNotPresent
    name: pod2
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
[root@kmaster ~]# kubectl apply -f pod2.yaml
pod/pod2 created
[root@kmaster ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod1 1/1 Running 0 3m4s 10.244.195.150 knode1 <none> <none>
pod2 1/1 Running 0 9s 10.244.69.211 knode2 <none> <none>
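If a nodeSelector matches no node label, the Pod simply stays Pending until a matching node appears; the reason shows up in the Pod's events. A sketch (output omitted):

# scheduling failures appear at the end of describe, in the Events section
kubectl describe pod pod1
# list only the nodes that carry a given label
kubectl get nodes -l aaa=knode1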
Delete node labels
[root@kmaster ~]# kubectl get nodes/knode1 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode1 Ready <none> 4d1h v1.26.0 aaa=knode1,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux
[root@kmaster ~]# kubectl label nodes/knode1 aaa-
node/knode1 unlabeled
[root@kmaster ~]# kubectl get nodes/knode1 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode1 Ready <none> 4d1h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux
[root@kmaster ~]# kubectl get nodes/knode2 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode2 Ready <none> 4d1h v1.26.0 bbb=knode2,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux
[root@kmaster ~]# kubectl label nodes/knode2 bbb-
node/knode2 unlabeled
[root@kmaster ~]# kubectl get nodes/knode2 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode2 Ready <none> 4d1h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux
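Setting or changing the value of a label that already exists requires the --overwrite flag, otherwise kubectl label refuses the update; a sketch with a hypothetical new value:

# set or replace the value of the aaa label on knode1
kubectl label nodes knode1 aaa=new-value --overwrite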
Pod Labels
Create labels
# Label the Pods
[root@kmaster ~]# kubectl get pods/pod1 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod1 1/1 Running 0 4m17s run=pod1
[root@kmaster ~]# kubectl label pods/pod1 aaa=pod1
pod/pod1 labeled
[root@kmaster ~]# kubectl get pods/pod1 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod1 1/1 Running 0 10m aaa=pod1,run=pod1
[root@kmaster ~]# kubectl get pods/pod2 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod2 1/1 Running 0 9m27s run=pod2
[root@kmaster ~]# kubectl label pods/pod2 bbb=pod2
pod/pod2 labeled
[root@kmaster ~]# kubectl get pods/pod2 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod2 1/1 Running 0 9m33s bbb=pod2,run=pod2
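Once Pods are labeled, label selectors can filter them; a couple of sketches:

# equality-based selector
kubectl get pods -l aaa=pod1
# set-based selector
kubectl get pods -l 'run in (pod1,pod2)'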
Delete labels
[root@kmaster ~]# kubectl get pods/pod1 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod1 1/1 Running 0 15m aaa=pod1,run=pod1
[root@kmaster ~]# kubectl label pods/pod1 aaa-
pod/pod1 unlabeled
[root@kmaster ~]# kubectl get pods/pod1 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod1 1/1 Running 0 15m run=pod1
[root@kmaster ~]# kubectl get pods/pod2 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod2 1/1 Running 0 13m bbb=pod2,run=pod2
[root@kmaster ~]# kubectl label pods/pod2 bbb-
pod/pod2 unlabeled
[root@kmaster ~]# kubectl get pods/pod2 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
pod2 1/1 Running 0 13m run=pod2
Node ROLES Labels
[root@kmaster ~]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
kmaster Ready control-plane 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=kmaster,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node.kubernetes.io/exclude-from-external-load-balancers=
knode1 Ready <none> 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux
knode2 Ready <none> 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux
# Add a kmaster role label to the master and remove the control-plane role label
[root@kmaster ~]# kubectl label nodes kmaster node-role.kubernetes.io/kmaster=
node/kmaster labeled
[root@kmaster ~]# kubectl get nodes kmaster --show-labels
NAME STATUS ROLES AGE VERSION LABELS
kmaster Ready control-plane,kmaster 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=kmaster,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/kmaster=,node.kubernetes.io/exclude-from-external-load-balancers=
[root@kmaster ~]# kubectl label nodes kmaster node-role.kubernetes.io/control-plane-
node/kmaster unlabeled
[root@kmaster ~]# kubectl get nodes kmaster --show-labels
NAME STATUS ROLES AGE VERSION LABELS
kmaster Ready kmaster 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=kmaster,kubernetes.io/os=linux,node-role.kubernetes.io/kmaster=,node.kubernetes.io/exclude-from-external-load-balancers=
# Add a role label to knode1
[root@kmaster ~]# kubectl label nodes knode1 node-role.kubernetes.io/knode1=
node/knode1 labeled
[root@kmaster ~]# kubectl get nodes knode1 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode1 Ready knode1 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux,node-role.kubernetes.io/knode1=
# Add a role label to knode2
[root@kmaster ~]# kubectl label nodes knode2 node-role.kubernetes.io/knode2=
node/knode2 labeled
[root@kmaster ~]# kubectl get nodes knode2 --show-labels
NAME STATUS ROLES AGE VERSION LABELS
knode2 Ready knode2 19h v1.26.0 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux,node-role.kubernetes.io/knode2=
# Role labels are now in place
[root@kmaster ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kmaster Ready kmaster 19h v1.26.0
knode1 Ready knode1 19h v1.26.0
knode2 Ready knode2 19h v1.26.0
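The ROLES column is simply derived from labels of the form node-role.kubernetes.io/<role>; removing such a label clears the role again. A sketch:

# drop the custom role from knode1; the ROLES column reverts to <none>
kubectl label nodes knode1 node-role.kubernetes.io/knode1-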
Add Annotations
[root@kmaster ~]# kubectl describe pods pod222 |head -20
Name: pod222
Namespace: kongshuo
Priority: 0
Service Account: default
Node: knode1/192.168.239.144
Start Time: Fri, 08 Sep 2023 03:01:43 -0400
Labels: run=pod222
Annotations: cni.projectcalico.org/containerID: 3717208f3174b12a7e339d6b1bef0dbd984598916f7462f457c222d2347469a4
cni.projectcalico.org/podIP: 10.244.195.141/32
cni.projectcalico.org/podIPs: 10.244.195.141/32
Status: Running
IP: 10.244.195.141
IPs:
IP: 10.244.195.141
Containers:
pod222:
Container ID: containerd://91ab8d14ea2b641868c7703d2c58f047f5ad037a7162eb2ac7fc8c919532f60b
Image: nginx
Image ID: docker.io/library/nginx@sha256:b437ad7a4d9964a2d5cbee850946380722082d04a1a731275a583825d68eec92
Port: <none>
[root@kmaster ~]# kubectl annotate pods pod222 zhe.shi.yi.tiao.zhu.shi=
pod/pod222 annotated
[root@kmaster ~]# kubectl describe pods pod222 |head -20
Name: pod222
Namespace: kongshuo
Priority: 0
Service Account: default
Node: knode1/192.168.239.144
Start Time: Fri, 08 Sep 2023 03:01:43 -0400
Labels: run=pod222
Annotations: cni.projectcalico.org/containerID: 3717208f3174b12a7e339d6b1bef0dbd984598916f7462f457c222d2347469a4
cni.projectcalico.org/podIP: 10.244.195.141/32
cni.projectcalico.org/podIPs: 10.244.195.141/32
zhe.shi.yi.tiao.zhu.shi:
Status: Running
IP: 10.244.195.141
IPs:
IP: 10.244.195.141
Containers:
pod222:
Container ID: containerd://91ab8d14ea2b641868c7703d2c58f047f5ad037a7162eb2ac7fc8c919532f60b
Image: nginx
Image ID: docker.io/library/nginx@sha256:b437ad7a4d9964a2d5cbee850946380722082d04a1a731275a583825d68eec92
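Annotations can be inspected, overwritten, and removed in much the same way as labels; a sketch reusing the same key:

# print all annotations on the Pod
kubectl get pod pod222 -o jsonpath='{.metadata.annotations}'
# change the value of an existing annotation
kubectl annotate pods pod222 zhe.shi.yi.tiao.zhu.shi='some value' --overwrite
# remove the annotation
kubectl annotate pods pod222 zhe.shi.yi.tiao.zhu.shi-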