Geek Time Ops Advanced Training Camp: Week 18 Assignment

1. Deploy a log-collection component as a DaemonSet in K8s to collect pod logs

# Build the image

root@k8s-master1:~/elk_case/1.daemonset-logstash/1.logstash-image-Dockerfile# cat Dockerfile
FROM logstash:7.12.1


USER root
WORKDIR /usr/share/logstash
#RUN rm -rf config/logstash-sample.conf
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf

root@k8s-master1:~/elk_case/1.daemonset-logstash/1.logstash-image-Dockerfile# cat logstash.conf
input {
  file {
    path => "/var/lib/docker/containers/*/*-json.log" # when docker is the runtime
    #path => "/var/log/pods/*/*/*.log" # when containerd is the runtime
    start_position => "beginning"
    type => "jsonfile-daemonset-applog"
  }

  file {
    path => "/var/log/*.log"
    start_position => "beginning"
    type => "jsonfile-daemonset-syslog"
  }
}

output {
  if [type] == "jsonfile-daemonset-applog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  # size of each batch Logstash sends to Kafka, in bytes
      codec => "${CODEC}"
   } }

  if [type] == "jsonfile-daemonset-syslog" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}" # system logs are not in JSON format
  }}
}
root@k8s-master1:~/elk_case/1.daemonset-logstash/1.logstash-image-Dockerfile# cat logstash.yml
http.host: "0.0.0.0"
#xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]


root@k8s-master1:~/elk_case/1.daemonset-logstash/1.logstash-image-Dockerfile# cat build-commond.sh
#!/bin/bash

#docker build -t harbor.magedu.local/baseimages/logstash:v7.12.1-json-file-log-v4 .

#docker push harbor.magedu.local/baseimages/logstash:v7.12.1-json-file-log-v4

docker build -t harbor.iclinux.com/baseimages/logstash:v7.12.1-json-file-log-v1 .

docker push harbor.iclinux.com/baseimages/logstash:v7.12.1-json-file-log-v1
root@k8s-master1:~/elk_case/1.daemonset-logstash/1.logstash-image-Dockerfile# bash build-commond.sh


root@k8s-master1:~/elk_case/1.daemonset-logstash# cat 2.DaemonSet-logstash.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: logstash-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: logstash-logging
spec:
  selector:
    matchLabels:
      name: logstash-elasticsearch
  template:
    metadata:
      labels:
        name: logstash-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: logstash-elasticsearch
        image: harbor.iclinux.com/baseimages/logstash:v7.12.1-json-file-log-v1
        env:
        - name: "KAFKA_SERVER"
          value: "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
        - name: "TOPIC_ID"
          value: "jsonfile-log-topic"
        - name: "CODEC"
          value: "json"
#        resources:
#          limits:
#            cpu: 1000m
#            memory: 1024Mi
#          requests:
#            cpu: 500m
#            memory: 1024Mi
        volumeMounts:
        - name: varlog # host system log volume
          mountPath: /var/log # mount point for the host's system logs
        - name: varlibdockercontainers # container log volume; must match the collection path in the logstash config
          mountPath: /var/lib/docker/containers # docker mount path
          #mountPath: /var/log/pods # containerd mount path; must match logstash's log collection path
          readOnly: false
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log # host system logs
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers # docker log path on the host
          #path: /var/log/pods # containerd log path on the host
root@k8s-master1:~/elk_case/1.daemonset-logstash# kubectl  apply -f 2.DaemonSet-logstash.yaml


root@k8s-master1:~/elk_case/1.daemonset-logstash# kubectl  get pods -n kube-system|grep logstash
logstash-elasticsearch-6hzgs               1/1     Running             0              52s
logstash-elasticsearch-dcqw4               1/1     Running             0              52s
logstash-elasticsearch-pdnm4               0/1     ContainerCreating   0              52s
logstash-elasticsearch-pjfng               0/1     ContainerCreating   0              52s
logstash-elasticsearch-s85td               1/1     Running             0              52s
logstash-elasticsearch-zlnnd               1/1     Running             0              52s


root@elk-logstash1:/etc/logstash/conf.d# hostname -I
172.31.2.104
root@elk-logstash1:/etc/logstash/conf.d# cat kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
    topics => ["jsonfile-log-topic"]
    codec => "json"
  }
}


output {
  #if [fields][type] == "app1-access-log" {
  if [type] == "jsonfile-daemonset-applog" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "jsonfile-daemonset-applog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }}

  if [type] == "jsonfile-daemonset-syslog" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "jsonfile-daemonset-syslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }}

}
root@elk-logstash1:/etc/logstash/conf.d# systemctl  restart logstash.service
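
Before moving on to the test workload, it is worth confirming the pipeline end to end. A minimal, hedged check, assuming Kafka is installed under /usr/local/kafka (adjust to the actual install path) and using the Elasticsearch credentials from the config above:

```bash
# peek at a few messages the DaemonSet shipped into the topic (Kafka bin path is an assumption)
/usr/local/kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server 172.31.2.107:9092 \
  --topic jsonfile-log-topic --from-beginning --max-messages 5

# once kafka-to-es.conf is running, the daily indices should exist
curl -s -u magedu:123456 "http://172.31.2.101:9200/_cat/indices?v" | grep jsonfile-daemonset
```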
# Prepare the NFS environment (run on the NFS server)
 mkdir -pv /data/k8sdata/magedu/images
 mkdir -pv /data/k8sdata/magedu/static
# Create an nginx test service
root@k8s-master1:/opt/k8s-data/yaml/magedu/nginx# cat nginx.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-nginx-deployment-label
  name: magedu-nginx-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-nginx-selector
  template:
    metadata:
      labels:
        app: magedu-nginx-selector
    spec:
      containers:
      - name: magedu-nginx-container
        #image: harbor.magedu.net/magedu/nginx-web1:dsr23dd-20220807172712
        image: nginx:1.20.2
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "20"
        resources:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 500m
            memory: 256Mi

        volumeMounts:
        - name: magedu-images
          mountPath: /usr/local/nginx/html/webapp/images
          readOnly: false
        - name: magedu-static
          mountPath: /usr/local/nginx/html/webapp/static
          readOnly: false
      volumes:
      - name: magedu-images
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/magedu/images
      - name: magedu-static
        nfs:
          server: 172.31.7.109
          path: /data/k8sdata/magedu/static
      #nodeSelector:
      #  group: magedu



---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-nginx-service-label
  name: magedu-nginx-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30092
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30091
  selector:
    app: magedu-nginx-selector
root@k8s-master1:/opt/k8s-data/yaml/magedu/nginx# kubectl  apply -f nginx.yaml
deployment.apps/magedu-nginx-deployment created
service/magedu-nginx-service created

After accessing the service URL, the logs can be viewed in Kibana.

2. Add a sidecar container to pods in K8s to collect application logs

root@k8s-master1:~/elk_case/2.sidecar-logstash/1.logstash-image-Dockerfile# cat Dockerfile
FROM logstash:7.12.1


USER root
WORKDIR /usr/share/logstash
#RUN rm -rf config/logstash-sample.conf
ADD logstash.yml /usr/share/logstash/config/logstash.yml
ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf

root@k8s-master1:~/elk_case/2.sidecar-logstash/1.logstash-image-Dockerfile# cat logstash.conf
input {
  file {
    path => "/var/log/applog/catalina.out"
    start_position => "beginning"
    type => "app1-sidecar-catalina-log"
  }
  file {
    path => "/var/log/applog/localhost_access_log.*.txt"
    start_position => "beginning"
    type => "app1-sidecar-access-log"
  }
}

output {
  if [type] == "app1-sidecar-catalina-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384  # size of each batch Logstash sends to Kafka, in bytes
      codec => "${CODEC}"
   } }

  if [type] == "app1-sidecar-access-log" {
    kafka {
      bootstrap_servers => "${KAFKA_SERVER}"
      topic_id => "${TOPIC_ID}"
      batch_size => 16384
      codec => "${CODEC}"
  }}
}
root@k8s-master1:~/elk_case/2.sidecar-logstash/1.logstash-image-Dockerfile# cat logstash.yml
http.host: "0.0.0.0"
#xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
root@k8s-master1:~/elk_case/2.sidecar-logstash/1.logstash-image-Dockerfile# cat build-commond.sh
#!/bin/bash

#docker build -t harbor.magedu.local/baseimages/logstash:v7.12.1-sidecar .

#docker push harbor.magedu.local/baseimages/logstash:v7.12.1-sidecar
docker  build -t harbor.iclinux.com/baseimages/logstash:v7.12.1-sidecar .
docker push harbor.iclinux.com/baseimages/logstash:v7.12.1-sidecar
root@k8s-master1:~/elk_case/2.sidecar-logstash/1.logstash-image-Dockerfile# bash build-commond.sh

root@k8s-master1:~/elk_case/2.sidecar-logstash# cat 2.tomcat-app1.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-deployment-label
  name: magedu-tomcat-app1-deployment # name of this deployment
  namespace: magedu
spec:
  replicas: 3
  selector:
    matchLabels:
      app: magedu-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-selector
    spec:
      containers:
      - name: sidecar-container
        image: harbor.iclinux.com/baseimages/logstash:v7.12.1-sidecar
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        env:
        - name: "KAFKA_SERVER"
          value: "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
        - name: "TOPIC_ID"
          value: "tomcat-app1-topic"
        - name: "CODEC"
          value: "json"
        volumeMounts:
        - name: applogs
          mountPath: /var/log/applog
      - name: magedu-tomcat-app1-container
        image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1:v1
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
        volumeMounts:
        - name: applogs
          mountPath: /apps/tomcat/logs
        startupProbe:
          httpGet:
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5 # delay before the first probe
          failureThreshold: 3  # consecutive failures before the probe is considered failed
          periodSeconds: 3 # probe interval
        readinessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        livenessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /myapp/index.html
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
      volumes:
      - name: applogs # emptyDir shared between the app container and the sidecar, so the sidecar can collect the app's logs
        emptyDir: {}

root@k8s-master1:~/elk_case/2.sidecar-logstash# kubectl  apply -f 2.tomcat-app1.yaml
deployment.apps/magedu-tomcat-app1-deployment created

root@k8s-master1:~/elk_case/2.sidecar-logstash# cat  3.tomcat-service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-service-label
  name: magedu-tomcat-app1-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 40080
  selector:
    app: magedu-tomcat-app1-selector

root@k8s-master1:~/elk_case/2.sidecar-logstash# kubectl  apply -f 3.tomcat-service.yaml
service/magedu-tomcat-app1-service created

root@elk-logstash1:/etc/logstash/conf.d# hostname -I
172.31.2.104
root@elk-logstash1:/etc/logstash/conf.d# cat kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
    topics => ["tomcat-app1-topic"]
    codec => "json"
  }
}

output {
  if [type] == "app1-sidecar-access-log" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "app1-sidecar-access-log-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }
  }

  if [type] == "app1-sidecar-catalina-log" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "app1-sidecar-catalina-log-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }
  }

}
root@elk-logstash1:/etc/logstash/conf.d# systemctl  restart logstash
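
A hedged way to verify the sidecar setup: generate some access-log traffic through the NodePort (172.31.7.111 is used as an example node IP), confirm the sidecar sees the Tomcat logs through the shared emptyDir, and check that the indices appear:

```bash
# generate a few access-log entries
curl -s -o /dev/null http://172.31.7.111:40080/myapp/index.html

# the app container writes to /apps/tomcat/logs; the sidecar reads the same emptyDir at /var/log/applog
kubectl -n magedu exec deploy/magedu-tomcat-app1-deployment -c sidecar-container -- ls -l /var/log/applog

# indices created by kafka-to-es.conf
curl -s -u magedu:123456 "http://172.31.2.101:9200/_cat/indices?v" | grep app1-sidecar
```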

3. Run a log-collection process inside the application container in K8s to collect business logs

root@k8s-master1:~/elk_case/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# cat Dockerfile
#tomcat web1
FROM harbor.iclinux.com/pub-images/tomcat-base:v8.5.43

ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD server.xml /apps/tomcat/conf/server.xml
#ADD myapp/* /data/tomcat/webapps/myapp/
ADD myapp.tar.gz /data/tomcat/webapps/myapp/
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD filebeat.yml /etc/filebeat/filebeat.yml
RUN chown  -R tomcat.tomcat /data/ /apps/
#ADD filebeat-7.5.1-x86_64.rpm /tmp/
#RUN cd /tmp && yum localinstall -y filebeat-7.5.1-amd64.deb

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]
root@k8s-master1:~/elk_case/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/catalina.out
  fields:
    type: filebeat-tomcat-catalina
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/localhost_access_log.*.txt
  fields:
    type: filebeat-tomcat-accesslog
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:

output.kafka:
  hosts: ["172.31.2.107:9092"]
  required_acks: 1
  topic: "filebeat-magedu-app1"
  compression: gzip
  max_message_bytes: 1000000
#output.redis:
#  hosts: ["172.31.2.105:6379"]
#  key: "k8s-magedu-app1"
#  db: 1
#  timeout: 5
#  password: "123456"
root@k8s-master1:~/elk_case/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# cat run_tomcat.sh
#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#echo "192.168.7.248 k8s-vip.example.com" >> /etc/hosts

/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
su - tomcat -c "/apps/tomcat/bin/catalina.sh start"
tail -f /etc/hosts
root@k8s-master1:~/elk_case/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t  harbor.iclinux.com/magedu/tomcat-app1:${TAG} .

docker push harbor.iclinux.com/magedu/tomcat-app1:${TAG}
#sleep 3
#docker push  harbor.magedu.net/magedu/tomcat-app1:${TAG}
#nerdctl build -t  harbor.magedu.net/magedu/tomcat-app1:${TAG}  .
#nerdctl push harbor.magedu.net/magedu/tomcat-app1:${TAG}
root@k8s-master1:~/elk_case/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# bash build-command.sh  v1


root@k8s-master1:~/elk_case/3.container-filebeat-process# cat 3.tomcat-app1.yaml
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-tomcat-app1-filebeat-deployment-label
  name: magedu-tomcat-app1-filebeat-deployment
  namespace: magedu
spec:
  replicas: 2
  selector:
    matchLabels:
      app: magedu-tomcat-app1-filebeat-selector
  template:
    metadata:
      labels:
        app: magedu-tomcat-app1-filebeat-selector
    spec:
      containers:
      - name: magedu-tomcat-app1-filebeat-container
        image: harbor.iclinux.com/magedu/tomcat-app1:v1
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
root@k8s-master1:~/elk_case/3.container-filebeat-process# kubectl  apply -f 3.tomcat-app1.yaml
deployment.apps/magedu-tomcat-app1-filebeat-deployment configured
root@k8s-master1:~/elk_case/3.container-filebeat-process# cat 4.tomcat-service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-tomcat-app1-filebeat-service-label
  name: magedu-tomcat-app1-filebeat-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30092
  selector:
    app: magedu-tomcat-app1-filebeat-selector

root@k8s-master1:~/elk_case/3.container-filebeat-process# kubectl  apply -f 4.tomcat-service.yaml
service/magedu-tomcat-app1-filebeat-service created

# Ship the logs from Kafka to ES
# cat kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "172.31.2.107:9092,172.31.2.108:9092,172.31.2.109:9092"
    topics => ["filebeat-magedu-app1"]
    codec => "json"
  }
}




output {
  if [fields][type] == "filebeat-tomcat-catalina" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "filebeat-tomcat-catalina-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }}

  if [fields][type] == "filebeat-tomcat-accesslog" {
    elasticsearch {
      hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
      index => "filebeat-tomcat-accesslog-%{+YYYY.MM.dd}"
      user => "magedu"
      password => "123456"
    }}

}
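
A hedged check that the in-container filebeat process started by run_tomcat.sh is alive and shipping (pod name resolved via the selector label; 172.31.7.111 is an example node IP; assumes ps is available in the image):

```bash
POD=$(kubectl -n magedu get pods -l app=magedu-tomcat-app1-filebeat-selector -o jsonpath='{.items[0].metadata.name}')
kubectl -n magedu exec "$POD" -- ps -ef | grep -v grep | grep filebeat

# generate traffic, then confirm the indices
curl -s -o /dev/null http://172.31.7.111:30092/myapp/index.html
curl -s -u magedu:123456 "http://172.31.2.101:9200/_cat/indices?v" | grep filebeat-tomcat
```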

4. Monitor CoreDNS with Prometheus and display the metrics in Grafana

# K8s CoreDNS configuration

root@k8s-master1:~# cat coredns-v1.8.6/coredns-v1.8.6.yaml
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get

- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes magedu.local. in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . 8.8.8.8 {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        #image: k8s.gcr.io/coredns/coredns:v1.8.0
        image: coredns/coredns:1.8.6
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.100.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
# Note: for production, give CoreDNS at least 2 CPU / 2Gi and preferably schedule it onto dedicated or single-purpose hosts. For the default forwarder, configure a local/in-country DNS server, and if you run your own DNS make sure to point to it. Keep the Corefile reasonably up to date rather than reusing an outdated configuration.
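
Before wiring up Prometheus, a quick hedged functional test of CoreDNS itself (the cluster domain is magedu.local per the Corefile above; busybox:1.28 is chosen because its nslookup behaves well):

```bash
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- \
  nslookup kubernetes.default.svc.magedu.local
```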

# Prometheus scrape configuration
root@prometheus-server01:/apps/prometheus# vim prometheus.yml
  - job_name: "coredns"
    static_configs:
      - targets: ["172.31.7.112:30009"]
root@prometheus-server01:/apps/prometheus# systemctl  restart  prometheus.service
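
The scrape target 172.31.7.112:30009 implies a NodePort Service in front of CoreDNS's 9153 metrics port that is not shown above. A minimal sketch of such a Service (the name coredns-metrics is an assumption), plus a reachability check and a few PromQL expressions that can back the Grafana panels:

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: coredns-metrics    # assumed name; any name works
  namespace: kube-system
spec:
  type: NodePort
  selector:
    k8s-app: kube-dns
  ports:
  - name: metrics
    port: 9153
    targetPort: 9153
    nodePort: 30009
EOF

# confirm the target Prometheus scrapes is reachable
curl -s http://172.31.7.112:30009/metrics | grep '^coredns_dns_requests_total' | head

# example PromQL for Grafana panels:
#   sum(rate(coredns_dns_requests_total[5m])) by (type)     -- QPS by query type
#   sum(rate(coredns_dns_responses_total[5m])) by (rcode)   -- responses by rcode
#   rate(coredns_cache_hits_total[5m])
#     / (rate(coredns_cache_hits_total[5m]) + rate(coredns_cache_misses_total[5m]))  -- cache hit ratio
```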

5. Scale out the K8s cluster: add master and worker nodes

5.1 Add a master node

root@k8s-deploy:/etc/kubeasz# ./ezctl  add-master k8s-01  172.31.7.103
root@k8s-deploy:/etc/kubeasz# kubectl  get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   master   16m   v1.23.1
172.31.7.102   Ready,SchedulingDisabled   master   16m   v1.23.1
172.31.7.103   Ready,SchedulingDisabled   master   97s   v1.23.1
172.31.7.111   Ready                      node     14m   v1.23.1
172.31.7.112   Ready                      node     14m   v1.23.1

5.2 Add a worker node

# Add a node
root@k8s-deploy:/etc/kubeasz# ./ezctl  add-node k8s-01  172.31.7.113
root@k8s-deploy:/etc/kubeasz# kubectl  get node
NAME           STATUS                     ROLES    AGE     VERSION
172.31.7.101   Ready,SchedulingDisabled   master   20m     v1.23.1
172.31.7.102   Ready,SchedulingDisabled   master   20m     v1.23.1
172.31.7.103   Ready,SchedulingDisabled   master   5m34s   v1.23.1
172.31.7.111   Ready                      node     18m     v1.23.1
172.31.7.112   Ready                      node     18m     v1.23.1
172.31.7.113   Ready                      node     119s    v1.23.1
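
A hedged sanity check that the new node really takes workloads; the cluster-level DaemonSets (the CNI, etc.) should land on it automatically:

```bash
kubectl get node 172.31.7.113 -o wide
kubectl get pods -A -o wide | grep 172.31.7.113      # DaemonSet pods (e.g. the CNI) should show up here
kubectl describe node 172.31.7.113 | grep -A6 Conditions
```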

6. Minor-version upgrade of the K8s cluster

# Upgrade a binary-deployed k8s cluster
# The following 4 upgrade packages are needed
https://dl.k8s.io/v1.23.17/kubernetes.tar.gz
https://dl.k8s.io/v1.23.17/kubernetes-client-linux-amd64.tar.gz
https://dl.k8s.io/v1.23.17/kubernetes-server-linux-amd64.tar.gz
https://dl.k8s.io/v1.23.17/kubernetes-node-linux-amd64.tar.gz
# Upload to the deploy node and extract
root@k8s-deploy:~/k8s-update# tar xf kubernetes-client-linux-amd64.tar.gz
root@k8s-deploy:~/k8s-update# tar xf kubernetes-server-linux-amd64.tar.gz
root@k8s-deploy:~/k8s-update# tar xf kubernetes-node-linux-amd64.tar.gz
root@k8s-deploy:~/k8s-update# tar xf kubernetes.tar.gz

root@k8s-deploy:~/k8s-update# mv kubernetes /usr/local/src/
root@k8s-deploy:/usr/local/src/kubernetes# ls server/bin/ -l

root@k8s-deploy:/usr/local/src/kubernetes# mkdir  /opt/k8sbak-for-update/
root@k8s-deploy:/usr/local/src/kubernetes# cp -r /etc/kubeasz/bin/ /opt/k8sbak-for-update/

root@k8s-deploy:/usr/local/src/kubernetes/server/bin# cp kube-apiserver kube-controller-manager kube-scheduler kubelet  kube-proxy kubectl   /etc/kubeasz/bin/
# Check the new binaries
root@k8s-deploy:/etc/kubeasz/bin# ./kube-apiserver --version
Kubernetes v1.23.17
root@k8s-deploy:/etc/kubeasz/bin# kubectl  get node
NAME           STATUS                     ROLES    AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   master   15d   v1.23.1
172.31.7.102   Ready,SchedulingDisabled   master   15d   v1.23.1
172.31.7.103   Ready,SchedulingDisabled   master   15d   v1.23.1
172.31.7.111   Ready                      node     15d   v1.23.1
172.31.7.112   Ready                      node     15d   v1.23.1
172.31.7.113   Ready                      node     15d   v1.23.1

# Upgrade k8s
root@k8s-deploy:/etc/kubeasz# ./ezctl  upgrade k8s-01

# Verify

root@k8s-deploy:/etc/kubeasz# kubectl  get node
NAME           STATUS                     ROLES   AGE   VERSION
172.31.7.101   Ready,SchedulingDisabled   node    15d   v1.23.17
172.31.7.102   Ready,SchedulingDisabled   node    15d   v1.23.17
172.31.7.103   Ready,SchedulingDisabled   node    15d   v1.23.17
172.31.7.111   Ready                      node    15d   v1.23.17
172.31.7.112   Ready                      node    15d   v1.23.17
172.31.7.113   Ready                      node    15d   v1.23.17
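
Beyond the node list, a hedged per-node check that all binaries were swapped (172.31.7.111 is just an example node):

```bash
kubectl get nodes -o wide            # kubelet version and container runtime per node
kubectl version --short             # client vs. server (apiserver) version
ssh 172.31.7.111 "kubelet --version && kube-proxy --version"
```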


> Note: before a major-version upgrade, back up the template files, adapt them to the new version's features, and then roll everything out in one pass.
```bash
root@k8s-deploy:/etc/kubeasz# ls  roles/kube-master/templates/
aggregator-proxy-csr.json.j2        kube-scheduler.service.j2
kube-apiserver.service.j2           kubernetes-csr.json.j2
kube-controller-manager.service.j2

```

7. Persist pod business data in K8s with Ceph RBD and CephFS

7.1 Persisting business data with RBD

# Check Ceph status
cephadmin@ceph-deploy:~$ ceph -s
  cluster:
    id:     b4574dcd-b2d5-41f5-933b-ccb3c211a35d
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 11m)
    mgr: ceph-mgr1(active, since 11m), standbys: ceph-mgr2
    mds: 2/2 daemons up, 2 standby
    osd: 20 osds: 20 up (since 10m), 20 in (since 81m)
    rgw: 2 daemons active (2 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   14 pools, 433 pgs
    objects: 457 objects, 110 MiB
    usage:   2.0 GiB used, 66 TiB / 66 TiB avail
    pgs:     433 active+clean

cephadmin@ceph-deploy:~$ ceph osd pool ls
device_health_metrics
myrbd2
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
cephfs-metadate
cephfs-data
mypool3
rbd-data1
default.rgw.buckets.index
default.rgw.buckets.data
my-hddpool
my-ssdpool
cephadmin@ceph-deploy:~$

# Create an RBD pool (done in the next section)



### Using Ceph RBD block storage from K8s
```bash
# Create the pool
cephadmin@ceph-deploy:~$ ceph  osd pool create shijie-rbd-pool1 32 32
pool 'shijie-rbd-pool1' created
cephadmin@ceph-deploy:~$ ceph osd pool ls
device_health_metrics
myrbd2
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
cephfs-metadate
cephfs-data
mypool3
rbd-data1
default.rgw.buckets.index
default.rgw.buckets.data
my-hddpool
my-ssdpool
shijie-rbd-pool1

cephadmin@ceph-deploy:~$ rbd pool init -p shijie-rbd-pool1
cephadmin@ceph-deploy:~$ rbd create shijie-img-img1 --size 3G --pool shijie-rbd-pool1 --image-format 2 --image-feature layering



# Prepare the K8s nodes
# Run on every master and worker node
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
echo "deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific focal main" >> /etc/apt/sources.list
apt update
apt-cache madison ceph-common
apt install -y ceph-common=16.2.11-1focal

# Create a Ceph client user
cephadmin@ceph-deploy:~$ ceph auth get-or-create client.magedu-shijie mon 'allow r' osd 'allow * pool=shijie-rbd-pool1'
[client.magedu-shijie]
        key = AQBo6RpkJZMiGhAAS9wpjCZprGGaHpXQ+/CK2w==
## Verify the user

cephadmin@ceph-deploy:~$ ceph auth get client.magedu-shijie
[client.magedu-shijie]
        key = AQBo6RpkJZMiGhAAS9wpjCZprGGaHpXQ+/CK2w==
        caps mon = "allow r"
        caps osd = "allow * pool=shijie-rbd-pool1"
exported keyring for client.magedu-shijie
## Export the user keyring
cephadmin@ceph-deploy:~$ cd ~/ceph-cluster/ && ceph auth get client.magedu-shijie -o ceph.client.magedu-shijie.keyring
## Distribute the files to every k8s node that needs to authenticate (masters and workers alike)
cephadmin@ceph-deploy:~/ceph-cluster$ scp ceph.client.magedu-shijie.keyring  root@172.31.7.111:/etc/ceph
cephadmin@ceph-deploy:~/ceph-cluster$ scp ceph.conf root@172.31.7.111:/etc/ceph
# Verify

root@k8s-node3:~# ceph --user magedu-shijie -s
  cluster:
    id:     b4574dcd-b2d5-41f5-933b-ccb3c211a35d
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 115m)
    mgr: ceph-mgr1(active, since 115m), standbys: ceph-mgr2
    mds: 2/2 daemons up, 2 standby
    osd: 20 osds: 20 up (since 114m), 20 in (since 3h)
    rgw: 2 daemons active (2 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   15 pools, 465 pgs
    objects: 461 objects, 110 MiB
    usage:   1.9 GiB used, 66 TiB / 66 TiB avail
    pgs:     465 active+clean

root@k8s-node3:~# rbd --user magedu-shijie ls --pool=shijie-rbd-pool1
shijie-img-img1
rbd: --user is deprecated, use --id
root@k8s-node3:~# rbd --id magedu-shijie ls --pool=shijie-rbd-pool1
shijie-img-img1

## Configure host name resolution on every k8s node
tee -a /etc/hosts << "EOF"
172.31.6.100 ceph-deploy.example.local  ceph-deploy
172.31.6.101 ceph-mon1.example.local    ceph-mon1
172.31.6.102 ceph-mon2.example.local    ceph-mon2
172.31.6.103 ceph-mon3.example.local    ceph-mon3
172.31.6.104 ceph-mgr1.example.local   ceph-mgr1
172.31.6.105 ceph-mgr2.example.local   ceph-mgr2
172.31.6.106 ceph-node1.example.local  ceph-node1
172.31.6.107 ceph-node2.example.local  ceph-node2
172.31.6.108 ceph-node3.example.local  ceph-node3
172.31.6.109 ceph-node4.example.local  ceph-node4
EOF

## Verify RBD access

root@k8s-master1:~/ceph-case# cat case1-busybox-keyring.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always
    name: busybox
    #restartPolicy: Always
    volumeMounts:
    - name: rbd-data1
      mountPath: /data
  volumes:
    - name: rbd-data1
      rbd:
        monitors:
        - '172.31.6.101:6789'
        - '172.31.6.102:6789'
        - '172.31.6.103:6789'
        pool: shijie-rbd-pool1
        image: shijie-img-img1
        fsType: ext4
        readOnly: false
        user: magedu-shijie
        keyring: /etc/ceph/ceph.client.magedu-shijie.keyring
root@k8s-master1:~/ceph-case# kubectl  apply -f case1-busybox-keyring.yaml
pod/busybox created


root@k8s-master1:~/ceph-case# kubectl  get pods
NAME        READY   STATUS    RESTARTS      AGE
busybox     1/1     Running   0             42s
net-test1   1/1     Running   7 (46h ago)   15d


root@k8s-master1:~/ceph-case# kubectl  exec -it busybox -- sh
/ # df -Th
Filesystem           Type            Size      Used Available Use% Mounted on
overlay              overlay       118.9G     13.3G    105.7G  11% /
tmpfs                tmpfs          64.0M         0     64.0M   0% /dev
tmpfs                tmpfs           3.9G         0      3.9G   0% /sys/fs/cgroup
/dev/rbd0            ext4            2.9G     24.0K      2.9G   0% /data
/ # echo  'data test1' >> /data/jike.log
/ # cat /data/jike.log
data test1

# Ceph RBD is now ready for use

root@k8s-master1:~/ceph-case# kubectl   delete  -f case1-busybox-keyring.yaml
pod "busybox" deleted
```
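With the in-tree rbd volume plugin, the kubelet maps the image on the host through the kernel RBD client while a pod uses it. A hedged check, run on the node hosting the pod (find it with `kubectl get pods -o wide`) while the pod is still running:

```bash
rbd showmapped            # should list shijie-rbd-pool1/shijie-img-img1 mapped to /dev/rbdX
lsblk -f /dev/rbd0        # the ext4 filesystem backing the pod's mount
mount | grep rbd0         # bind-mounted into the pod's volume path
```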
- Example 1
```bash

root@k8s-master1:~/ceph-case# cat case2-1-nginx-keyring-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        #image: mysql:5.6.46
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: magedu123456
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /usr/share/nginx/html/jike
          #mountPath: /var/lib/mysql
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '172.31.6.101:6789'
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: magedu-shijie
            keyring: /etc/ceph/ceph.client.magedu-shijie.keyring

root@k8s-master1:~/ceph-case# kubectl  apply -f case2-1-nginx-keyring-deployment.yaml
deployment.apps/nginx-deployment created
root@k8s-master1:~/ceph-case#

root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                                READY   STATUS    RESTARTS      AGE
net-test1                           1/1     Running   7 (46h ago)   15d
nginx-deployment-6c78c4c5fb-8fgr9   1/1     Running   0             38s
root@k8s-master1:~/ceph-case# kubectl  exec -it nginx-deployment-6c78c4c5fb-8fgr9 -- sh
# ls  /usr/share/nginx/html
50x.html  index.html  jike
# cd /usr/share/nginx/html/jike
# ls
jike.html  jike.log  lost+found
# cat jike.html
<h1>date test1</h1>
#

root@k8s-master1:~/ceph-case# kubectl  get pods -o wide
NAME                                READY   STATUS    RESTARTS      AGE     IP              NODE           NOMINATED NODE   READINESS GATES
net-test1                           1/1     Running   7 (46h ago)   15d     10.200.218.20   172.31.7.111   <none>           <none>
nginx-deployment-6c78c4c5fb-8fgr9   1/1     Running   0             4m47s   10.200.218.31   172.31.7.111   <none>           <none>
root@k8s-master1:~/ceph-case#
root@k8s-master1:~/ceph-case# curl 10.200.218.31:80/jike/jike.html
<h1>date test1</h1>

root@k8s-master1:~/ceph-case# cat case2-2-nginx-service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: ng-deploy-80-label
  name: ng-deploy-80
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 40081
  selector:
    app: ng-deploy-80
root@k8s-master1:~/ceph-case# kubectl  apply -f case2-2-nginx-service.yaml
service/ng-deploy-80 created

root@k8s-master1:~/ceph-case# kubectl  get ep
NAME           ENDPOINTS                                               AGE
kubernetes     172.31.7.101:6443,172.31.7.102:6443,172.31.7.103:6443   15d
ng-deploy-80   10.200.218.31:80                                        52s
root@k8s-master1:~/ceph-case# kubectl  get svc
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes     ClusterIP   10.100.0.1      <none>        443/TCP        15d
ng-deploy-80   NodePort    10.100.37.215   <none>        80:40081/TCP   61s

```
![](assets/k8s/img-20230322201912.png)

- Mounting via a Secret (instead of the keyring file): nginx
```bash
root@k8s-master1:~/ceph-case# kubectl  delete  -f case2-2-nginx-service.yaml  -f case2-1-nginx-keyring-deployment.yaml
service "ng-deploy-80" deleted
deployment.apps "nginx-deployment" deleted

cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth  print-key client.magedu-shijie | base64
QVFCbzZScGtKWk1pR2hBQVM5d3BqQ1pwckdHYUhwWFErL0NLMnc9PQ==


root@k8s-master1:~/ceph-case# cat case3-secret-client-shijie.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-magedu-shijie
type: "kubernetes.io/rbd"
data:
  key: QVFCbzZScGtKWk1pR2hBQVM5d3BqQ1pwckdHYUhwWFErL0NLMnc9PQ==
root@k8s-master1:~/ceph-case# kubectl  apply -f case3-secret-client-shijie.yaml
secret/ceph-secret-magedu-shijie created
root@k8s-master1:~/ceph-case# kubectl  get secrets
NAME                        TYPE                                  DATA   AGE
ceph-secret-magedu-shijie   kubernetes.io/rbd                     1      12s
default-token-cvjgq         kubernetes.io/service-account-token   3      16d
root@k8s-master1:~/ceph-case# kubectl  get secrets  ceph-secret-magedu-shijie -o json
{
    "apiVersion": "v1",
    "data": {
        "key": "QVFCbzZScGtKWk1pR2hBQVM5d3BqQ1pwckdHYUhwWFErL0NLMnc9PQ=="
    },
    "kind": "Secret",
    "metadata": {
        "annotations": {
            "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"key\":\"QVFCbzZScGtKWk1pR2hBQVM5d3BqQ1pwckdHYUhwWFErL0NLMnc9PQ==\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{},\"name\":\"ceph-secret-magedu-shijie\",\"namespace\":\"default\"},\"type\":\"kubernetes.io/rbd\"}\n"
        },
        "creationTimestamp": "2023-03-22T12:39:00Z",
        "name": "ceph-secret-magedu-shijie",
        "namespace": "default",
        "resourceVersion": "2055875",
        "uid": "9d9353c4-4365-4263-8e2b-cedf7942bb3e"
    },
    "type": "kubernetes.io/rbd"
}


root@k8s-master1:~/ceph-case# cat case4-nginx-secret.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /usr/share/nginx/html/rbd
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '172.31.6.101:6789'
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: magedu-shijie
            secretRef:
              name: ceph-secret-magedu-shijie
root@k8s-master1:~/ceph-case# kubectl  apply -f case4-nginx-secret.yaml
deployment.apps/nginx-deployment created
root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                               READY   STATUS              RESTARTS      AGE
net-test1                          1/1     Running             7 (47h ago)   15d
nginx-deployment-f7fcbcd87-gbxm6   0/1     ContainerCreating   0             5s
root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                               READY   STATUS    RESTARTS      AGE
net-test1                          1/1     Running   7 (47h ago)   15d
nginx-deployment-f7fcbcd87-gbxm6   1/1     Running   0             22s
root@k8s-master1:~/ceph-case#

root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                               READY   STATUS    RESTARTS      AGE
net-test1                          1/1     Running   7 (47h ago)   15d
nginx-deployment-f7fcbcd87-gbxm6   1/1     Running   0             45s
root@k8s-master1:~/ceph-case# kubectl  exec -it nginx-deployment-f7fcbcd87-gbxm6   -- bash
root@nginx-deployment-f7fcbcd87-gbxm6:/# df -Th
Filesystem                        Type     Size  Used Avail Use% Mounted on
overlay                           overlay  119G   14G  106G  12% /
tmpfs                             tmpfs     64M     0   64M   0% /dev
tmpfs                             tmpfs    3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv xfs      119G   14G  106G  12% /etc/hosts
shm                               tmpfs     64M     0   64M   0% /dev/shm
/dev/rbd0                         ext4     2.9G   32K  2.9G   1% /usr/share/nginx/html/rbd
tmpfs                             tmpfs    7.5G   12K  7.5G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                             tmpfs    3.9G     0  3.9G   0% /proc/acpi
tmpfs                             tmpfs    3.9G     0  3.9G   0% /proc/scsi
tmpfs                             tmpfs    3.9G     0  3.9G   0% /sys/firmware
root@nginx-deployment-f7fcbcd87-gbxm6:/# cd  /usr/share/nginx/html/rbd
root@nginx-deployment-f7fcbcd87-gbxm6:/usr/share/nginx/html/rbd# ls
jike.html  jike.log  lost+found
root@nginx-deployment-f7fcbcd87-gbxm6:/usr/share/nginx/html/rbd# echo "<h2>date test2</h2>" >> index.html
root@nginx-deployment-f7fcbcd87-gbxm6:/usr/share/nginx/html/rbd# cat jike.html
<h1>date test1</h1>

# Clean up
root@k8s-master1:~/ceph-case# kubectl  delete -f .
```

### Dynamic RBD block storage for pods via a StorageClass and Ceph
```bash
# Note: on a kubeadm-installed cluster, first resolve the issue that ceph/rbd commands cannot be executed (the kube-controller-manager container lacks them, which breaks the in-tree provisioner)

# Prepare the ceph admin key (run on the ceph deploy node)
cephadmin@ceph-deploy:~/ceph-cluster$ ceph auth  print-key client.admin |base64
QVFDTE9xaGp4cW4wS3hBQU9pdnBuQzhaWEJFTmFvN1o1OVNiWmc9PQ==

root@k8s-master1:~/ceph-case# cat case5-secret-admin.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFDTE9xaGp4cW4wS3hBQU9pdnBuQzhaWEJFTmFvN1o1OVNiWmc9PQ==
root@k8s-master1:~/ceph-case# kubectl   apply -f case5-secret-admin.yaml
secret/ceph-secret-admin created


root@k8s-master1:~/ceph-case# cat case3-secret-client-shijie.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-magedu-shijie
type: "kubernetes.io/rbd"
data:
  key: QVFCbzZScGtKWk1pR2hBQVM5d3BqQ1pwckdHYUhwWFErL0NLMnc9PQ==
root@k8s-master1:~/ceph-case# kubectl  apply -f case3-secret-client-shijie.yaml
secret/ceph-secret-magedu-shijie created

root@k8s-master1:~/ceph-case# kubectl  get secrets
NAME                        TYPE                                  DATA   AGE
ceph-secret-admin           kubernetes.io/rbd                     1      80s
ceph-secret-magedu-shijie   kubernetes.io/rbd                     1      29s
default-token-cvjgq         kubernetes.io/service-account-token   3      16d
root@k8s-master1:~/ceph-case#

# Both the admin secret and the regular-user secret are now created

root@k8s-master1:~/ceph-case# cat case6-ceph-storage-class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-shijie
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" # set to "true" to make this the default storage class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 172.31.6.101:6789,172.31.6.102:6789,172.31.6.103:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default
  pool: shijie-rbd-pool1
  userId: magedu-shijie
  userSecretName: ceph-secret-magedu-shijie
root@k8s-master1:~/ceph-case# kubectl  apply -f case6-ceph-storage-class.yaml
storageclass.storage.k8s.io/ceph-storage-class-shijie created
root@k8s-master1:~/ceph-case# kubectl  get storageclasses.storage.k8s.io
NAME                        PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-shijie   kubernetes.io/rbd   Delete          Immediate           false                  17s

# Create a PVC
root@k8s-master1:~/ceph-case# cat case7-mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-shijie
  resources:
    requests:
      storage: '5Gi'
root@k8s-master1:~/ceph-case# kubectl  apply  -f case7-mysql-pvc.yaml
persistentvolumeclaim/mysql-data-pvc created

root@k8s-master1:~/ceph-case# kubectl  get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS                AGE
mysql-data-pvc   Bound    pvc-2b3f9684-1b81-4b14-bb24-2ae5a2c30468   5Gi        RWO            ceph-storage-class-shijie   62s
root@k8s-master1:~/ceph-case#

# Verify on the Ceph side
cephadmin@ceph-deploy:~/ceph-cluster$ rbd  ls --pool shijie-rbd-pool1
kubernetes-dynamic-pvc-72309414-b139-4794-bfbb-712551397558
shijie-img-img1

# Create MySQL

root@k8s-master1:~/ceph-case# cat case8-mysql-single.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6.46
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: magedu123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 33306
  selector:
    app: mysql
root@k8s-master1:~/ceph-case# kubectl  apply -f case8-mysql-single.yaml
deployment.apps/mysql created
service/mysql-service created
root@k8s-master1:~/ceph-case# kubectl get pods
NAME                     READY   STATUS              RESTARTS      AGE
mysql-65bbd64658-mhbqp   0/1     ContainerCreating   0             7s
net-test1                1/1     Running             7 (47h ago)   15d


root@k8s-master1:~/ceph-case# kubectl  logs -f mysql-65bbd64658-mhbqp
2023-03-22 12:59:14+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 5.6.46-1debian9 started.
2023-03-22 12:59:14+00:00 [Note] [Entrypoint]: Switching to dedicated user 'mysql'
2023-03-22 12:59:14+00:00 [Note] [Entrypoint]: Entrypoint script for MySQL Server 5.6.46-1debian9 started.
2023-03-22 12:59:14+00:00 [Note] [Entrypoint]: Initializing database files
2023-03-22 12:59:14 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).
# Verify

root@k8s-master1:~/ceph-case# mysql -uroot -pmagedu123456 -h 172.31.7.111 -P33306
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.6.46 MySQL Community Server (GPL)

Copyright (c) 2000, 2023, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+---------------------+
| Database            |
+---------------------+
| information_schema  |
| #mysql50#lost+found |
| mysql               |
| performance_schema  |
+---------------------+
4 rows in set (0.00 sec)

mysql> create database jijke;
Query OK, 1 row affected (0.00 sec)

```
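A hedged check that the dynamically provisioned volume really persists data across pod restarts (the database jijke created above should survive a reschedule):

```bash
kubectl get pv                                   # the PV bound to mysql-data-pvc
kubectl delete pod -l app=mysql                  # force a reschedule; the PVC gets re-attached
kubectl get pods -l app=mysql                    # wait until the new pod is Running
mysql -uroot -pmagedu123456 -h 172.31.7.111 -P33306 -e 'show databases;'   # jijke should still be listed
```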

7.2 Persisting business data with CephFS

Goal: provide shared storage, so that application data is shared across pods and persisted with high performance and high availability.
```bash
# Prepare CephFS: confirm the MDS is up and active
cephadmin@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:2 {0=ceph-mon3=up:active,1=ceph-mon2=up:active} 2 up:standby

# Create the application
root@k8s-master1:~/ceph-case#
root@k8s-master1:~/ceph-case# cat case9-nginx-cephfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: magedu-staticdata-cephfs
          mountPath: /usr/share/nginx/html/cephfs
      volumes:
        - name: magedu-staticdata-cephfs
          cephfs:
            monitors:
            - '172.31.6.101:6789'
            - '172.31.6.102:6789'
            - '172.31.6.103:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: ng-deploy-80-service-label
  name: ng-deploy-80-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 33380
  selector:
    app: ng-deploy-80
root@k8s-master1:~/ceph-case# kubectl  apply -f case9-nginx-cephfs.yaml
deployment.apps/nginx-deployment created
service/ng-deploy-80-service created

root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                                READY   STATUS              RESTARTS      AGE
mysql-65bbd64658-mhbqp              1/1     Running             0             9m42s
net-test1                           1/1     Running             7 (47h ago)   15d
nginx-deployment-5c7cf8dd55-2mtnm   0/1     ContainerCreating   0             9s
nginx-deployment-5c7cf8dd55-cjzr6   0/1     ContainerCreating   0             9s
nginx-deployment-5c7cf8dd55-xjpdk   0/1     ContainerCreating   0             9s
root@k8s-master1:~/ceph-case# kubectl  get pods
NAME                                READY   STATUS    RESTARTS      AGE
mysql-65bbd64658-mhbqp              1/1     Running   0             11m
net-test1                           1/1     Running   7 (47h ago)   15d
nginx-deployment-5c7cf8dd55-2mtnm   1/1     Running   0             2m4s
nginx-deployment-5c7cf8dd55-cjzr6   1/1     Running   0             2m4s
nginx-deployment-5c7cf8dd55-xjpdk   1/1     Running   0             2m4s

root@k8s-master1:~/ceph-case# kubectl  exec -it nginx-deployment-5c7cf8dd55-2mtnm   -- bash
root@nginx-deployment-5c7cf8dd55-2mtnm:/# df -T
Filesystem                                              Type      1K-blocks     Used   Available Use% Mounted on
overlay                                                 overlay   124715524 15574216   109141308  13% /
tmpfs                                                   tmpfs         65536        0       65536   0% /dev
tmpfs                                                   tmpfs       4058664        0     4058664   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv                       xfs       124715524 15574216   109141308  13% /etc/hosts
shm                                                     tmpfs         65536        0       65536   0% /dev/shm
172.31.6.101:6789,172.31.6.102:6789,172.31.6.103:6789:/ ceph    22389465088     8192 22389456896   1% /usr/share/nginx/html/cephfs
tmpfs                                                   tmpfs       7810128       12     7810116   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                                                   tmpfs       4058664        0     4058664   0% /proc/acpi
tmpfs                                                   tmpfs       4058664        0     4058664   0% /proc/scsi
tmpfs                                                   tmpfs       4058664        0     4058664   0% /sys/firmware
root@nginx-deployment-5c7cf8dd55-2mtnm:/# ls /usr/share/nginx/html/cephfs
syslog
root@nginx-deployment-5c7cf8dd55-2mtnm:/# echo  "nginx web v1 cephfs" > /usr/share/nginx/html/cephfs/index.html
root@nginx-deployment-5c7cf8dd55-2mtnm:/#

root@k8s-master1:~/ceph-case# kubectl  get svc
NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes             ClusterIP   10.100.0.1       <none>        443/TCP          16d
mysql-service          NodePort    10.100.205.252   <none>        3306:33306/TCP   14m
ng-deploy-80-service   NodePort    10.100.140.107   <none>        80:33380/TCP     5m

```
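Since CephFS is shared (RWX), the index.html written from one replica should be served no matter which pod answers, and should be visible inside every replica. A hedged check (172.31.7.111 is an example node IP):

```bash
curl http://172.31.7.111:33380/cephfs/index.html

for p in $(kubectl get pods -l app=ng-deploy-80 -o name); do
  kubectl exec "$p" -- cat /usr/share/nginx/html/cephfs/index.html
done
```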

 
