k8s: emptyDir, hostPath, configMap, secret, pv/pvc


Storage Volume Concepts

Data Persistence

	A Pod is made up of containers, and when a container crashes or stops, its data is lost with it. This is where storage volumes come in: a volume exists so that a Pod's data can be kept.
    # There are many volume types; the common ones are emptyDir, hostPath, NFS, and cloud/distributed storage (Ceph, GlusterFS), etc.

1. emptyDir

	# Ephemeral volume: an emptyDir volume is created when the Pod is assigned to a node. Kubernetes automatically allocates a directory on that node, so no host directory needs to be specified. The directory starts out empty, and when the Pod is removed from the node, the data in the emptyDir is deleted for good. emptyDir volumes are mainly used as temporary directories for applications that don't need to keep data permanently.
	Summary: an emptyDir needs no pre-creation, k8s picks a directory on the node automatically, and when the Pod is deleted from the node the emptyDir is deleted with it.
	The point is temporary data sharing between containers of the same Pod, e.g. sharing logs, so the same data doesn't have to be produced twice.
	Several containers can mount the same emptyDir (see the two-container sketch after the verification below).
1. Create the volume
2. Mount it

#vim emptydir.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: emptydir
spec:
  replicas: 2  # two replicas
  selector:
    matchLabels:
      app: emptydir
  template:
    metadata:
      labels:
        app: emptydir
    spec:
      containers:
        - name: busybox
          image: busybox
          command:
            - '/bin/sh'
            - '-c'
            - 'while true; do echo `hostname` > /opt/test/index.html; sleep 1; done'
          volumeMounts:
            - mountPath: /opt/test/ # mount point (inside the container)
              name: test01 # which volume to use
      volumes: # declare a volume
        - name: test01 # volume name
          emptyDir: {} # volume type


# Apply
kubectl apply -f emptydir.yaml 
# Check
kubectl get pods



# Verify
[root@k8s-master-01 k8s]# kubectl exec -it emptydir-64989648f-gbws8 -- sh
/ # cd /opt/
/opt # ls 
test
/opt # cd test/
/opt/test # ll
sh: ll: not found
/opt/test # ls
index.html
/opt/test # cat index.html 
emptydir-64989648f-gbws8
/opt/test # exit
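To make the sharing concrete, here is a minimal sketch (the Pod name share-demo and the container names writer/reader are made up for illustration): two containers in one Pod mount the same emptyDir, so whatever writer puts under /data, reader can see.

---
kind: Pod
apiVersion: v1
metadata:
  name: share-demo
spec:
  containers:
    - name: writer
      image: busybox
      command: ['/bin/sh', '-c', 'while true; do date > /data/now.txt; sleep 1; done']
      volumeMounts:
        - mountPath: /data # writer's view of the shared volume
          name: shared
    - name: reader
      image: busybox
      command: ['/bin/sh', '-c', 'sleep 3600']
      volumeMounts:
        - mountPath: /data # same volume, so reader sees now.txt
          name: shared
  volumes:
    - name: shared
      emptyDir: {}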

2. hostPath

Creates a volume backed by a directory on the host machine.

Whichever node the container is deployed to, the volume binds to a directory on that node, i.e. the Pod shares that path with whatever host it happens to run on.
# Commonly used to map in host settings such as the system time and character encoding.

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: hostname
spec:
  selector:
    matchLabels:
      app: hostname
  template:
    metadata:
      labels:
        app: hostname
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
              # mount point inside the container
            - mountPath: /usr/share/nginx/html/
              name: test01
      volumes:
        - name: test01
          hostPath:
            path: /opt # path on the host (the node)

# Apply the manifest, then find the Pod IP (10.244.1.24 in this run):
    curl 10.244.1.24
    # returns 403
# because there is no index.html under /opt on the host.
# Create one on the node, curl 10.244.1.24 again,
# and nginx now serves the contents of that index.html.
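The same check spelled out as commands (the manifest file name hostpath.yaml, the Pod IP, and the file contents are examples):

kubectl apply -f hostpath.yaml
kubectl get pods -o wide        # note the Pod IP and the node it landed on
curl 10.244.1.24                # 403: /opt on that node has no index.html yet
echo 'hello hostPath' > /opt/index.html   # run this on that node
curl 10.244.1.24                # now serves 'hello hostPath'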

3. configMap

Turns configuration into a resource that can be projected into a container as a file or as environment variables.

# i.e. the config file is written into a k8s manifest

Once a configMap is mounted onto a directory, every file that was already in that directory is hidden by the mount.

Adding subPath keeps the existing files, but hot updates of the ConfigMap stop propagating to the mounted file.
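A hedged fragment showing the subPath variant, reusing the volume name nginxconf from the hot-update example below; only the single file is overlaid, so the rest of /etc/nginx keeps its original contents:

          volumeMounts:
            - mountPath: /etc/nginx/nginx.conf # target is a single file now
              name: nginxconf
              subPath: nginx.conf # path of the file inside the volume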



1. Projecting it as a file
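Mounting as a file is what the hot-update example below does via volumes + configMap items; the ConfigMap itself can also be created straight from a file on disk instead of an inline manifest (nginx.conf here stands for any local config file):

kubectl create configmap nginx --from-file=nginx.conf
kubectl describe configmap nginx   # each source file shows up as a key under data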

2. Projecting it as environment variables
The field path is Deployment.spec.template.spec.containers.envFrom; it references the ConfigMap by name, and optional: true makes the reference non-blocking.
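A minimal container-level fragment, assuming the ConfigMap named nginx defined in the next example:

          envFrom:
            - configMapRef:
                name: nginx      # every data key becomes an environment variable
                optional: true   # Pod still starts if the ConfigMap is absent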

3. Hot-updating nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx
data:
  nginx.conf: | # "|" starts a block scalar, for values that don't fit on one line
    user nginx;
    worker_processes auto;
    error_log /var/log/nginx/error.log;
    pid /run/nginx.pid;
    events {
        worker_connections 1024;
    }
    http {
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';
        access_log  /var/log/nginx/access.log  main;
        sendfile            on;
        tcp_nopush          on;
        tcp_nodelay         on;
        keepalive_timeout   65;
        types_hash_max_size 4096;
        default_type        application/octet-stream;
        include /etc/nginx/conf.d/*.conf;
    }
  default.conf: |
    server {
        listen 80;
        server_name www.test.com;
        location / {
            root /opt/;
            index index.html;
        }
    }
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /etc/nginx/ # base dir the relative paths below land in
              name: nginxconf # volume name
      volumes:
        - name: nginxconf
          configMap:
            name: nginx
            items:
              - key: nginx.conf
                path: ./nginx.conf # relative path the key is written to
              - key: default.conf
                path: ./conf.d/default.conf
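To see the hot update in action (the manifest file name and the Pod name are placeholders; the kubelet sync can take up to about a minute):

kubectl apply -f configmap-nginx.yaml
kubectl edit configmap nginx          # change e.g. server_name and save
kubectl exec -it <nginx-pod> -- cat /etc/nginx/conf.d/default.conf
# once the kubelet re-syncs the volume, the edit shows up without a Pod restart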

4. secret

An encoded version of configMap, for putting passwords into containers.

# "Encryption": echo -n '123456' | base64 -- this is only base64 encoding, so just marginally safer
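Decoding reverses it; for example, base64 of '123456' is 'MTIzNDU2':

echo -n '123456' | base64      # MTIzNDU2
echo 'MTIzNDU2' | base64 -d    # 123456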
1. The generic type
---
kind: Secret
apiVersion: v1
metadata:
  name: secret
data:
  # the value must be base64-encoded, otherwise the login will not work
  MYSQL_ROOT_PASSWORD: YTZYQXUlKlZpXktiW0RVUk1ZI3gyc2cjZyNecm1oLl0=
type: Opaque
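A hedged fragment showing how such a Secret is typically consumed, assuming a container (mysql here as an example) that reads MYSQL_ROOT_PASSWORD from its environment:

      containers:
        - name: mysql
          image: mysql:5.7
          envFrom:
            - secretRef:
                name: secret # keys are injected as env vars, values base64-decoded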

2. Storing Docker registry credentials
[root@k8s-master-01 ~]# kubectl create secret docker-registry aliyun01 --docker-server=registry.cn-hangzhou.aliyuncs.com --docker-username=yangyang091022 --docker-password=123456

3. Storing the cluster's TLS key and certificate
# create the certificate and key
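A sketch of producing the pair, assuming a self-signed certificate for www.test.com; kubectl create secret tls builds the same kind of Secret as the manifest below without hand-pasting base64:

openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout tls.key -out tls.crt -subj '/CN=www.test.com'
kubectl create secret tls www-test-com --cert=tls.crt --key=tls.key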


---
apiVersion: v1
data:
  tls.crt: >-
    LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQnBEQ0NBUTBDQVFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdNQ0ZOb1lXNW5TR0ZwTVJBdwpEZ1lEVlFRSERBZFJhVzVuY0FoUU1Rc3dDUVlEVlFRS0RBSnpZakVNTUFvR0ExVUVDd3dEWW05NU1SVXdFd1lEClZRUUREQXhpWldGMWRHbG1kV3hDYjNrd2daOHdEUVlKS29aSWh2Y05BUUVCQlFBRGdZMEFNSUdKQW9HQkFNMGQKNjROVFhMaGcwbGJ2WlBtV1krZmsvdEpqYnFCSk1JU2p3MEhScjd0QjhZdGxNa3F5QlptRWp5UGhuKzhjazVWRgpvQVVhZXhLcCs5UHY1VkFsWGkrMzcySC9YaFJJTk1MTWpQaHNmbmRIWUZ1OGpPM095N2VwQlMrWVprOEk2RlZaCjlXUjBIL0l5WXBEWG1veHNFWlk1TXE1YVhwS0x1OTJ2MzZYTHBzbEZBZ01CQUFHZ0FEQU5CZ2txaGtpRzl3MEIKQVFzRkFBT0JnUUNhV2U4UzJVRVBzZ29CSDlSb1lQc2Q0cCtMM3F5L2tpZ1hoYmNHNjBOV3NGRHdLNUxVb2txYgpxUitrZ2JBbm9Qd01aSDE2MlVPZTh0VmhoWEM2bEFmVS94ZE1PV1Nvc0djZzZ1L0ViQzJhMHlhSTNpcEpVWnRpCmQxaDVsV3JCdHhYZVhyUktKK1grVHFqZzFwT0xmU1lBTGhoWGJPY3p4MVp5QThsVlpSZkFvUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQ==
  tls.key: >-
    LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpQcm9jLVR5cGU6IDQsRU5DUllQVEVECkRFSy1JbmZvOiBJREVBLUNCQyxENDg3QkQzM0FDRjhFNDQyCgp0QUJFWUVreGlFLy9GcytIYmE2VHl6M2pkSE1YSjAvR09UcDkwRUV0OWk4NUx4SWdaQUJTWkZDb3BIU0hRelpPCjlZSm5UeGpZQkdOYlFscEcyWDBVcnNqVm9id0FUL0ZERmZaaE9DUlNVeUJhdUcrRVl1QW9HMlpkQUZEL2YzZ1cKQUk2VGIySU5tV3JOeWg4VUFxT3lpUmw4TThOWGdFUkx6MkhZSzhnVnJkV1JuRWRUa3Nvck91RTQ2ck96UTlqUAp2RkNQd0QwUWVBU01uWkNaY1JLWllYU0ZiaktzclgybXMzVEZyay9wWS9SczlkWDBrTmZOd3U0ampzeUNvbEp0CndtdStDbEtNWmJVdGpUejZFQ2NnVm05SXlQeldlUld1V0IrelRhZE5yc21BZFVYL3FPNnR1KytReUNsT25LOWoKZkRXSmJSUzQvQW5oUzh2S2F0ZVN5cGJaL2RFekdEdjhKYWRkQWZOSTRVOTRNWEtYRXgyMDVrcHRhR3dmTUxGKwpqaU1GblhtaVN1OTFFZmlwSlhuaSthMGtMR1lrY0M1RjdNUXYxaEtTK2RXOUI2MzhETnVJM0NJS1d6RzNla2lNCnB3WVUyWXVCSGp6MkZtK0pxUjRpSksxcTByczRSSUxxYjU2QXJTenBMUjNEK05MclNhM0FJMDRrWUwwa2dNeE4KOGJkYzNsQ255MkVVVisxVXVNTFZoWW5EbWZNRE0vZ1cvM2xDZERXVnAvdlNrRklSUkhtVDRKR1VpUWZabjVSKwpnZkIxdWZ4NUVvd3FHN0FWWUxDZ2ZBbFJsTjRpZzVxa21rMi9RM0F0ZVJGdE9PQjVJbTRTMFlvWUh3OFlPYjdkClVheXJPUmF5SENwcVcvNjRYOEpCTEl6eXZiK01GTXBmYThBVTl5Qy8zTFRlZUl2SlR2clZnanorZ2JZNUx5akMKdEFKSTArWkZGUkFsZXN4SW1NbW9wVkEzTnVGNTdhU2FFQm9jMWZrcHB6RUNDTGVPZXlSeEFYekpiUXRpekkxeApqZG41WVJkZEZETXVIdC95RkxxVCswa25ZYWR0eGVCVG01ZCtjZ3N0YUJHdUpSQWVFZzk3VWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==
immutable: false
kind: Secret
metadata:
  name: www-test-com
  namespace: default
type: kubernetes.io/tls
---
kind: Service
apiVersion: v1
metadata:
  name: aliyun01
spec:
  selector:
    app: aliyun01
  ports:
    - port: 80
      targetPort: 80
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: aliyun01
spec:
  selector:
    matchLabels:
      app: aliyun01
  template:
    metadata:
      labels:
        app: aliyun01
    spec:
      imagePullSecrets: # docker-registry Secrets used when pulling the image
        - name: aliyun
        - name: aliyun01
      containers:
        - name: aliyun01
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/nginx:v12

5. pv/pvc

pv	: a concrete storage volume, like the ingredients a restaurant preps in advance; a cluster-level resource
pvc	: a storage request, like the dish a customer orders; the cluster has to find a pv that matches each pvc; a namespace-level resource


1. pv
[root@k8s-master-01 v1]# mkdir -p /nfs/v{1..5}
[root@k8s-master-01 v1]# cd /nfs/v1
[root@k8s-master-01 v1]# yum install nfs-utils rpcbind -y

2. vim /etc/exports
	/nfs/v1 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
	/nfs/v2 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
	/nfs/v3 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
	/nfs/v4 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
	/nfs/v5 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)

3. systemctl start nfs-server rpcbind

4. [root@k8s-master-01 v1]# showmount -e
    Export list for k8s-master-01:
    /nfs/v5 192.168.11.0/24
    /nfs/v4 192.168.11.0/24
    /nfs/v3 192.168.11.0/24
    /nfs/v2 192.168.11.0/24
    /nfs/v1 192.168.11.0/24

5.1 Access modes

A pv has four access modes:
    1. ReadWriteMany	: read-write by many nodes
    2. ReadWriteOnce	: read-write by a single node
    3. ReadOnlyMany	: read-only by many nodes
    4. ReadWriteOncePod	: read-write by a single Pod (available since v1.22)

5.2 Manifests

#pv.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv001
  labels:
    app: pv001
spec:
  nfs:
    path: /nfs/v2
    server: 192.168.11.206  # NFS server IP (the master in this setup)
  accessModes:
    - "ReadWriteMany"
    - "ReadWriteOnce"
  capacity:
    storage: 2Gi
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv002 # must differ from pv001, or this apply just overwrites it
  labels:
    app: pv002
spec:
  nfs:
    path: /nfs/v1
    server: 192.168.11.206  # NFS server IP (the master in this setup)
  accessModes:
    - "ReadWriteMany"
    - "ReadWriteOnce"
  capacity:
    storage: 10Gi
---
#pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc
  namespace: default
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "6Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: pv-pvc
spec:
  selector:
    matchLabels:
      app: pv-pvc
  template:
    metadata:
      labels:
        app: pv-pvc
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: pvc # name of the volume declared below
      volumes:
        - name: pvc
          persistentVolumeClaim:
            claimName: pvc  # name of the PVC to bind


[root@k8s-master-01 k8s]# kubectl get pv     
    NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
    pv001   2Gi        RWO,RWX        Retain           Available                                   11s
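(The listing above was evidently captured with only pv001 applied.) As a follow-up check, the 6Gi RWX claim cannot fit the 2Gi pv001, so once the 10Gi PV exists it should bind that one:

kubectl get pvc   # STATUS turns Bound when a matching PV (>= 6Gi, RWX) exists
kubectl get pv    # the bound PV's CLAIM column reads default/pvc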


posted @ 2022-01-10 21:48  迪迦张