Creating a StorageClass in k8s

1. First create the service account and grant it permissions: rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
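
Apply the RBAC manifest and confirm the objects were created (a minimal check, assuming the manifest above is saved as rbac.yaml):

kubectl apply -f rbac.yaml
kubectl get serviceaccount nfs-client-provisioner -n default
kubectl get clusterrole nfs-client-provisioner-runner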

2. Create the provisioner Deployment: provisioner.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: default
  labels:
    app: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 1c97d48b39-khf68.cn-beijing.nas.aliyuncs.com
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 1c97d48b39-khf68.cn-beijing.nas.aliyuncs.com
            path: /ifs/kubernetes
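
Apply it and make sure the provisioner pod starts and can reach the NFS export (the NFS server address and path are the ones from the manifest above; replace them with your own share):

kubectl apply -f provisioner.yaml
kubectl get pods -l app=nfs-client-provisioner -n default
kubectl logs -l app=nfs-client-provisioner -n default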

3. Create the StorageClass: storageclass.yaml

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "false"
reclaimPolicy: Delete
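
The provisioner field must match the PROVISIONER_NAME environment variable set in the Deployment above (fuseim.pri/ifs here), otherwise claims against this class will stay Pending. Apply and verify:

kubectl apply -f storageclass.yaml
kubectl get storageclass managed-nfs-storage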

4. Create a PVC to test dynamic provisioning

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Mi
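
Apply the claim and check that it becomes Bound to a dynamically created PV. The file name pvc-test.yaml below is only an assumption, since the post does not name this file; the PVC has no namespace set, so it lands in default:

kubectl apply -f pvc-test.yaml   # assumed file name for the PVC manifest above
kubectl get pvc test-claim
kubectl get pv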

5. Example workload YAML using the PVC

apiVersion: apps/v1
kind: Deployment
metadata:
  name: java-center
  labels:
    app: java-center
spec:
  replicas: 1
  selector:
    matchLabels:
      app: java-center
  template:
    metadata:
      labels:
        app: java-center
    spec:
      nodeName: sjwd-ywtest
      containers:
      - name: java-center
        image: registry-vpc.cn-beijing.aliyuncs.com/sjwd/wenduedu:platform-center-latest
        command:
          - 'sh'
          - '-c'
          - '/mnt/start.sh'
        volumeMounts:
          - mountPath: /tmp
            name: test
        resources:
          limits:
            cpu: 100m
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 500Mi
        ports:
        - containerPort: 7002
      volumes:
        - name: test
          persistentVolumeClaim:
            claimName: test-claim
---
apiVersion: v1
kind: Service
metadata:
  name: java-center
  labels:
    name: java-center
spec:
  type: NodePort    # or use LoadBalancer
  externalTrafficPolicy: Cluster
  ports:
  - port: 8090        # Service (ClusterIP) port, used for in-cluster access
    targetPort: 7002  # must match the port exposed by the container
    protocol: TCP
    nodePort: 30123   # opened on every node; used for external access
  selector:
    app: java-center
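
Deploy it and reach the application through the NodePort from outside the cluster. The file name java-center.yaml and the <node-ip> placeholder are assumptions for illustration; the node name, image, and ports are the ones from the example above:

kubectl apply -f java-center.yaml   # assumed file name for the Deployment + Service above
kubectl get pods -l app=java-center
kubectl get svc java-center
curl http://<node-ip>:30123/        # any node IP works with externalTrafficPolicy: Cluster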

 
