Building automated application delivery with operator-sdk + ansible

Official documentation

Environment

  • kubernetes v1.22.6+k0s (a virtual cluster created with vCluster to avoid polluting the host cluster; see the reference docs and the sketch after this list)
  • kubectl v1.22.8
  • operator-sdk v1.19.1
  • docker-ce 20.10.12
  • Alibaba Cloud Container Registry
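
The vCluster step itself is not shown in these notes; a rough sketch of what it looks like (the cluster and namespace names below are made up for illustration, not the values actually used):

# illustrative only: create a disposable virtual cluster and point kubectl at it
vcluster create app520-test -n vcluster-app520
vcluster connect app520-test -n vcluster-app520
kubectl get nodes    # now talks to the virtual cluster, not the host cluster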

Test application architecture

graph BT
  MySQL --> Nacos
  Gateway --> Nacos
  Gateway --> Nginx
  Postgres --> Gateway
  Redis --> Gateway

The application uses a microservices architecture:

  • Gateway represents the backend services
  • Nacos provides service discovery and configuration
  • Business data is stored in Postgres
  • Redis is used for caching
  • Portal represents the frontend services

Application Operator initialization

# assume the application is named app520
[root@k8stest ~]# mkdir app520-operator
[root@k8stest ~]# cd app520-operator/
# --plugins ansible scaffolds the ansible-based directories and files; --domain is the organization's domain
[root@k8stest app520-operator]# operator-sdk init --domain example.com --plugins ansible
Writing kustomize manifests for you to edit...
Next: define a resource with:
$ operator-sdk create api
# inspect the generated directories and files
[root@k8stest app520-operator]# tree
.
├── config
│   ├── default
│   │   ├── kustomization.yaml
│   │   ├── manager_auth_proxy_patch.yaml
│   │   └── manager_config_patch.yaml
│   ├── manager
│   │   ├── controller_manager_config.yaml
│   │   ├── kustomization.yaml
│   │   └── manager.yaml
│   ├── manifests
│   │   └── kustomization.yaml
│   ├── prometheus
│   │   ├── kustomization.yaml
│   │   └── monitor.yaml
│   ├── rbac
│   │   ├── auth_proxy_client_clusterrole.yaml
│   │   ├── auth_proxy_role_binding.yaml
│   │   ├── auth_proxy_role.yaml
│   │   ├── auth_proxy_service.yaml
│   │   ├── kustomization.yaml
│   │   ├── leader_election_role_binding.yaml
│   │   ├── leader_election_role.yaml
│   │   ├── role_binding.yaml
│   │   ├── role.yaml
│   │   └── service_account.yaml
│   ├── scorecard
│   │   ├── bases
│   │   │   └── config.yaml
│   │   ├── kustomization.yaml
│   │   └── patches
│   │       ├── basic.config.yaml
│   │       └── olm.config.yaml
│   └── testing
│       ├── debug_logs_patch.yaml
│       ├── kustomization.yaml
│       ├── manager_image.yaml
│       └── pull_policy
│           ├── Always.yaml
│           ├── IfNotPresent.yaml
│           └── Never.yaml
├── Dockerfile
├── Makefile
├── molecule
│   ├── default
│   │   ├── converge.yml
│   │   ├── create.yml
│   │   ├── destroy.yml
│   │   ├── kustomize.yml
│   │   ├── molecule.yml
│   │   ├── prepare.yml
│   │   └── verify.yml
│   └── kind
│       ├── converge.yml
│       ├── create.yml
│       ├── destroy.yml
│       └── molecule.yml
├── playbooks
├── PROJECT
├── requirements.yml
├── roles
└── watches.yaml

16 directories, 45 files
[root@k8stest app520-operator]#
# create the application component APIs
# --group is the component category; we split the application into middleware, backend, and frontend. Multiple groups are not supported by default, so edit the "app520-operator/PROJECT" file and add 'multigroup: true'
# --kind is the specific component or service
# --version is the API version, bumped as the API matures
# --generate-role generates an ansible role skeleton

# create the middleware group and its kinds
[root@k8stest app520-operator]# operator-sdk create api --group middleware --version v1alpha1 --kind Mysql --generate-role
Writing kustomize manifests for you to edit...
[root@k8stest app520-operator]# operator-sdk create api --group middleware --version v1alpha1 --kind Postgres --generate-role
Writing kustomize manifests for you to edit...
[root@k8stest app520-operator]# operator-sdk create api --group middleware --version v1alpha1 --kind Redis --generate-role
Writing kustomize manifests for you to edit...
[root@k8stest app520-operator]# operator-sdk create api --group middleware --version v1alpha1 --kind Nacos --generate-role
Writing kustomize manifests for you to edit...
[root@k8stest app520-operator]#

# create the backend group and one of its service kinds, Gateway
[root@k8stest app520-operator]# operator-sdk create api --group backend --version v1alpha1 --kind Gateway --generate-role
Writing kustomize manifests for you to edit...

# create the frontend group and one of its service kinds, Portal
[root@k8stest app520-operator]# operator-sdk create api --group frontend --version v1alpha1 --kind Portal --generate-role
Writing kustomize manifests for you to edit...
[root@k8stest app520-operator]#
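
Each create api call also appends an entry to watches.yaml, which is how the ansible operator maps a watched custom resource kind to the role that reconciles it. Based on the scaffolding conventions, the file should end up looking roughly like this excerpt:

# watches.yaml (sketch of the generated entries)
---
- version: v1alpha1
  group: middleware.example.com
  kind: Mysql
  role: mysql
- version: v1alpha1
  group: middleware.example.com
  kind: Nacos
  role: nacos
- version: v1alpha1
  group: backend.example.com
  kind: Gateway
  role: gateway
- version: v1alpha1
  group: frontend.example.com
  kind: Portal
  role: portal
# Postgres and Redis entries follow the same pattern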

Configuring the service roles

# inspect the generated roles
[root@k8stest app520-operator]# ls roles/
gateway  mysql  nacos  portal  postgres  redis
[root@k8stest app520-operator]# tree roles/portal/
roles/portal/
├── defaults
│   └── main.yml
├── files
├── handlers
│   └── main.yml
├── meta
│   └── main.yml
├── README.md
├── tasks
│   └── main.yml
├── templates
└── vars
    └── main.yml

7 directories, 6 files
[root@k8stest app520-operator]#

# The files we mainly edit:
# defaults/main.yml defines the default variables, similar to values in helm
# tasks/main.yml defines how the service is orchestrated on kubernetes, similar to templates in helm
# Below we use nacos as an example to walk through the process
# nacos stores its data in mysql, so mysql is built first
# mysql/defaults/main.yml (default variables)
---
# defaults file for Mysql
name: 'mysql'
image: 'mysql:5.7.36'
root_password: 'Root@2022'
database: 'nacos_config'
user: 'nacos'
password: 'Nacos@2022'
svctype: 'NodePort'
scname: 'nfs-storage'
pvcsize: '10Gi'
requests:
  cpu: '200m'
  mem: '500Mi'
limits:
  cpu: '300m'
  mem: '500Mi'
# mysql/tasks/main.yml (the deployment yaml templates)
# mysql-cm: ConfigMap carrying the nacos init sql script
# mysql-headless-svc: headless service for the mysql StatefulSet DNS records
# mysql-svc: the service nacos uses to connect to mysql
# mysql-sts: the mysql StatefulSet
---
# tasks file for Mysql
- name: deploy mysql-cm
  kubernetes.core.k8s:
    definition:
      kind: ConfigMap
      apiVersion: v1
      metadata:
        name: '{{ name }}-initsql'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      data:
        init.sql: |
          use {{ database }};
          /******************************************/
          /*   Database name = nacos_config   */
          /*   Table name = config_info   */
          /******************************************/
          CREATE TABLE `config_info` (
            `id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'id',
            `data_id` varchar(255) NOT NULL COMMENT 'data_id',
            `group_id` varchar(255) DEFAULT NULL,
            `content` longtext NOT NULL COMMENT 'content',
            `md5` varchar(32) DEFAULT NULL COMMENT 'md5',
            `gmt_create` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
            `gmt_modified` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'modify time',
            `src_user` text COMMENT 'source user',
            `src_ip` varchar(50) DEFAULT NULL COMMENT 'source ip',
            `app_name` varchar(128) DEFAULT NULL,
            `tenant_id` varchar(128) DEFAULT '' COMMENT 'tenant field',
            `c_desc` varchar(256) DEFAULT NULL,
            `c_use` varchar(64) DEFAULT NULL,
            `effect` varchar(64) DEFAULT NULL,
            `type` varchar(64) DEFAULT NULL,
            `c_schema` text,
            PRIMARY KEY (`id`),
            UNIQUE KEY `uk_configinfo_datagrouptenant` (`data_id`,`group_id`,`tenant_id`)
          ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='config_info';
           ... ... (remaining schema omitted)


- name: deploy mysql-headless-svc
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: '{{ name }}-headless'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          app: '{{ name }}'
        type: ClusterIP
        clusterIP: None
        ports:
        - name: mysql
          port: 3306
          protocol: TCP

- name: deploy mysql-svc
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: '{{ name }}'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          app: '{{ name }}'
        type: '{{ svctype }}'
        ports:
        - name: mysql
          port: 3306
          protocol: TCP

- name: deploy mysql-sts
  kubernetes.core.k8s:
    definition:
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: '{{ name }}'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          matchLabels:
            app: '{{ name }}'
        serviceName: '{{ name }}-headless'
        replicas: 1
        template:
          metadata:
            labels:
              app: '{{ name }}'
          spec:
            initContainers: # if the volume is block storage, the lost+found directory must be removed first
            - name: initdata
              image: '{{ image }}'
              imagePullPolicy: IfNotPresent
              command:
                [
                  "sh",
                  "-c",
                  "if [ -e /data/lost+found ];then rm /data/lost+found -rf;fi",
                ]
              securityContext:
                privileged: true
              volumeMounts:
                - name: data
                  mountPath: /data
            containers:
            - name: '{{ name }}'
              image: '{{ image }}'
              env:
              - name: MYSQL_ROOT_PASSWORD
                value: '{{ root_password }}'
              - name: MYSQL_DATABASE
                value: '{{ database }}'
              - name: MYSQL_USER
                value: '{{ user }}'
              - name: MYSQL_PASSWORD
                value: '{{ password }}'
              args:
              - --character-set-server=utf8mb4
              - --collation-server=utf8mb4_unicode_ci
              ports:
              - containerPort: 3306
                name: mysql
              resources:
                requests:
                  cpu: '{{ requests.cpu }}'
                  memory: '{{ requests.mem }}'
                limits:
                  cpu: '{{ limits.cpu }}'
                  memory: '{{ limits.mem }}'
              livenessProbe:
                tcpSocket:
                  port: 3306
                initialDelaySeconds: 120
                timeoutSeconds: 5
                successThreshold: 1
                failureThreshold: 10
                periodSeconds: 10
              volumeMounts:
              - name: data
                mountPath: /var/lib/mysql
              - name: localtime
                mountPath: /etc/localtime
              - name: mysqlinit
                mountPath: /docker-entrypoint-initdb.d/init.sql
                subPath: init.sql
            volumes:
            - name: mysqlinit
              configMap:
                name: '{{ name }}-initsql'
            - name: localtime
              hostPath:
                path: /usr/share/zoneinfo/Asia/Shanghai
        volumeClaimTemplates:
        - metadata:
            name: data
            labels:
              app: '{{ name }}'
          spec:
            storageClassName: '{{ scname }}'
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: '{{ pvcsize }}'
# Below is the nacos configuration
# nacos/defaults/main.yml
---
# defaults file for Nacos
name: 'nacos'
image: 'nacos/nacos-server:2.0.3'
svctype: 'NodePort'
requests:
  cpu: '300m'
  mem: '2500Mi'
limits:
  cpu: '500m'
  mem: '2500Mi'

mysql_name: mysql
mysql_database: nacos_config   # must match the 'database' default in the mysql role
mysql_user: nacos
mysql_password: Nacos@2022

# nacos/tasks/main.yml
---
# tasks file for Nacos
- name: deploy nacos-headless-svc
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: '{{ name }}-headless'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          app: '{{ name }}'
        type: ClusterIP
        clusterIP: None
        ports:
        - name: http
          port: 8848
          protocol: TCP

- name: deploy nacos-svc
  kubernetes.core.k8s:
    definition:
      apiVersion: v1
      kind: Service
      metadata:
        name: '{{ name }}'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          app: '{{ name }}'
        type: '{{ svctype }}'
        ports:
        - name: http
          port: 8848
          protocol: TCP

- name: deploy nacos-sts
  kubernetes.core.k8s:
    definition:
      apiVersion: apps/v1
      kind: StatefulSet
      metadata:
        name: '{{ name }}'
        namespace: '{{ ansible_operator_meta.namespace }}'
        labels:
          app: '{{ name }}'
      spec:
        selector:
          matchLabels:
            app: '{{ name }}'
        serviceName: '{{ name }}-headless'
        replicas: 1
        template:
          metadata:
            labels:
              app: '{{ name }}'
          spec:
            initContainers:    # wait until the mysql service is ready before starting nacos
            - name: check-mysql
              image: '{{ image }}'
              imagePullPolicy: IfNotPresent
              command:
                [
                  "sh",
                  "-c",
                  "until nc -zvw3 {{ mysql_name }} 3306; do echo waiting for mysql; sleep 2; done",
                ]
            containers:
              - name: '{{ name }}'
                image: '{{ image }}'
                env:
                - name: TZ
                  value: 'Asia/Shanghai'
                - name: PREFER_HOST_MODE
                  value: hostname
                - name: MODE
                  value: standalone
                - name: SPRING_DATASOURCE_PLATFORM
                  value: mysql
                - name: MYSQL_SERVICE_HOST
                  value: '{{ mysql_name }}'
                - name: MYSQL_SERVICE_DB_NAME
                  value: '{{ mysql_database }}'
                - name: MYSQL_SERVICE_PORT
                  value: "3306"
                - name: MYSQL_SERVICE_USER
                  value: '{{ mysql_user }}'
                - name: MYSQL_SERVICE_PASSWORD
                  value: '{{ mysql_password }}'
                - name: MYSQL_SERVICE_DB_PARAM
                  value: characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&&useSSL=false
                ports:
                - containerPort: 8848
                  name: http
                resources:
                  requests:
                    cpu: '{{ requests.cpu }}'
                    memory: '{{ requests.mem }}'
                  limits:
                    cpu: '{{ limits.cpu }}'
                    memory: '{{ limits.mem }}'
                livenessProbe:
                  tcpSocket:
                    port: 8848
                  initialDelaySeconds: 30
                  timeoutSeconds: 3
                  successThreshold: 1
                  failureThreshold: 3
                  periodSeconds: 10
                volumeMounts:
                  - name: localtime
                    mountPath: /etc/localtime
            volumes:
              - name: localtime
                hostPath:
                  path: /usr/share/zoneinfo/Asia/Shanghai
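
With the roles in place, a custom resource per kind drives the actual deployment: the ansible operator passes every field under spec to the role as a variable, overriding the defaults shown above. A minimal sketch of what such manifests could look like (names, namespace, and field values are illustrative, not taken from the original):

# hypothetical custom resources; spec keys mirror the role defaults
apiVersion: middleware.example.com/v1alpha1
kind: Mysql
metadata:
  name: mysql
  namespace: app520
spec:
  svctype: ClusterIP
  scname: nfs-storage
  pvcsize: 20Gi
---
apiVersion: middleware.example.com/v1alpha1
kind: Nacos
metadata:
  name: nacos
  namespace: app520
spec:
  mysql_name: mysql
  mysql_database: nacos_config

Applying these with kubectl apply -f then triggers the mysql and nacos roles in the target namespace.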

RBAC configuration

By default, the generated operator controller only has the following permissions on native kubernetes objects:

# config/rbac/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
  ##
  ## Base operator rules
  ##
  - apiGroups:
      - ""
    resources:
      - secrets
      - pods
      - pods/exec
      - pods/log
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - deployments
      - daemonsets
      - replicasets
      - statefulsets
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch

We need to add the resource types our roles use, such as configmaps, services, and persistentvolumeclaims:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: manager-role
rules:
  ##
  ## Base operator rules
  ##
  - apiGroups:
      - ""
    resources:
      - secrets
      - pods
      - pods/exec
      - pods/log
      - services
      - services/finalizers
      - endpoints
      - persistentvolumeclaims
      - events
      - configmaps
      - serviceaccounts
      - namespaces
      - persistentvolumes
      - resourcequotas
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - apps
    resources:
      - deployments
      - daemonsets
      - replicasets
      - statefulsets
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch

Building the image
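
The scaffolded Makefile already covers this step; below is a rough sketch using an assumed Alibaba Cloud registry path (the registry namespace and tag are placeholders, not the author's values):

# placeholder image path; substitute your own registry namespace
IMG=registry.cn-hangzhou.aliyuncs.com/your-ns/app520-operator:v0.0.1
make docker-build docker-push IMG=$IMG      # build and push the ansible operator image
make deploy IMG=$IMG                        # install the CRDs and deploy the controller via kustomize
kubectl get pods -n app520-operator-system  # default namespace created by the scaffolding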


To be continued
