Provisioning Volumes on Nomad with Ceph CSI

Reference: https://itnext.io/provision-volumes-from-external-ceph-storage-on-kubernetes-and-nomad-using-ceph-csi-7ad9b15e9809

The key components used in this article are as follows:

  • Nomad v1.2.3
  • Ceph Storage v14 (Nautilus)
  • Ceph CSI v3.3.1

Provisioning volumes on Nomad

First, add the following to the Nomad client configuration so that Docker containers can run in privileged mode on the Nomad client nodes.

cat <<EOC >> /etc/nomad.d/client.hcl 
plugin "docker" {
  config {
    allow_privileged = true
  }
}
EOC

systemctl restart nomad

Before continuing, load the RBD kernel module on all Nomad client nodes.

sudo modprobe rbd;
sudo lsmod |grep rbd;
rbd                    83733  0
libceph               306750  1 rbd
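
To keep the module loaded across reboots, a minimal sketch (assuming a systemd-based distribution; the file name is arbitrary):

echo rbd | sudo tee /etc/modules-load.d/rbd.conf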

CSI consists of a controller plugin and a node plugin.

First, create a Ceph CSI controller job with type service.
Modify the following values before creating it (see the example commands after this list):

  • clusterID: obtain with ceph -s | grep id
  • monitors: obtain with ceph -s | grep mon; use IP addresses, not hostnames
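
As a quick cross-check, the same values can also be read directly (a sketch, assuming the ceph CLI is available on an admin node):

ceph fsid       # prints the clusterID
ceph mon dump   # lists the monitor IP addresses
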
cat <<EOC > ceph-csi-plugin-controller.nomad
job "ceph-csi-plugin-controller" {
  datacenters = ["dc1"]
group "controller" {
    network {    
      port "metrics" {}
    }
    task "ceph-controller" {
template {
        data        = <<EOF
[{
    "clusterID": "ef8394c8-2b17-435d-ae47-9dff271875d1",
    "monitors": [
        "10.103.3.40:6789",
  "10.103.3.41:6789",
  "10.103.3.42:6789"
    ]
}]
EOF
        destination = "local/config.json"
        change_mode = "restart"
      }
      driver = "docker"
      config {
        image = "quay.io/cephcsi/cephcsi:v3.3.1"
        volumes = [
          "./local/config.json:/etc/ceph-csi-config/config.json"
        ]    
        args = [
          "--type=rbd",
          "--controllerserver=true",
          "--drivername=rbd.csi.ceph.com",         
          "--endpoint=unix://csi/csi.sock",
          "--nodeid=\${node.unique.name}",
    "--instanceid=\${node.unique.name}-controller", 
          "--pidlimit=-1",
    "--logtostderr=true",
          "--v=5",    
          "--metricsport=\$\${NOMAD_PORT_metrics}"
        ]  
      }   
   resources {
        cpu    = 500
        memory = 256
      }
      service {
        name = "ceph-csi-controller"
        port = "metrics"
        tags = [ "prometheus" ]
      }
csi_plugin {
        id        = "ceph-csi"
        type      = "controller"
        mount_dir = "/csi"
      }
    }
  }
}
EOC

Next, create a Ceph CSI node job:

cat <<EOC > ceph-csi-plugin-nodes.nomad
job "ceph-csi-plugin-nodes" {
  datacenters = ["dc1"]
  type        = "system"
  group "nodes" {
    network {    
      port "metrics" {}
    }
 
    task "ceph-node" {
      driver = "docker"
      template {
        data        = <<EOF
[{
    "clusterID": "62c42aed-9839-4da6-8c09-9d220f56e924",
    "monitors": [
        "10.0.0.3:6789",
  "10.0.0.4:6789",
  "10.0.0.5:6789" 
    ]
}]
EOF
        destination = "local/config.json"
        change_mode = "restart"
      }
      config {
        image = "quay.io/cephcsi/cephcsi:v3.3.1"
        volumes = [
          "./local/config.json:/etc/ceph-csi-config/config.json"
        ]
        mounts = [
          {
            type     = "tmpfs"
            target   = "/tmp/csi/keys"
            readonly = false
            tmpfs_options = {
              size = 1000000 # size in bytes
            }
          }
        ]
        args = [
          "--type=rbd",
          "--drivername=rbd.csi.ceph.com",        
          "--nodeserver=true",
          "--endpoint=unix://csi/csi.sock",
          "--nodeid=\${node.unique.name}",
          "--instanceid=\${node.unique.name}-nodes", 
          "--pidlimit=-1",
    "--logtostderr=true",
          "--v=5",       
          "--metricsport=\$\${NOMAD_PORT_metrics}"
        ]  
        privileged = true
      }   
   resources {
        cpu    = 500
        memory = 256
      }
      service {
        name = "ceph-csi-nodes"
        port = "metrics"
        tags = [ "prometheus" ]
      }
csi_plugin {
        id        = "ceph-csi"
        type      = "node"
        mount_dir = "/csi"
      }
    }
  }
}
EOC

Because this node job has type system, a ceph-csi node container will be started on every Nomad client node.

Run the Ceph CSI jobs:

nomad job run ceph-csi-plugin-controller.nomad;
nomad job run ceph-csi-plugin-nodes.nomad;

Check the status of the ceph-csi plugin:

nomad plugin status ceph-csi;
ID                   = ceph-csi
Provider             = rbd.csi.ceph.com
Version              = v3.3.1
Controllers Healthy  = 1
Controllers Expected = 1
Nodes Healthy        = 2
Nodes Expected       = 2
Allocations
ID        Node ID   Task Group  Version  Desired  Status   Created    Modified
b6268d6d  457a8291  controller  0        run      running  1d21h ago  1d21h ago
ec265d25  709ee9cc  nodes       0        run      running  1d21h ago  1d21h ago
4cd7dffa  457a8291  nodes       0        run      running  1d21h ago  1d21h ago

Volumes from the external Ceph storage can now be mounted through the ceph-csi driver.

Let's create a Ceph pool named myPool and an admin user myPoolAdmin for it:

# Create a ceph pool:
sudo ceph osd pool create myPool 64 64
sudo rbd pool init myPool;
# create admin user for pool.
sudo ceph auth get-or-create-key client.myPoolAdmin mds 'allow *' mgr 'allow *' mon 'allow *' osd 'allow * pool=myPool'
AQCKf4JgHPVxAxAALZ8ny4/R7s6/3rZWC2o5vQ==

Now we need a volume to register with Nomad, so create a volume definition:

cat <<EOF > ceph-volume.hcl
type = "csi"
id   = "ceph-mysql"
name = "ceph-mysql"
external_id     = "0001-0024-ef8394c8-2b17-435d-ae47-9dff271875d1-0000000000000001-00000000-1111-2222-bbbb-cacacacacaca"
capacity_max = "200G"
capacity_min = "100G"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

plugin_id       = "ceph-csi"
secrets {
  userID  = "myPoolAdmin"
  userKey = "AQB0c/VhGvzwAhAAPtGhK1jkuPZaQKdGgsOkgA=="
}
context {
  clusterID = "ef8394c8-2b17-435d-ae47-9dff271875d1"
  pool      = "myPool"
  imageFeatures = "layering"
}
EOF

First, modify the values in the secrets block (the userID and userKey of the pool admin user created above) and in the context block.
Then work out the external_id, which is composed of the following parts (an example breakdown; use your own cluster ID, pool ID, and UUID):

  • CSI ID version: 0001
  • Cluster ID length: 0024
  • Cluster ID: 62c42aed-9839-4da6-8c09-9d220f56e924
  • Pool ID: 0000000000000009
  • UUID: 00000000-1111-2222-bbbb-cacacacacaca

The pool ID can be obtained with the following command; here you can see that myPool has ID 9.

sudo ceph osd lspools
1 cephfs_data
2 cephfs_metadata
3 foo
4 bar
5 .rgw.root
6 default.rgw.control
7 default.rgw.meta
8 default.rgw.log
9 myPool
10 default.rgw.buckets.index
11 default.rgw.buckets.data

The UUID is the unique ID of the volume. To create another volume from the same pool, you need to pick a new UUID.
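
Putting the pieces together, a sketch for assembling an external_id from your own values (assuming the ceph and uuidgen CLIs are available; 9 is myPool's ID from the listing above, and 0024 is the hex-encoded length of a 36-character cluster ID):

FSID=$(ceph fsid)                 # clusterID
POOL_ID_HEX=$(printf "%016x" 9)   # pool ID, hex-encoded and zero-padded to 16 characters
UUID=$(uuidgen | tr 'A-Z' 'a-z')  # new, unique volume UUID
echo "0001-0024-${FSID}-${POOL_ID_HEX}-${UUID}"

The same UUID is then used in the RBD image name csi-vol-<UUID> below.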

Before registering the volume with Nomad, an image named csi-vol-<UUID> must first be created in the myPool pool:

sudo rbd create csi-vol-00000000-1111-2222-bbbb-cacacacacaca --size 1024 --pool myPool --image-feature layering;
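
A quick optional check that the image exists (assuming the rbd CLI on a Ceph admin node):

sudo rbd ls --pool myPool
sudo rbd info myPool/csi-vol-00000000-1111-2222-bbbb-cacacacacaca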

Now register the volume with Nomad:

nomad volume register ceph-volume.hcl;

Check the status of the registered volume:

nomad volume status;
Container Storage Interface
ID        Name        Plugin ID  Schedulable  Access Mode
ceph-mys  ceph-mysql  ceph-csi   true         <none>

 # To deregister the volume:
 # nomad volume deregister ceph-mysql
 # nomad volume deregister -force ceph-mysql

Let's mount the volume provisioned through Ceph CSI and run a sample MySQL server job on Nomad:

cat <<EOF > mysql-server.nomad
job "mysql-server" {
  datacenters = ["dc1"]
  type        = "service"
  group "mysql-server" {
    count = 1
    volume "ceph-mysql" {
      type      = "csi"
      read_only = false
      source    = "ceph-mysql"
      attachment_mode = "file-system"
      access_mode     = "single-node-writer"
    }
    network {
      port "db" {
        static = 3306
      }
    }
    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
    task "mysql-server" {
      driver = "docker"
      volume_mount {
        volume      = "ceph-mysql"
        destination = "/srv"
        read_only   = false
      }
      env {
        MYSQL_ROOT_PASSWORD = "password"
      }
      config {
        image = "hashicorp/mysql-portworx-demo:latest"
        args  = ["--datadir", "/srv/mysql"]
        ports = ["db"]
      }
      resources {
        cpu    = 500
        memory = 1024
      }
      service {
        name = "mysql-server"
        port = "db"
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
EOF

# run mysql job.
nomad job run mysql-server.nomad;

Let's check that the volume from Ceph RBD is mounted by exec-ing into the allocated mysql-server container.
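
The allocation ID used below (bfe37c92) is just an example; yours is listed in the Allocations section of:

nomad job status mysql-server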

nomad alloc exec bfe37c92 sh
# df -h
Filesystem      Size  Used Avail Use% Mounted on
...
/dev/rbd0       976M  180M  781M  19% /srv
...

As shown above, the ceph-mysql volume is mounted at the /srv path.

Now let's check whether the MySQL data survives after the mysql-server job is resubmitted on Nomad.

Let's connect to the MySQL server inside the container:

mysql -u root -p -D itemcollection;
... type the password of MYSQL_ROOT_PASSWORD
mysql> select * from items;
+----+----------+
| id | name     |
+----+----------+
|  1 | bike     |
|  2 | baseball |
|  3 | chair    |
+----+----------+

Let's add some rows:

INSERT INTO items (name) VALUES ('glove');
INSERT INTO items (name) VALUES ('hat');
INSERT INTO items (name) VALUES ('keyboard');

Make sure the rows were inserted successfully:

mysql> select * from items;
+----+----------+
| id | name     |
+----+----------+
|  1 | bike     |
|  2 | baseball |
|  3 | chair    |
|  4 | glove    |
|  5 | hat      |
|  6 | keyboard |
+----+----------+
6 rows in set (0.00 sec)

Now stop the mysql-server job:

nomad stop -purge mysql-server;

Then submit the mysql-server job again:

nomad job run mysql-server.nomad

Check that the MySQL data is still there. After exec-ing into the newly allocated mysql-server container, type the following as before:

mysql -u root -p -D itemcollection;
... type the password of MYSQL_ROOT_PASSWORD
mysql> select * from items;
+----+----------+
| id | name     |
+----+----------+
|  1 | bike     |
|  2 | baseball |
|  3 | chair    |
|  4 | glove    |
|  5 | hat      |
|  6 | keyboard |
+----+----------+
6 rows in set (0.00 sec)

Even though the mysql-server job was restarted on Nomad, the MySQL server did not lose any data.

Dynamic volume creation with CSI

Create a pool

ceph osd pool create nomad 64 64
rbd pool init nomad
ceph auth get-or-create client.nomad mon 'profile rbd' osd 'profile rbd pool=nomad' mgr 'profile rbd pool=nomad'
[client.nomad]
	key = AQCLGw5ipUPSAxAAVb4B2OJPMbsmY83OjSzjpg==

Create the controller job

job "ceph-csi-plugin-controller" {
  datacenters = ["dc1"]
  group "controller" {
    network {
      port "metrics" {}
    }
    task "ceph-controller" {
      template {
        data        = <<EOF
[{
    "clusterID": "ef8394c8-2b17-435d-ae47-9dff271875d1",
    "monitors": [
        "10.103.3.40:6789",
        "10.103.3.41:6789",
        "10.103.3.42:6789"
    ]
}]
EOF
        destination = "local/config.json"
        change_mode = "restart"
      }
      driver = "docker"
      config {
        image = "quay.io/cephcsi/cephcsi:v3.3.1"
        volumes = [
          "./local/config.json:/etc/ceph-csi-config/config.json"
        ]
        mounts = [
          {
            type     = "tmpfs"
            target   = "/tmp/csi/keys"
            readonly = false
            tmpfs_options = {
              size = 1000000 # size in bytes
            }
          }
        ]
        args = [
          "--type=rbd",
          "--controllerserver=true",
          "--drivername=rbd.csi.ceph.com",
          "--endpoint=unix://csi/csi.sock",
          "--nodeid=${node.unique.name}",
          "--instanceid=${node.unique.name}-controller",
          "--pidlimit=-1",
          "--logtostderr=true",
          "--v=5",
          "--metricsport=$${NOMAD_PORT_metrics}"
        ]
      }
      resources {
        cpu    = 500
        memory = 256
      }
      service {
        name = "ceph-csi-controller"
        port = "metrics"
        tags = [ "prometheus" ]
      }
      csi_plugin {
        id        = "ceph-csi"
        type      = "controller"
        mount_dir = "/csi"
      }
    }
  }
}

Create the node job

job "ceph-csi-plugin-nodes" {
  datacenters = ["dc1"]
  type        = "system"
  group "nodes" {
    network {
      port "metrics" {}
    }
    task "ceph-node" {
      driver = "docker"
      template {
        data        = <<EOF
[{
    "clusterID": "ef8394c8-2b17-435d-ae47-9dff271875d1",
    "monitors": [
        "10.103.3.40:6789",
        "10.103.3.41:6789",
        "10.103.3.42:6789"
    ]
}]
EOF
        destination = "local/config.json"
        change_mode = "restart"
      }
      config {
        image = "quay.io/cephcsi/cephcsi:v3.3.1"
        volumes = [
          "./local/config.json:/etc/ceph-csi-config/config.json"
        ]
        mounts = [
          {
            type     = "tmpfs"
            target   = "/tmp/csi/keys"
            readonly = false
            tmpfs_options = {
              size = 1000000 # size in bytes
            }
          }
        ]
        args = [
          "--type=rbd",
          "--drivername=rbd.csi.ceph.com",
          "--nodeserver=true",
          "--endpoint=unix://csi/csi.sock",
          "--nodeid=${node.unique.name}",
          "--instanceid=${node.unique.name}-nodes",
          "--pidlimit=-1",
          "--logtostderr=true",
          "--v=5",
          "--metricsport=$${NOMAD_PORT_metrics}"
        ]
        privileged = true
      }
      resources {
        cpu    = 500
        memory = 256
      }
      service {
        name = "ceph-csi-nodes"
        port = "metrics"
        tags = [ "prometheus" ]
      }
      csi_plugin {
        id        = "ceph-csi"
        type      = "node"
        mount_dir = "/csi"
      }
    }
  }
}

Create a test volume

For the userKey here I am currently still using the admin user's key rather than the client.nomad key generated above.
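
Either key can be retrieved with (a sketch, assuming the ceph CLI):

ceph auth get-key client.admin   # the admin key used below
ceph auth get-key client.nomad   # the pool-scoped key created above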

id = "ceph-mysql"
name = "ceph-mysql"
type = "csi"
plugin_id = "ceph-csi"
capacity_max = "200G"
capacity_min = "100G"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

secrets {
  userID  = "admin"
  userKey = "AQBWYPVhs0XZIhAACN0PAHEJTitvq514oVC12A=="
}

parameters {
  clusterID = "ef8394c8-2b17-435d-ae47-9dff271875d1"
  pool = "nomad"
  imageFeatures = "layering"
}
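
With this definition saved to a file (e.g. ceph-volume.hcl, an assumed file name), the volume can be provisioned dynamically; ceph-csi then creates the backing RBD image itself, so no manual rbd create is needed:

nomad volume create ceph-volume.hcl
nomad volume status ceph-mysql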

Test

job "mysql-server" {
  datacenters = ["dc1"]
  type        = "service"
  group "mysql-server" {
    count = 1
    volume "ceph-mysql" {
      type      = "csi"
      attachment_mode = "file-system"
      access_mode     = "single-node-writer"
      read_only = false
      source    = "ceph-mysql"
    }
    network {
      port "db" {
        static = 3306
      }
    }
    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }
    task "mysql-server" {
      driver = "docker"
      volume_mount {
        volume      = "ceph-mysql"
        destination = "/srv"
        read_only   = false
      }
      env {
        MYSQL_ROOT_PASSWORD = "password"
      }
      config {
        image = "hashicorp/mysql-portworx-demo:latest"
        args  = ["--datadir", "/srv/mysql"]
        ports = ["db"]
      }
      resources {
        cpu    = 500
        memory = 1024
      }
      service {
        name = "mysql-server"
        port = "db"
        check {
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }
  }
}
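
Assuming the job spec above is saved as mysql-server.nomad, run it and verify the mount as in the first section:

nomad job run mysql-server.nomad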

References
https://rancher.com/docs/rancher/v2.x/en/cluster-admin/volumes-and-storage/ceph/
https://learn.hashicorp.com/tutorials/nomad/stateful-workloads-csi-volumes?in=nomad/stateful-workloads
https://github.com/hashicorp/nomad/tree/main/demo/csi/ceph-csi-plugin
https://docs.ceph.com/en/latest/rbd/rbd-nomad/
