Nomad ceph-csi dynamic provisioning
GitHub: https://github.com/hashicorp/nomad/tree/main/demo/csi/ceph-csi-plugin
Prepare the files
plugin-cephrbd-controller.nomad
Remember to change clusterID and monitors.
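If you are pointing these jobs at a real cluster, both values can be read from Ceph itself; a quick sketch (assumes admin access to the Ceph cluster, and that clusterID is the Ceph fsid, which is what ceph-csi expects by default):

$ ceph fsid        # prints the cluster fsid, used as clusterID below
$ ceph mon dump    # lists the monitor addresses for the monitors array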
variable "cluster_id" {
  type        = string
  # generated from uuid5(dns) with ceph.example.com as the seed
  default     = "ef8394c8-2b17-435d-ae47-9dff271875d1"
  description = "cluster ID for the Ceph monitor"
}

job "plugin-cephrbd-controller" {
  datacenters = ["dc1", "dc2"]

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  type = "service"

  group "cephrbd" {

    network {
      port "prometheus" {}
    }

    service {
      name = "prometheus"
      port = "prometheus"
      tags = ["ceph-csi"]
    }

    task "plugin" {
      driver = "docker"

      config {
        image = "quay.io/cephcsi/cephcsi:canary"

        args = [
          "--drivername=rbd.csi.ceph.com",
          "--v=5",
          "--type=rbd",
          "--controllerserver=true",
          "--nodeid=${NODE_ID}",
          "--instanceid=${POD_ID}",
          "--endpoint=${CSI_ENDPOINT}",
          "--metricsport=${NOMAD_PORT_prometheus}",
        ]

        ports = ["prometheus"]

        # we need to be able to write key material to disk in this location
        mount {
          type     = "bind"
          source   = "secrets"
          target   = "/tmp/csi/keys"
          readonly = false
        }

        mount {
          type     = "bind"
          source   = "ceph-csi-config/config.json"
          target   = "/etc/ceph-csi-config/config.json"
          readonly = false
        }
      }

      template {
        data = <<-EOT
POD_ID=${NOMAD_ALLOC_ID}
NODE_ID=${node.unique.id}
CSI_ENDPOINT=unix://csi/csi.sock
EOT

        destination = "${NOMAD_TASK_DIR}/env"
        env         = true
      }

      # ceph configuration file
      template {
        data = <<EOF
[{
  "clusterID": "${var.cluster_id}",
  "monitors": [
    "10.103.3.40:6789",
    "10.103.3.41:6789",
    "10.103.3.42:6789"
  ]
}]
EOF

        destination = "ceph-csi-config/config.json"
      }

      csi_plugin {
        id        = "cephrbd"
        type      = "controller"
        mount_dir = "/csi"
      }

      # note: there's no upstream guidance on resource usage so
      # this is a best guess until we profile it in heavy use
      resources {
        cpu    = 256
        memory = 256
      }
    }
  }
}
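Because cluster_id is declared as an HCL2 variable in the controller job, it can also be overridden at submission time instead of editing the file; a sketch (the fsid below is a placeholder, and note that the monitor addresses are still hard-coded in the template):

$ nomad job run -var="cluster_id=<your-ceph-fsid>" ./plugin-cephrbd-controller.nomad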
plugin-cephrbd-node.nomad
Remember to change clusterID and monitors. Compared with the upstream GitHub version, this job adds a mount for the /tmp/csi/keys directory.
job "plugin-cephrbd-node" {
datacenters = ["dc1", "dc2"]
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
type = "system"
group "cephrbd" {
network {
port "prometheus" {}
}
service {
name = "prometheus"
port = "prometheus"
tags = ["ceph-csi"]
}
task "plugin" {
driver = "docker"
config {
image = "quay.io/cephcsi/cephcsi:canary"
args = [
"--drivername=rbd.csi.ceph.com",
"--v=5",
"--type=rbd",
"--nodeserver=true",
"--nodeid=${NODE_ID}",
"--instanceid=${POD_ID}",
"--endpoint=${CSI_ENDPOINT}",
"--metricsport=${NOMAD_PORT_prometheus}",
]
privileged = true
ports = ["prometheus"]
# we need to be able to write key material to disk in this location
mount {
type = "tmpfs"
#source = "secrets"
target = "/tmp/csi/keys"
readonly = false
}
mount {
type = "bind"
source = "ceph-csi-config/config.json"
target = "/etc/ceph-csi-config/config.json"
readonly = false
}
}
template {
data = <<-EOT
POD_ID=${NOMAD_ALLOC_ID}
NODE_ID=${node.unique.id}
CSI_ENDPOINT=unix://csi/csi.sock
EOT
destination = "${NOMAD_TASK_DIR}/env"
env = true
}
# ceph configuration file
template {
data = <<EOF
[{
"clusterID": "ef8394c8-2b17-435d-ae47-9dff271875d1",
"monitors": [
"10.103.3.40:6789",
"10.103.3.41:6789",
"10.103.3.42:6789"
]
}]
EOF
destination = "ceph-csi-config/config.json"
}
csi_plugin {
id = "cephrbd"
type = "node"
mount_dir = "/csi"
}
# note: there's no upstream guidance on resource usage so
# this is a best guess until we profile it in heavy use
resources {
cpu = 256
memory = 256
}
}
}
}
volume.hcl
Remember to change clusterID and secrets.
id        = "testvolume"
name      = "test1"
type      = "csi"
plugin_id = "cephrbd"

capacity_min = "100MB"
capacity_max = "1GB"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "block-device"
}

# mount_options {
#   fs_type     = "ext4"
#   mount_flags = ["ro"]
# }

# creds should be coming from:
# /var/lib/ceph/mds/ceph-demo/keyring
# but instead we're getting them from:
# /etc/ceph/ceph.client.admin.keyring
secrets {
  userID  = "admin"
  userKey = "AQBWYPVhs0XZIhAACN0PAHEJTitvq514oVC12A=="
}

parameters {
  # seeded from uuid5(ceph.example.com)
  clusterID     = "ef8394c8-2b17-435d-ae47-9dff271875d1"
  pool          = "myPool"
  imageFeatures = "layering"
}
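The pool referenced in parameters must already exist on the Ceph cluster; ceph-csi creates RBD images inside it but does not create the pool itself. A minimal sketch of preparing it (the pool name matches volume.hcl above; the PG count of 64 is an assumption for a small cluster):

$ ceph osd pool create myPool 64
$ rbd pool init myPool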
Test job
variables {
  path = "test"
}

job "example" {
  datacenters = ["dc1"]

  group "cache" {
    count = 2

    volume "cache-volume" {
      type            = "csi"
      source          = "testvolume"
      attachment_mode = "file-system"
      access_mode     = "single-node-writer"
      #per_alloc      = true
    }

    network {
      port "db" {
        to = 6379
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:3.2"
        ports = ["db"]
      }

      resources {
        cpu    = 500
        memory = 256
      }

      env {
        # this will be available as the MOUNT_PATH environment
        # variable in the task
        MOUNT_PATH = "${NOMAD_ALLOC_DIR}/${var.path}"
      }

      volume_mount {
        volume      = "cache-volume"
        destination = "${NOMAD_ALLOC_DIR}/${var.path}"
      }
    }
  }
}
Deployment
The Ceph CSI node task must run with privileged = true. The controller task does not need it.
Plugin arguments
- --type=rbd: the driver type (rbd, or alternatively cephfs).
- --endpoint=unix:///csi/csi.sock: this option must match the mount_dir specified in the task's csi_plugin block (see the sketch after this list).
- --nodeid=${node.unique.id}: the unique ID of the node the task runs on.
- --instanceid=${NOMAD_ALLOC_ID}: a unique ID that distinguishes this Ceph CSI instance from other instances when a Ceph cluster is shared across multiple CSI instances for provisioning; used for topology-aware deployments.
Run the plugins
$ nomad job run ./plugin-cephrbd-controller.nomad
==> Monitoring evaluation "c8e65575"
    Evaluation triggered by job "plugin-cephrbd-controller"
==> Monitoring evaluation "c8e65575"
    Evaluation within deployment: "b15b6b2b"
    Allocation "1955d2ab" created: node "8dda4d46", group "cephrbd"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "c8e65575" finished with status "complete"
$ nomad job run ./plugin-cephrbd-node.nomad
==> Monitoring evaluation "5e92c5dc"
    Evaluation triggered by job "plugin-cephrbd-node"
==> Monitoring evaluation "5e92c5dc"
    Allocation "5bb9e57a" created: node "8dda4d46", group "cephrbd"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "5e92c5dc" finished with status "complete"
$ nomad plugin status cephrbd
ID                   = cephrbd
Provider             = rbd.csi.ceph.com
Version              = canary
Controllers Healthy  = 1
Controllers Expected = 1
Nodes Healthy        = 1
Nodes Expected       = 1

Allocations
ID        Node ID   Task Group  Version  Desired  Status   Created    Modified
1955d2ab  8dda4d46  cephrbd     0        run      running  3m47s ago  3m37s ago
5bb9e57a  8dda4d46  cephrbd     0        run      running  3m44s ago  3m43s ago
Create a volume
The volume's secrets block must be populated with the userID and userKey values. These typically come from /etc/ceph/ceph.client.<user>.keyring.
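Rather than copying the key out of the keyring file by hand, you can also ask Ceph for it directly (assuming the admin user, as used in volume.hcl above):

$ ceph auth get-key client.admin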
$ nomad volume create ./volume.hcl
Created external volume 0001-0024-e9ba69fa-67ff-5920-b374-84d5801edd19-0000000000000002-3603408d-a9ca-11eb-8ace-080027c5bc64
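With the volume created, the test job from above can be submitted and the claim verified; a sketch (the file name example.nomad is an assumption for wherever you saved the test job):

$ nomad job run ./example.nomad
$ nomad volume status testvolume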