glance-api.conf

Before (local filesystem store only):

[glance_store]
filesystem_store_datadir = /opt/stack/data/glance/images/

After (Ceph RBD as the default store):

[glance_store]
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
stores = file, http, rbd
default_store = rbd
filesystem_store_datadir = /opt/stack/data/glance/images/
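For default_store = rbd to take effect, the images pool and the glance Ceph user referenced above must already exist on the cluster. A minimal sketch of creating them on the Ceph admin node, using the capability set from the standard Ceph/OpenStack integration guide (the PG count of 128 is illustrative):

# Create the pool Glance writes images into.
ceph osd pool create images 128
# Create a Ceph user for Glance, restricted to the images pool,
# and save its keyring where glance-api can read it.
ceph auth get-or-create client.glance \
    mon 'allow r' \
    osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' \
    -o /etc/ceph/ceph.client.glance.keyring

Restart glance-api afterwards so the new store list is picked up.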
nova.conf or nova-cpu.conf

Before (local image storage):

[libvirt]
live_migration_uri = qemu+ssh://stack@%s/system
cpu_mode = none
virt_type = kvm

After (instance disks backed by Ceph RBD):

[libvirt]
images_rbd_ceph_conf = /etc/ceph/ceph.conf
images_rbd_pool = vms
images_type = rbd
disk_cachemodes = network=writeback
inject_partition = -2
inject_key = false
rbd_secret_uuid = df0d0b60-047a-45f5-b5be-f7d2b4beadee
rbd_user = cinder
live_migration_uri = qemu+ssh://stack@%s/system
cpu_mode = none
virt_type = kvm
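The rbd_secret_uuid above must match a libvirt secret holding the client.cinder key on every compute node; otherwise libvirt cannot authenticate to Ceph when attaching RBD disks. A minimal sketch of registering it, following the standard Ceph/libvirt procedure (the secret.xml filename is illustrative):

# Wrap the UUID in a libvirt secret definition.
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>df0d0b60-047a-45f5-b5be-f7d2b4beadee</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
virsh secret-define --file secret.xml
# Attach the actual Ceph key to the secret.
virsh secret-set-value --secret df0d0b60-047a-45f5-b5be-f7d2b4beadee \
    --base64 $(ceph auth get-key client.cinder)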
cinder.conf

LVM backend:

[lvmdriver-1]
image_volume_cache_enabled = True
volume_clear = zero
lvm_type = auto
iscsi_helper = tgtadm
volume_group = stack-volumes-lvmdriver-1
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvmdriver-1

Ceph RBD backend:

[ceph]
image_volume_cache_enabled = True
volume_clear = zero
rbd_max_clone_depth = 5
rbd_flatten_volume_from_snapshot = False
rbd_secret_uuid = df0d0b60-047a-45f5-b5be-f7d2b4beadee
rbd_user = cinder
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
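The [ceph] backend assumes the volumes pool and the client.cinder user exist. A sketch of creating them on the Ceph admin node, with the capability list taken from the standard integration guide (the vms pool is included because the nova configuration above uses it with the same user; PG counts are illustrative):

ceph osd pool create volumes 128
ceph osd pool create vms 128
ceph auth get-or-create client.cinder \
    mon 'allow r' \
    osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' \
    -o /etc/ceph/ceph.client.cinder.keyring

Note that the [ceph] stanza is only active once it is listed in enabled_backends under [DEFAULT], e.g. enabled_backends = lvmdriver-1, ceph (that line is not shown in the excerpt above).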
ceph.conf, as shown on the controller node after configuration:

[global]
rbd default features = 1
osd pool default size = 1
osd journal size = 100
osd crush chooseleaf type = 0
filestore_xattr_use_omap = true
auth_client_required = cephx
auth_service_required = cephx
auth_cluster_required = cephx
mon_host = 172.16.1.17
mon_initial_members = controller
fsid = eab37548-7aef-466a-861c-3757a12ce9e8

After creating and initializing the monitor on the Ceph cluster's admin node (the node where ceph-deploy was used to build the Ceph storage cluster), several keyring files are generated. These keyrings, together with the ceph.conf configured on that node, must be distributed to all other nodes (the remaining Ceph nodes, the compute nodes, the controller node, and so on). The fsid is configured automatically.
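A minimal sketch of that distribution step, run from the admin node with ceph-deploy (the node names controller and compute1 are illustrative):

# Push ceph.conf and the client.admin keyring to the other nodes.
ceph-deploy --overwrite-conf admin controller compute1
# Service keyrings (glance, cinder) can be copied over by hand, e.g.:
scp /etc/ceph/ceph.client.glance.keyring controller:/etc/ceph/
scp /etc/ceph/ceph.client.cinder.keyring compute1:/etc/ceph/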