Integrating Ceph with OpenStack Mitaka

Ceph preparation

Official documentation: http://docs.ceph.com/docs/master/rbd/rbd-openstack/

Create the storage pools

# ceph osd pool create images 64
# ceph osd pool create compute 64
# ceph osd pool create volumes 64
# ceph osd pool create backups 64
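
A quick sanity check that the pools exist with the intended placement-group count (pool IDs in the lspools output will vary per cluster):

# ceph osd lspools
# ceph osd pool get images pg_num
pg_num: 64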

Create the Ceph user client.ceph

# ceph auth get-or-create client.ceph mon 'allow r' osd 'allow class-read object_prefix rbd_children, \
allow rwx pool=images, allow rwx pool=compute, allow rwx pool=volumes, allow rwx pool=backups'
[client.ceph]
        key = AQDIistY4xELCxAAPkelu47q/fxM3R8B732jlw==


Copy the user keyring and ceph.conf to the glance-api and cinder-volume nodes

[ceph-monitor]# ceph auth get-or-create client.ceph >> ceph.client.ceph.keyring
# scp ceph.client.ceph.keyring ceph.conf controller:/etc/ceph/
# scp ceph.client.ceph.keyring ceph.conf cinder-volume:/etc/ceph/
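
Before configuring any OpenStack service, it is worth confirming that the copied keyring actually authenticates against the cluster. A minimal check from the controller, assuming ceph.conf and the keyring landed in /etc/ceph/ as above (--id ceph selects the client.ceph user):

[controller]# ceph -s --id ceph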


Integrating glance-api

Install ceph-common and python-rbd


# yum install ceph-common python-rbd

Set keyring ownership


# chown glance:glance /etc/ceph/ceph.client.ceph.keyring

Edit /etc/glance/glance-api.conf


[DEFAULT]
...
default_store = rbd
...
[glance_store]
stores = rbd
rbd_store_pool = images
rbd_store_user = ceph
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

Restart the Glance services


# systemctl restart openstack-glance-api.service openstack-glance-registry.service
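
To confirm Glance now writes to RBD, upload a test image and list the images pool; the image file name here is only an example, and raw format is preferred for Ceph-backed images so they can be cloned efficiently:

# openstack image create "cirros" --disk-format raw --container-format bare \
  --file cirros-0.3.4-x86_64-disk.raw
# rbd ls images --id ceph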

Integrating cinder-volume

Install ceph-common and python-rbd

# yum install ceph-common python-rbd

Set keyring ownership


# chown cinder:cinder /etc/ceph/ceph.client.ceph.keyring

Edit /etc/cinder/cinder.conf

[DEFAULT]
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = ceph
rbd_secret_uuid = 791c2ef6-bc56-43b0-b2c7-0cd863621040

Restart the cinder-volume service

# systemctl restart openstack-cinder-volume.service
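
To verify the ceph backend came up, check the service list and create a small test volume; it should then appear in the volumes pool as an image named volume-<UUID> (the volume name test is only an example):

# cinder service-list
# cinder create --name test 1
# rbd ls volumes --id ceph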

Integrating nova-compute

Install ceph-common and python-rbd

# yum install ceph-common python-rbd

Edit /etc/nova/nova.conf

[cinder]
os_region_name = RegionOne

[libvirt]
images_type = rbd
images_rbd_pool = compute
images_rbd_ceph_conf = /etc/ceph/ceph.conf
disk_cachemodes="network=writeback"
rbd_user = ceph
rbd_secret_uuid = 791c2ef6-bc56-43b0-b2c7-0cd863621040

inject_password = false
inject_key = false
inject_partition = -2

live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard = unmap

Copy the Ceph configuration and keyring to the compute node

# scp ceph-node:/etc/ceph/ceph.conf ceph-node:/etc/ceph/ceph.client.ceph.keyring /etc/ceph/
# chown nova:nova /etc/ceph/ceph.conf  /etc/ceph/ceph.client.ceph.keyring
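
Optionally confirm the nova user can reach the cluster with the copied credentials (a read-only status call, assuming the file paths above):

# sudo -u nova ceph -s --id ceph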

Export the Ceph user key for libvirt


[ceph-monitor]# ceph auth get-key client.ceph >> client.ceph.key
[ceph-monitor]# scp client.ceph.key compute:/etc/nova

Configure the libvirt secret

Generate a UUID for the secret; it must match the rbd_secret_uuid value set in cinder.conf and nova.conf above.

# uuidgen
791c2ef6-bc56-43b0-b2c7-0cd863621040

# cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>791c2ef6-bc56-43b0-b2c7-0cd863621040</uuid>
  <usage type='ceph'>
    <name>client.ceph secret</name>
  </usage>
</secret>
EOF

# virsh secret-define --file secret.xml
Secret 791c2ef6-bc56-43b0-b2c7-0cd863621040 created

# virsh secret-set-value --secret 791c2ef6-bc56-43b0-b2c7-0cd863621040 --base64 $(cat client.ceph.key)                                     
Secret value set
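
Verify the secret is registered and carries the key:

# virsh secret-list
# virsh secret-get-value 791c2ef6-bc56-43b0-b2c7-0cd863621040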

Restart the nova-compute service

# systemctl restart openstack-nova-compute.service
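
After the restart, boot a test instance and confirm its disk lands in the compute pool; Ceph-backed instances appear as <instance-uuid>_disk images:

# rbd ls compute --id ceph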