openstack pike + ceph + HA integration -- Cinder storage node (Part 11)
# Cinder block storage
# Requires a storage node, distributed storage, etc.
# Uses Ceph RBD (block device) storage
# Hardware used here: 2 disks
#   sdb 1.4 T
#   sdc 1.4 T
## Run on the controller1 node
# Create the Ceph user and key
cd /etc/ceph/
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
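## Optional sanity check: read the user back to confirm the caps took effect
ceph auth get client.cinder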
# Query the user's key and write it to a file
ceph auth get-key client.cinder >/etc/ceph/ceph.client.cinder.keyring
## The file this produces is not in keyring format (get-key writes only the bare key), so edit it by hand to match the format below
vi /etc/ceph/ceph.client.cinder.keyring
[client.cinder]
        key = AQAtZKdamLXNBhAANusabLORDaZquBplFkiyYg==
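## Alternatively, "ceph auth get" writes a properly formatted keyring directly,
## which avoids the manual edit (a sketch -- check the output before relying on it):
ceph auth get client.cinder -o /etc/ceph/ceph.client.cinder.keyring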
# Copy the keys to the relevant nodes and fix ownership. In my setup, compute and cinder run on the same node.
# nova compute
Node=compute1
scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.cinder.keyring
## The glance key was already generated when the Glance cluster was set up, so reuse it here
scp /etc/ceph/ceph.client.glance.keyring $Node:/etc/ceph/
ssh $Node sudo chown nova:nova /etc/ceph/ceph.client.glance.keyring
# cinder
Node=compute1
scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
ssh $Node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
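## With more than one compute/cinder node (likely in an HA deployment), a small
## loop saves repetition. A sketch -- the node names compute1/compute2 are
## assumptions, substitute your own:
for Node in compute1 compute2; do
    scp /etc/ceph/ceph.client.cinder.keyring $Node:/etc/ceph/
    ssh $Node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
done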
# On the nova compute node, confirm the keyrings are in place
ls -l /etc/ceph/
# Append the Ceph client settings to ceph.conf (single quotes are deliberate:
# $cluster, $type, $id, $pid and $cctid are expanded by Ceph, not by the shell)
echo '
[client]
rbd cache = true
rbd cache writethrough until flush = true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
[client.cinder]
keyring = /etc/ceph/ceph.client.cinder.keyring
'>>/etc/ceph/ceph.conf
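## Quick check that the cinder key actually works against the cluster from
## this node (standard ceph/rbd CLI; client.cinder has mon read and rwx on
## the volumes pool, so both should succeed):
ceph -s --id cinder
rbd ls volumes --id cinder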
mkdir -p /var/run/ceph/guests/ /var/log/qemu/
chown qemu:libvirt /var/run/ceph/guests /var/log/qemu/
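## Note: /var/run is a tmpfs on CentOS 7, so the guests directory is lost on
## reboot. A tmpfiles.d entry can recreate it at boot (a sketch; the file name
## and mode are assumptions):
echo 'd /var/run/ceph/guests 0770 qemu libvirt -' >/etc/tmpfiles.d/ceph-guests.conf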
# Start the services and enable them at boot
# Install Cinder
yum install -y openstack-cinder targetcli python-keystone lvm2
cp /etc/cinder/cinder.conf{,.bak}
cp /etc/lvm/lvm.conf{,.bak}
# Add the key to libvirt
#MyUID=$(uuidgen) && echo $MyUID   # generate a UUID; it is used again below
MyUID=5d8bc172-d375-4631-8be0-cbe11bf88a55
Key=$(awk '/key/ { print $3 }' /etc/ceph/ceph.client.cinder.keyring)
## Check that both variables have values; if either is empty, assign it by hand
echo $Key
echo $MyUID
###################################
echo '
<secret ephemeral="no" private="no">
  <uuid>'$MyUID'</uuid>
  <usage type="ceph">
    <name>client.cinder secret</name>
  </usage>
</secret>
'>ceph.xml
virsh secret-define --file ceph.xml
virsh secret-set-value --secret $MyUID --base64 $Key
## If secret-define errors out, check whether the libvirtd service is running
##systemctl status libvirtd
## If libvirtd fails to start, reboot the node
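## To verify the secret was stored, list it and read the value back; the value
## should match the key in the keyring:
virsh secret-list
virsh secret-get-value $MyUID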
# Cinder configuration
echo '
[DEFAULT]
auth_strategy = keystone
log_dir = /var/log/cinder
state_path = /var/lib/cinder
glance_api_servers = http://controller:9292
transport_url = rabbit://openstack:openstack@controller
enabled_backends = lvm,nfs,ceph

[database]
connection = mysql+pymysql://cinder:cinder@controller/cinder

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211,controller2:11211,controller3:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = '$MyUID'
'>/etc/cinder/cinder.conf
chmod 640 /etc/cinder/cinder.conf
chgrp cinder /etc/cinder/cinder.conf
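## Sanity check: make sure the UUID was actually substituted into the file
## (an empty value here means $MyUID was unset when the file was written):
grep rbd_secret_uuid /etc/cinder/cinder.conf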
# Do not start the volume service yet -- nova still needs one more configuration step; start the Cinder volume service once that side is done
#systemctl enable openstack-cinder-volume.service target.service
#systemctl start openstack-cinder-volume.service target.service
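## Once the service is running, a quick end-to-end check (run from a node with
## the openstack client and admin credentials; the volume name is an assumption):
#openstack volume service list
#openstack volume create --size 1 test-ceph-vol
#rbd ls volumes    # the new volume should appear as volume-<uuid>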