*** References ***
https://docs.openstack.org/project-deploy-guide/kolla-ansible/train/
I. Server planning
OS: CentOS Linux release 7.9.2009 (Core)
Deploy host
hostname | category | hardware | eth0 - mgmt | eth1 - provider (external) | eth2 | eth3 | gateway |
---|---|---|---|---|---|---|---|
vm-210 | deploy | core*4 / 4g / 20GB | 192.168.100.210 | 10.0.100.210 | 10.0.110.210 | 10.0.120.210 | 192.168.100.1 |
OpenStack nodes
hostname | category | hardware | eth0 - mgmt | eth1 - provider (external) | eth2 - self-service (vxlan) | eth3 | gateway |
---|---|---|---|---|---|---|---|
vm-211 | control | core*4 / 8g / 20GB | 192.168.100.211 | 10.0.100.211 | 10.0.110.211 | 10.0.120.211 | 192.168.100.1 |
vm-212 | control | core*4 / 8g / 20GB | 192.168.100.212 | 10.0.100.212 | 10.0.110.212 | 10.0.120.212 | 192.168.100.1 |
vm-213 | control | core*4 / 8g / 20GB | 192.168.100.213 | 10.0.100.213 | 10.0.110.213 | 10.0.120.213 | 192.168.100.1 |
vm-214 | compute | core*8 / 8g / 20GB | 192.168.100.214 | 10.0.100.214 | 10.0.110.214 | 10.0.120.214 | 192.168.100.1 |
vm-215 | compute | core*8 / 8g / 20GB | 192.168.100.215 | 10.0.100.215 | 10.0.110.215 | 10.0.120.215 | 192.168.100.1 |
vm-216 | network | core*2 / 4g / 20GB | 192.168.100.216 | 10.0.100.216 | 10.0.110.216 | 10.0.120.216 | 192.168.100.1 |
vm-217 | network | core*2 / 4g / 20GB | 192.168.100.217 | 10.0.100.217 | 10.0.110.217 | 10.0.120.217 | 192.168.100.1 |
vm-218 | storage | core*2 / 4g / 20GB,50GB | 192.168.100.218 | 10.0.100.218 | 10.0.110.218 | 10.0.120.218 | 192.168.100.1 |
vm-219 | storage | core*2 / 4g / 20GB,50GB | 192.168.100.219 | 10.0.100.219 | 10.0.110.219 | 10.0.120.219 | 192.168.100.1 |
Ceph nodes
hostname | category | hardware | eth0 - public | eth1 | eth2 - cluster | eth3 | gateway |
---|---|---|---|---|---|---|---|
vm-201 | ceph-mon | core*1 / 2g / 20GB | 192.168.100.201 | 10.0.100.201 | 10.0.110.201 | 10.0.120.201 | 192.168.100.1 |
vm-202 | ceph-mon | core*1 / 2g / 20GB | 192.168.100.202 | 10.0.100.202 | 10.0.110.202 | 10.0.120.202 | 192.168.100.1 |
vm-203 | ceph-mon | core*1 / 2g / 20GB | 192.168.100.203 | 10.0.100.203 | 10.0.110.203 | 10.0.120.203 | 192.168.100.1 |
vm-204 | ceph-osd | core*4 / 4g / 20GB,10GBx2,30GBx4 | 192.168.100.204 | 10.0.100.204 | 10.0.110.204 | 10.0.120.204 | 192.168.100.1 |
vm-205 | ceph-osd | core*4 / 4g / 20GB,10GBx2,30GBx4 | 192.168.100.205 | 10.0.100.205 | 10.0.110.205 | 10.0.120.205 | 192.168.100.1 |
vm-206 | ceph-osd | core*4 / 4g / 20GB,10GBx2,30GBx4 | 192.168.100.206 | 10.0.100.206 | 10.0.110.206 | 10.0.120.206 | 192.168.100.1 |
II. Proxmox virtualization configuration
# 192.168.100.0/24 bridged; 10.0.100.0/24 NAT
# cat /etc/network/interfaces
auto lo
iface lo inet loopback

iface ens3f0 inet manual
iface ens3f1 inet manual
iface enp5s0 inet manual

iface enp6s0 inet static
    address 192.168.100.199/24

auto vmbr0
iface vmbr0 inet static
    address 192.168.100.200/24
    gateway 192.168.100.1
    bridge-ports enp5s0
    bridge-stp off
    bridge-fd 0

auto vmbr1
iface vmbr1 inet static
    address 10.0.100.1/24
    bridge-ports none
    bridge-stp off
    bridge-fd 0
    post-up echo 1 > /proc/sys/net/ipv4/ip_forward
    post-up iptables -t nat -A POSTROUTING -s '10.0.100.0/24' -o enp6s0 -j MASQUERADE
    post-down iptables -t nat -D POSTROUTING -s '10.0.100.0/24' -o enp6s0 -j MASQUERADE

auto vmbr2
iface vmbr2 inet static
    address 10.0.110.1/24
    bridge-ports none
    bridge-stp off
    bridge-fd 0

auto vmbr3
iface vmbr3 inet static
    address 10.0.120.1/24
    bridge-ports none
    bridge-stp off
    bridge-fd 0
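The new bridges can usually be brought up without a reboot; a minimal sketch, assuming Proxmox's stock ifupdown2 is in place (verify from the local console first, since a mistake here can cut off SSH):
# apply the edited interfaces file, then confirm the bridge addresses
ifreload -a
ip -brief addr | grep vmbr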
III. Deployment and installation
1. Passwordless SSH from the deploy host to all nodes
ssh-keygen -b 2048 -t rsa -P '' -f ~/.ssh/id_rsa
for i in {211..219}; do ssh-copy-id -i .ssh/id_rsa.pub vm-$i; done
for i in {201..206}; do ssh-copy-id -i .ssh/id_rsa.pub vm-$i; done
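Before continuing, it is worth confirming passwordless login actually works everywhere; a quick check, assuming the vm-* names already resolve (see the hosts file in step 2):
# BatchMode makes ssh fail instead of prompting if the key was not accepted
for i in {201..206} {211..219}; do ssh -o BatchMode=yes vm-$i hostname; done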
2. Static host entries on the deploy host
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.100.210 vm-210
10.0.100.211 vm-211
10.0.100.212 vm-212
10.0.100.213 vm-213
10.0.100.214 vm-214
10.0.100.215 vm-215
10.0.100.216 vm-216
10.0.100.217 vm-217
10.0.100.218 vm-218
10.0.100.219 vm-219
10.0.100.201 vm-201
10.0.100.202 vm-202
10.0.100.203 vm-203
10.0.100.204 vm-204
10.0.100.205 vm-205
10.0.100.206 vm-206
EOF
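kolla-ansible's bootstrap-servers step can also populate /etc/hosts on the target nodes, but if you want name resolution on every machine right away, a simple push works:
# copy the same hosts file to every node
for i in {201..206} {211..219}; do scp /etc/hosts vm-$i:/etc/hosts; done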
3. Install Docker on the deploy host
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.nju.edu.cn/docker-ce/linux/centos/\$releasever/\$basearch/stable
enabled=1
gpgcheck=0
gpgkey=https://mirrors.nju.edu.cn/docker-ce/linux/centos/gpg
EOF
yum install -y docker-ce
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://registry.docker-cn.com", "http://hub-mirror.c.163.com", "https://docker.mirrors.ustc.edu.cn"]
}
EOF
systemctl restart docker && systemctl enable docker
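A quick sanity check that the daemon restarted and picked up the mirror configuration:
docker info | grep -A 3 'Registry Mirrors'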
4. Install pip-related dependencies on all nodes
yum -y install python-pip
mkdir ~/.pip
cat > ~/.pip/pip.conf <<EOF
[global]
index-url = https://pypi.douban.com/simple
EOF
pip install -U pip==19.3.1
pip install -U setuptools
pip install 'ansible<2.10'
pip install kolla-ansible==9.3.2
pip install -U docker
pip uninstall websocket-client -y
pip install websocket-client
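Only the deploy host needs ansible and kolla-ansible; the other nodes just need the Docker SDK pieces, which can be driven over SSH from the deploy host, e.g.:
# node-side dependencies only; the pip mirror config is optional but speeds this up
for i in {211..219}; do
  ssh vm-$i 'yum -y install python-pip; pip install -U pip==19.3.1; pip install -U docker; pip uninstall -y websocket-client; pip install websocket-client'
done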
5. Ansible configuration on the deploy host
mkdir -p /etc/ansible
cat > /etc/ansible/ansible.cfg <<EOF
[defaults]
host_key_checking=False
pipelining=True
forks=100
EOF
6. Edit the kolla-ansible inventory on the deploy host
mkdir -p /etc/kolla
cp -r /root/venv/share/kolla-ansible/etc_examples/kolla/* /etc/kolla
cp /root/venv/share/kolla-ansible/ansible/inventory/* .
# edit the relevant parts of the multinode file
cat /root/multinode
[control]
vm-[211:213]
[network]
vm-[216:217]
[compute]
vm-[214:215]
[monitoring]
vm-[211:213]
[storage]
vm-[218:219]
[deployment]
localhost ansible_connection=local
........
ansible -i multinode all -m ping
7. Generate the deployment passwords on the deploy host
kolla-genpwd
cat /etc/kolla/passwords.yml
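Individual entries can be pulled out with grep; for example, the Horizon admin password used in step 13:
grep keystone_admin_password /etc/kolla/passwords.yml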
8. Edit the kolla-ansible global configuration file
cat > /etc/kolla/globals.yml <<EOF
---
# Kolla options
kolla_base_distro: "centos"
kolla_install_type: "binary"
openstack_release: "train"
kolla_internal_vip_address: "192.168.100.240"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "10.0.100.240"
kolla_external_fqdn: "{{ kolla_external_vip_address }}"
# Docker options
#docker_registry: "192.168.100.210:4000"
docker_namespace: "kolla"
# Messaging options
om_rpc_transport: "rabbit"
# Neutron - Networking Options
network_interface: "eth0"
kolla_external_vip_interface: "eth1"
api_interface: "eth0"
storage_interface: "eth0"
cluster_interface: "eth2"
tunnel_interface: "eth2"
network_address_family: "ipv4"
neutron_external_interface: "eth1"
neutron_plugin_agent: "openvswitch"
neutron_enable_rolling_upgrade: "yes"
# keepalived options
keepalived_virtual_router_id: "51"
# TLS options
kolla_enable_tls_internal: "yes"
# Region options
openstack_region_name: "RegionOne"
# OpenStack options
openstack_logging_debug: "False"
# glance, keystone, neutron, nova, heat, and horizon.
enable_openstack_core: "yes"
enable_haproxy: "yes"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_ceph: "no"
enable_ceph_mds: "no"
enable_ceph_rgw: "no"
enable_ceph_nfs: "no"
enable_chrony: "yes"
enable_cinder: "yes"
enable_cinder_backup: "yes"
enable_cinder_backend_lvm: "yes"
enable_fluentd: "no"
enable_grafana: "yes"
enable_nova_ssh: "yes"
enable_prometheus: "yes"
enable_redis: "yes"
enable_etcd: "yes"
enable_neutron_provider_networks: "yes" # without this, instances on the provider network fail to build with a port binding error in the logs
# Keystone - Identity Options
keystone_token_provider: 'fernet'
keystone_admin_user: "admin"
keystone_admin_project: "admin"
# Glance - Image Options
glance_backend_ceph: "no"
glance_backend_file: "yes"
# Cinder - Block Storage Options
cinder_backend_ceph: "{{ enable_ceph }}"
cinder_backend_vmwarevc_vmdk: "no"
cinder_volume_group: "cinder-volumes"
# Nova - Compute Options
nova_backend_ceph: "{{ enable_ceph }}"
nova_console: "novnc"
nova_compute_virt_type: "qemu" # must be qemu when the hosts are themselves VMs; on physical machines the default is kvm
# Hyper-V options
hyperv_username: "user"
hyperv_password: "password"
vswitch_name: "vswitch"
EOF
9. Create the cinder-volumes volume group on the storage nodes
# run on each storage node (vm-218 / vm-219); /dev/sdb is the 50GB data disk
parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%
yum install -y lvm2
pvcreate /dev/sdb1
vgcreate cinder-volumes /dev/sdb1
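From the deploy host the whole step can be scripted across both storage nodes, with vgs as a sanity check:
for i in 218 219; do
  ssh vm-$i 'parted -s /dev/sdb mklabel gpt mkpart primary xfs 0% 100%; yum -y install lvm2; pvcreate /dev/sdb1; vgcreate cinder-volumes /dev/sdb1; vgs cinder-volumes'
done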
10. Run the deployment from the deploy host
kolla-ansible -i multinode certificates
kolla-ansible -i multinode bootstrap-servers
kolla-ansible -i multinode prechecks
kolla-ansible -i multinode pull
kolla-ansible -i multinode deploy
11. Container images pulled per node role
# control
kolla/centos-binary-chrony:train
kolla/centos-binary-cinder-api:train
kolla/centos-binary-cinder-scheduler:train
kolla/centos-binary-cron:train
kolla/centos-binary-glance-api:train
kolla/centos-binary-grafana:train
kolla/centos-binary-heat-api:train
kolla/centos-binary-heat-api-cfn:train
kolla/centos-binary-heat-engine:train
kolla/centos-binary-horizon:train
kolla/centos-binary-keystone:train
kolla/centos-binary-keystone-fernet:train
kolla/centos-binary-keystone-ssh:train
kolla/centos-binary-kolla-toolbox:train
kolla/centos-binary-mariadb:train
kolla/centos-binary-memcached:train
kolla/centos-binary-neutron-server:train
kolla/centos-binary-nova-api:train
kolla/centos-binary-nova-conductor:train
kolla/centos-binary-nova-novncproxy:train
kolla/centos-binary-nova-scheduler:train
kolla/centos-binary-placement-api:train
kolla/centos-binary-prometheus-alertmanager:train
kolla/centos-binary-prometheus-blackbox-exporter:train
kolla/centos-binary-prometheus-cadvisor:train
kolla/centos-binary-prometheus-memcached-exporter:train
kolla/centos-binary-prometheus-mysqld-exporter:train
kolla/centos-binary-prometheus-node-exporter:train
kolla/centos-binary-prometheus-openstack-exporter:train
kolla/centos-binary-prometheus-server:train
kolla/centos-binary-rabbitmq:train
kolla/centos-binary-redis:train
kolla/centos-binary-redis-sentinel:train
# compute
kolla/centos-binary-chrony:train
kolla/centos-binary-cron:train
kolla/centos-binary-iscsid:train
kolla/centos-binary-kolla-toolbox:train
kolla/centos-binary-neutron-openvswitch-agent:train
kolla/centos-binary-nova-compute:train
kolla/centos-binary-nova-libvirt:train
kolla/centos-binary-nova-ssh:train
kolla/centos-binary-openvswitch-db-server:train
kolla/centos-binary-openvswitch-vswitchd:train
kolla/centos-binary-prometheus-cadvisor:train
kolla/centos-binary-prometheus-node-exporter:train
# network
kolla/centos-binary-chrony:train
kolla/centos-binary-cron:train
kolla/centos-binary-haproxy:train
kolla/centos-binary-keepalived:train
kolla/centos-binary-kolla-toolbox:train
kolla/centos-binary-neutron-dhcp-agent:train
kolla/centos-binary-neutron-l3-agent:train
kolla/centos-binary-neutron-metadata-agent:train
kolla/centos-binary-neutron-openvswitch-agent:train
kolla/centos-binary-openvswitch-db-server:train
kolla/centos-binary-openvswitch-vswitchd:train
kolla/centos-binary-prometheus-cadvisor:train
kolla/centos-binary-prometheus-haproxy-exporter:train
kolla/centos-binary-prometheus-node-exporter:train
# storage
kolla/centos-binary-chrony:train
kolla/centos-binary-cinder-backup:train
kolla/centos-binary-cinder-volume:train
kolla/centos-binary-cron:train
kolla/centos-binary-iscsid:train
kolla/centos-binary-kolla-toolbox:train
kolla/centos-binary-prometheus-cadvisor:train
kolla/centos-binary-prometheus-node-exporter:train
kolla/centos-binary-tgtd:train
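Lists like the above can be regenerated at any time by asking Docker on one node of each role, e.g.:
for i in 211 214 216 218; do echo "== vm-$i =="; ssh vm-$i "docker images --format '{{.Repository}}:{{.Tag}}' | sort"; done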
*** Troubleshooting ***
prechecks : Checking docker SDK version
FAILED! => {"changed": false, "cmd": ["/usr/bin/python", "-c", "import docker; print(docker.__version__)"]
# The error above is caused by Python library version conflicts; run the following against all nodes
for i in {211..219}; do ssh vm-$i 'pip install -U pip==19.3.1; pip uninstall websocket-client -y; pip install -U docker; pip install websocket-client'; done
12. openstackclient
# generate the admin-openrc.sh environment file
kolla-ansible post-deploy
# install python-openstackclient
pip install PyYAML --ignore-installed PyYAML
pip install ipaddress --ignore-installed ipaddress
pip install requests --ignore-installed requests
pip install python-openstackclient
# init-runonce creates a demo project automatically; building the resources by hand (see section IV) is recommended
source /etc/kolla/admin-openrc.sh
/usr/local/share/kolla-ansible/init-runonce
*** Troubleshooting ***
Running openstack fails with ImportError: No module named queue, because Python 2.7 has no module named queue; patch the following files:
/usr/lib/python2.7/site-packages/openstack/utils.py
/usr/lib/python2.7/site-packages/openstack/cloud/openstackcloud.py
replace import queue with from multiprocessing import Queue as queue
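The same edit as a one-liner, assuming the import sits on its own line in both files:
sed -i 's/^import queue$/from multiprocessing import Queue as queue/' \
  /usr/lib/python2.7/site-packages/openstack/utils.py \
  /usr/lib/python2.7/site-packages/openstack/cloud/openstackcloud.py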
13. dashboard
http://192.168.100.240
# admin / yP2p1EaAThI9pA2asJ5tAh3JJ34MQNGCXfQyNasf
# recorded in /etc/kolla/admin-openrc.sh
* Change * For a virtualized deployment, use the bridged network as the external network, otherwise the instances you create cannot reach the outside world; for a physical deployment, make sure the external segment is routable to the outside.
Proxmox bridge | uplink | subnet | purpose |
---|---|---|---|
vmbr0 | bridged | 192.168.100.0/24 | mgmt, ceph-public |
vmbr1 | bridged | 192.168.100.0/24 | provider |
vmbr2 | NAT | 10.0.110.0/24 | vxlan, ceph-cluster |
vmbr3 | NAT | 10.0.120.0/24 | unused |
IV. Using OpenStack
# view OpenStack service information
openstack service list
openstack compute service list
openstack volume service list
openstack network agent list
openstack hypervisor list
# reference manual
https://docs.openstack.org/python-openstackclient/train/cli/command-list.html
# set an image root password; default image credentials
virt-customize -a CentOS-7-x86_64-GenericCloud-2009.qcow2 --root-password password:abcd1234
# cirros default login: cirros / gocubsgo.
# import images
openstack image create cirros-0.5.2-x86_64 --file /tmp/cirros-0.5.2-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image create centos-7-x86_64 --file /tmp/CentOS-7-x86_64-GenericCloud-2009.qcow2 --disk-format qcow2 --container-format bare --public
# create flavors
openstack flavor create --id 0 --vcpus 1 --ram 256 --disk 1 m1.nano
openstack flavor create --id 1 --vcpus 1 --ram 2048 --disk 20 m1.small
# create the provider network
openstack network create --share --external --provider-physical-network physnet1 --provider-network-type flat provider
openstack subnet create --network provider --allocation-pool start=192.168.100.221,end=192.168.100.230 --dns-nameserver 114.114.114.114 --gateway 192.168.100.1 --subnet-range 192.168.100.0/24 provider
# create the self-service network
openstack network create selfservice
openstack subnet create --network selfservice --dns-nameserver 114.114.114.114 --gateway 192.168.240.1 --subnet-range 192.168.240.0/24 selfservice
# create a virtual router
openstack router create router
# connect the internal and external networks
openstack router add subnet router selfservice
openstack router set router --external-gateway provider
openstack port list
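To verify the router actually got a gateway address from the provider pool:
openstack port list --router router
openstack router show router -c external_gateway_info -f value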
# create an SSH keypair
ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
# create security group rules
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
# review the created resources
openstack flavor list
openstack image list
openstack security group list
openstack port list
openstack network list
# create instances
openstack server create --flavor m1.nano --image cirros-0.5.2-x86_64 --nic net-id=fe172dec-0522-472a-aed4-da70f6c269a6 --security-group default --key-name mykey provider-instance-01
openstack server create --flavor m1.nano --image cirros-0.5.2-x86_64 --nic net-id=c30c5057-607d-4736-acc9-31927cc9a22c --security-group default --key-name mykey selfservice-instance-01
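The net-id values above are environment-specific; rather than copy-pasting UUIDs they can be looked up by name, e.g. for the provider instance:
# fetch the network ID by name instead of hard-coding it
NET_ID=$(openstack network show provider -f value -c id)
openstack server create --flavor m1.nano --image cirros-0.5.2-x86_64 --nic net-id=$NET_ID --security-group default --key-name mykey provider-instance-01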
# assign floating IPs for external access
openstack floating ip create provider
openstack floating ip list
openstack server add floating ip selfservice-instance-01 10.0.100.227
openstack server list
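With the floating IP attached and the ICMP/SSH rules above in place, the instance should be reachable using the injected key (cirros also accepts its default login):
ping -c 3 10.0.100.227
ssh cirros@10.0.100.227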
# port mapping into the private cloud
iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 1022 -j DNAT --to 192.168.122.231:22
iptables -t nat -D PREROUTING -i eth0 -p tcp --dport 1022 -j DNAT --to 192.168.122.231:22
# Notes
1. cirros 0.5.x needs at least 128 MB of RAM
2. centos7 ERROR nova.compute.manager [instance: e950095b-aa1e-47e5-bbb8-9942715eb1c3] 2021-10-15T02:17:33.897332Z qemu-kvm: cannot set up guest memory 'pc.ram': Cannot allocate memory
The host has run out of memory; a temporary workaround: echo 1 > /proc/sys/vm/overcommit_memory
V. Integrating external Ceph storage
https://docs.ceph.com/en/pacific/rbd/rbd-openstack
https://docs.openstack.org/kolla-ansible/train/reference/storage/external-ceph-guide.html
1. create ceph pool
ceph osd pool create images
ceph osd pool create instances
ceph osd pool create volumes
ceph osd pool create backups
rbd pool init images
rbd pool init instances
rbd pool init volumes
rbd pool init backups
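A quick check that the pools exist and were assigned placement groups:
ceph osd pool ls detail
ceph df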
2. create ceph user
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images' mgr 'profile rbd pool=images'
ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=instances, profile rbd-read-only pool=images' mgr 'profile rbd pool=volumes, profile rbd pool=instances'
ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups' mgr 'profile rbd pool=backups'
ceph auth get-or-create client.nova mon 'profile rbd' osd 'profile rbd pool=instances' mgr 'profile rbd pool=instances'
3. create keyring
ceph auth get-or-create client.glance > /tmp/ceph.client.glance.keyring
ceph auth get-or-create client.cinder > /tmp/ceph.client.cinder.keyring
ceph auth get-or-create client.cinder-backup > /tmp/ceph.client.cinder-backup.keyring
ceph auth get-or-create client.nova > /tmp/ceph.client.nova.keyring
4. kolla/globals.yml
# vim /etc/kolla/globals.yml
......
enable_ceph: "no"
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
nova_backend_ceph: "yes"
......
5. kolla path
# mkdir -p /etc/kolla/config/{glance,cinder/cinder-volume,cinder/cinder-backup,nova}
6. ceph.conf
cat > /tmp/ceph.conf << EOF
[client]
rbd_cache = True
rbd_cache_max_dirty = 134217728
rbd_cache_max_dirty_age = 10
rbd_cache_size = 335544320
[global]
auth client required = cephx
auth cluster required = cephx
auth service required = cephx
fsid = 83baa63b-c421-480a-be24-0e2c59a70e17
mon host = [v2:192.168.100.201:3300,v1:192.168.100.201:6789],[v2:192.168.100.202:3300,v1:192.168.100.202:6789],[v2:192.168.100.203:3300,v1:192.168.100.203:6789]
mon initial members = vm-201,vm-202,vm-203
cluster network = 10.0.110.0/24
public network = 192.168.100.0/24
rbd_default_features = 7
EOF
cp /tmp/ceph.conf /etc/kolla/config/glance
cp /tmp/ceph.conf /etc/kolla/config/cinder
cp /tmp/ceph.conf /etc/kolla/config/nova
7. glance
cat > /etc/kolla/config/glance/glance-api.conf << EOF
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
EOF
scp vm-201:/tmp/ceph.client.glance.keyring /etc/kolla/config/glance
8. cinder
# cat /etc/kolla/passwords.yml |grep cinder_rbd_secret_uuid
cinder_rbd_secret_uuid: 546571a2-4117-4d43-874f-d6a1712f9c10
cat > /etc/kolla/config/cinder/cinder-volume.conf << EOF
[DEFAULT]
enabled_backends=rbd-1
[rbd-1]
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=cinder
backend_host=rbd:volumes
rbd_pool=volumes
volume_backend_name=rbd-1
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
EOF
cat > /etc/kolla/config/cinder/cinder-backup.conf << EOF
[DEFAULT]
backup_ceph_conf=/etc/ceph/ceph.conf
backup_ceph_user=cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool=backups
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
EOF
scp vm-201:/tmp/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup
scp vm-201:/tmp/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume
scp vm-201:/tmp/ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup
9. nova
cat > /etc/kolla/config/nova/nova-compute.conf << EOF
[libvirt]
images_rbd_pool=instances
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=nova
EOF
scp vm-201:/tmp/ceph.client.cinder.keyring /etc/kolla/config/nova
scp vm-201:/tmp/ceph.client.nova.keyring /etc/kolla/config/nova
10. tree
# tree /etc/kolla/config/
/etc/kolla/config/
├── cinder
│ ├── ceph.conf
│ ├── cinder-backup
│ │ ├── ceph.client.cinder-backup.keyring
│ │ └── ceph.client.cinder.keyring
│ ├── cinder-backup.conf
│ ├── cinder-volume
│ │ └── ceph.client.cinder.keyring
│ └── cinder-volume.conf
├── glance
│ ├── ceph.client.glance.keyring
│ ├── ceph.conf
│ └── glance-api.conf
└── nova
├── ceph.client.cinder.keyring
├── ceph.client.nova.keyring
├── ceph.conf
└── nova-compute.conf
11. deploy
kolla-ansible -i multinode deploy
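Once the run finishes, a 1GB test volume confirms Cinder is writing to the volumes pool; the rbd check runs on any Ceph node with the client tools:
openstack volume create --size 1 test-rbd
openstack volume list
ssh vm-201 rbd ls volumes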
VI. TLS support