OpenStack部署
环境
部署版本:OpenStack Train (https://docs.openstack.org/install-guide)
操作系统:CentOS-7
节点规划:
一台控制节点(con)
配置 - 1C4G+50G
两台计算节点/存储节点(com1,com2)
配置 - 2C4G+50G+50G
网络规划:
管理网(OpenStack服务主机节点之间的通信网络)
外网(外部访问使用OpenStack服务所用的网络,比如外部访问VM)
内网(虚拟机之间通信所用网络)
[存储网(使用Ceph一类的存储后端需要,本文使用的是普通磁盘存储)]
控制节点 con
网卡ens33(内网/管理网)
IPADDR 192.168.73.137
NETMASK 255.255.255.0
网卡ens38(外网)192.168.67.0/24
计算节点 com1
网卡ens33(内网/管理网)
IPADDR 192.168.73.135
NETMASK 255.255.255.0
网卡ens34(外网)192.168.67.0/24
计算节点 com2
网卡ens33(内网/管理网)
IPADDR 192.168.73.136
NETMASK 255.255.255.0
网卡ens34(外网)192.168.67.0/24
部署准备
密码
openstack用户/openstack各服务用户/数据库各服务用户/rabbitmq : a
配置IP
[ 控制节点con(计算节点类似,但是控制节点ens38和计算节点ens34的BOOTPROTO设置为none,且不配置ip)]
# vim /etc/sysconfig/network-scripts/ifcfg-ens33
BOOTPROTO="static"
ONBOOT="yes"
IPADDR="192.168.73.137"
NETMASK="255.255.255.0"
GATEWAY="192.168.73.2"
DNS1="8.8.8.8"
# systemctl restart network
(关闭防火墙和selinux)
# systemctl stop firewalld
# systemctl disable firewalld
# setenforce 0
# vim /etc/selinux/config
SELINUX=disabled
配置 host文件
[ 所有节点上 ]
# vim /etc/hosts
192.168.73.135 com1
192.168.73.136 com2
192.168.73.137 con
免密登录
[ 控制节点con(其他节点类似)]
# ssh-keygen
# ssh-copy-id -i /root/.ssh/id_rsa.pub root@com1
# ssh-copy-id -i /root/.ssh/id_rsa.pub root@com2
配置ntp
[ 控制节点con ]
# vim /etc/chrony.conf
allow 192.168.73.0/24
# systemctl enable chronyd.service
# systemctl restart chronyd.service
[ 计算节点 ]
# vim /etc/chrony.conf
server con iburst
# systemctl enable chronyd.service
# systemctl restart chronyd.service
# chronyc sources
210 Number of sources = 1
MS Name/IP address Stratum Poll Reach LastRx Last sample
===============================================================================
^* con 3 6 1 57 -32us[ -197ms] +/- 17ms
openstack包源
[ 所有节点 ]
# yum install centos-release-openstack-train -y
# yum upgrade -y
# yum install python-openstackclient -y
# yum install openstack-selinux -y
安装SQL DB
[ 控制节点con ]
安装包、修改配置文件
# yum install mariadb mariadb-server python2-PyMySQL -y
# vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.73.137
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
启动服务、设置密码
# systemctl enable mariadb.service
# systemctl start mariadb.service
# mysql_secure_installation
安装RabbitMQ
[ 控制节点con ]
# yum install rabbitmq-server -y
# systemctl enable rabbitmq-server.service
# systemctl start rabbitmq-server.service
# rabbitmqctl add_user openstack a
# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
安装memcached
[ 控制节点con ]
# yum install memcached python-memcached -y
# vim /etc/sysconfig/memcached
OPTIONS="-l 127.0.0.1,::1,con"
# systemctl enable memcached.service
# systemctl start memcached.service
安装etcd
[ 控制节点con ]
# yum install etcd -y
# vim /etc/etcd/etcd.conf
ETCD_LISTEN_PEER_URLS="http://192.168.73.137:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.73.137:2379"
ETCD_NAME="con"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.73.137:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.73.137:2379"
ETCD_INITIAL_CLUSTER="con=http://192.168.73.137:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
# systemctl enable etcd
# systemctl start etcd
部署
认证服务keystone
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE keystone;
> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'a';
> exit
安装配置
# yum install openstack-keystone httpd mod_wsgi -y
# cd /etc/keystone
# cp keystone.conf keystone.confbak
# cat keystone.confbak | grep -v ^# | grep -v ^$ > keystone.conf
# vim keystone.conf
[DEFAULT]
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:a@con/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_receipts]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[jwt_tokens]
[ldap]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[receipt]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[token]
provider = fernet
[tokenless_auth]
[totp]
[trust]
[unified_limit]
[wsgi]
# su -s /bin/sh -c "keystone-manage db_sync" keystone
(然后进入数据库查看是否有keystone数据表(show databases; use keystone; show tables;))
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# keystone-manage bootstrap --bootstrap-password a --bootstrap-admin-url http://con:5000/v3/ --bootstrap-internal-url http://con:5000/v3/ --bootstrap-public-url http://con:5000/v3/ --bootstrap-region-id RegionOne
# vim /etc/httpd/conf/httpd.conf
ServerName con
# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
# systemctl enable httpd.service
# systemctl start httpd.service
设置环境变量
# cd
# vim openrc
export OS_USERNAME=admin
export OS_PASSWORD=a
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://con:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
# vim /etc/rc.d/rc.local
source /root/openrc
# vim /root/.bashrc
source /root/openrc
# source openrc
# openstack token issue
创建用户、角色等
openstack project create --domain default --description "Service Project" service
镜像服务glance
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE glance;
> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'a';
> exit
配置glance服务用户
openstack user create --domain default --password a glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://con:9292
openstack endpoint create --region RegionOne image internal http://con:9292
openstack endpoint create --region RegionOne image admin http://con:9292
安装配置
# yum install openstack-glance -y
# cd /etc/glance
# cp glance-api.conf glance-api.confbak
# cat glance-api.confbak | grep -v ^# |grep -v ^$ > glance-api.conf
# vim glance-api.conf
[DEFAULT]
[cinder]
[cors]
[database]
connection = mysql+pymysql://glance:a@con/glance
[file]
[glance.store.http.store]
[glance.store.rbd.store]
[glance.store.sheepdog.store]
[glance.store.swift.store]
[glance.store.vmware_datastore.store]
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
www_authenticate_uri = http://con:5000
auth_url = http://con:5000
memcached_servers = con:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = a
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
# su -s /bin/sh -c "glance-manage db_sync" glance
# systemctl enable openstack-glance-api.service
# systemctl start openstack-glance-api.service
上传镜像到glance
# cd
# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
# glance image-create --name "cirros" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public
(先将qcow2镜像转换为raw格式,后面boot-from-volume创建虚拟机时需要raw镜像)
# qemu-img convert -f qcow2 -O raw cirros-0.4.0-x86_64-disk.img cirros.raw
# glance image-create --name "cirros_raw" --file cirros.raw --disk-format raw --container-format bare --visibility public
安置服务placement
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE placement;
> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'a';
> exit
配置placement服务用户
openstack user create --domain default --password a placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://con:8778
openstack endpoint create --region RegionOne placement internal http://con:8778
openstack endpoint create --region RegionOne placement admin http://con:8778
配置安装
# yum install openstack-placement-api -y
# cd /etc/placement/
# cp placement.conf placement.confbak
# cat placement.confbak | grep -v ^# |grep -v ^$ > placement.conf
# vim placement.conf
[DEFAULT]
[api]
auth_strategy = keystone
[cors]
[keystone_authtoken]
auth_url = http://con:5000/v3
memcached_servers = con:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = a
[oslo_policy]
[placement]
[placement_database]
connection = mysql+pymysql://placement:a@con/placement
[profiler]
# su -s /bin/sh -c "placement-manage db sync" placement
# vim /etc/httpd/conf.d/00-placement-api.conf
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
# systemctl restart httpd
# placement-status upgrade check
# yum install -y python-osc-placement
# openstack --os-placement-api-version 1.2 resource class list --sort-column name
# openstack --os-placement-api-version 1.6 trait list --sort-column name
计算服务nova
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE nova_api;
> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'a';
> CREATE DATABASE nova;
> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'a';
> CREATE DATABASE nova_cell0;
> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'a';
> exit
配置nova服务用户
openstack user create --domain default --password a nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://con:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://con:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://con:8774/v2.1
安装配置
# yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:a@con:5672/
my_ip = 192.168.73.137
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:a@con/nova_api
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
connection = mysql+pymysql://nova:a@con/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://con:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://con:5000/
auth_url = http://con:5000/
memcached_servers = con:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = a
[libvirt]
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://con:5000/v3
username = placement
password = a
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]
# su -s /bin/sh -c "nova-manage api_db sync" nova
# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# su -s /bin/sh -c "nova-manage db sync" nova
# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
# systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
# systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[ 计算节点com1、com2 ]
安装配置
# yum install openstack-nova-compute -y
# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:a@con:5672/
my_ip = 192.168.73.135
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://con:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
www_authenticate_uri = http://con:5000/
auth_url = http://con:5000/
memcached_servers = con:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = a
[libvirt]
virt_type = qemu
[metrics]
[mks]
[neutron]
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://con:5000/v3
username = placement
password = a
[powervm]
[privsep]
[profiler]
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://con:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]
[zvm]
# systemctl enable libvirtd.service openstack-nova-compute.service
# systemctl start libvirtd.service openstack-nova-compute.service
[ 控制节点con ]
# nova service-list
# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
网络服务neutron
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE neutron;
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'a';
> exit
配置neutron服务用户
openstack user create --domain default --password a neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://con:9696
openstack endpoint create --region RegionOne network internal http://con:9696
openstack endpoint create --region RegionOne network admin http://con:9696
安装配置
# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
# vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:a@con
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[cors]
[database]
connection = mysql+pymysql://neutron:a@con/neutron
[keystone_authtoken]
www_authenticate_uri = http://con:5000
auth_url = http://con:5000
memcached_servers = con:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = a
[nova]
auth_url = http://con:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = a
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[privsep]
[ssl]
# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
# vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[linux_bridge]
physical_interface_mappings = provider:ens38
[vxlan]
enable_vxlan = true
local_ip = 192.168.73.137
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = con
metadata_proxy_shared_secret = a
[cache]
# vim /etc/nova/nova.conf
[neutron]
auth_url = http://con:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = a
service_metadata_proxy = true
metadata_proxy_shared_secret = a
# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# systemctl restart openstack-nova-api.service
# systemctl enable neutron-server.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-linuxbridge-agent.service neutron-l3-agent.service
# systemctl start neutron-server.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-linuxbridge-agent.service neutron-l3-agent.service
[ 计算节点com1,com2 ]
安装配置
# yum install openstack-neutron-linuxbridge ebtables ipset
# vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:a@con
auth_strategy = keystone
[cors]
[database]
[keystone_authtoken]
www_authenticate_uri = http://con:5000
auth_url = http://con:5000
memcached_servers = con:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = a
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[privsep]
[ssl]
# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[DEFAULT]
[linux_bridge]
physical_interface_mappings = provider:ens34
[vxlan]
enable_vxlan = true
local_ip = 192.168.73.135
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
# vim /etc/rc.sysinit
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done
# vim /etc/sysconfig/modules/br_netfilter.modules
modprobe br_netfilter
# chmod 755 /etc/sysconfig/modules/br_netfilter.modules
# lsmod | grep br_netfilter
# vim /etc/nova/nova.conf
[neutron]
auth_url = http://con:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = a
# systemctl restart openstack-nova-compute.service
# systemctl enable neutron-linuxbridge-agent.service
# systemctl start neutron-linuxbridge-agent.service
块存储服务cinder
[ 控制节点con ]
创建数据库
# mysql -u root -pa
> CREATE DATABASE cinder;
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'a';
> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'a';
> exit
配置cinder服务用户
openstack user create --domain default --password a cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://con:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://con:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://con:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://con:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://con:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://con:8776/v3/%\(project_id\)s
安装配置
# yum install openstack-cinder -y
# vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:a@con
auth_strategy = keystone
my_ip = 192.168.73.137
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:a@con/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://con:5000
auth_url = http://con:5000
memcached_servers = con:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = a
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[privsep]
[profiler]
[sample_castellan_source]
[sample_remote_file_source]
[service_user]
[ssl]
[vault]
# su -s /bin/sh -c "cinder-manage db sync" cinder
# vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
# systemctl restart openstack-nova-api.service
# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
[ 计算节点com1、com2 ]
准备块设备
# yum install lvm2 device-mapper-persistent-data -y
# systemctl enable lvm2-lvmetad.service
# systemctl start lvm2-lvmetad.service
# lsblk
# pvcreate /dev/sdb
# vgcreate cinder-volumes /dev/sdb
# vim /etc/lvm/lvm.conf
devices {
...
filter = [ "a/sda/", "a/sdb/", "r/.*/" ]
安装配置
# yum install openstack-cinder targetcli python-keystone -y
# vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:a@con
auth_strategy = keystone
my_ip = 192.168.73.135
enabled_backends = lvm
glance_api_servers = http://con:9292
[backend]
[backend_defaults]
[barbican]
[brcd_fabric_example]
[cisco_fabric_example]
[coordination]
[cors]
[database]
connection = mysql+pymysql://cinder:a@con/cinder
[fc-zone-manager]
[healthcheck]
[key_manager]
[keystone_authtoken]
www_authenticate_uri = http://con:5000
auth_url = http://con:5000
memcached_servers = con:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = a
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
[nova]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[oslo_versionedobjects]
[privsep]
[profiler]
[sample_castellan_source]
[sample_remote_file_source]
[service_user]
[ssl]
[vault]
# systemctl enable openstack-cinder-volume.service target.service
# systemctl start openstack-cinder-volume.service target.service
创建虚拟机
创建网络
provider网
# openstack network create --share --external --provider-physical-network provider --provider-network-type flat provider
# openstack subnet create --network provider --allocation-pool start=192.168.67.130,end=192.168.67.200 --dns-nameserver 8.8.4.4 --gateway 192.168.67.1 --subnet-range 192.168.67.0/24 provider
self-service网
# openstack network create net1
# openstack subnet create --network net1 --dns-nameserver 8.8.4.4 --gateway 172.16.1.1 --subnet-range 172.16.1.0/24 net1-sub
# openstack router create router1
# openstack router add subnet router1 net1-sub
# openstack router set router1 --external-gateway provider
# ip netns
qdhcp-ae93bdde-9746-4bd2-b876-a424c35e7dd6 (id: 2)
qdhcp-5a1587aa-2c80-4463-b41d-580320311dde (id: 1)
qrouter-6f142989-b721-47cf-9a9e-c769d4272e4e (id: 0)
# openstack port list --router router1
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+
| ID | Name | MAC Address | Fixed IP Addresses | Status |
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+
| 3e61517d-9441-4d43-b9df-8bda3dcee79b | | fa:16:3e:40:88:65 | ip_address='172.16.1.1', subnet_id='16eab405-1ecc-4946-be59-bb0920c929b5' | ACTIVE |
| 9f46c36d-b01f-49c4-aff2-d062cbd913b3 | | fa:16:3e:09:52:b7 | ip_address='192.168.67.140', subnet_id='c2badbea-57d9-4dad-81d0-15faf15d99db' | ACTIVE |
+--------------------------------------+------+-------------------+-------------------------------------------------------------------------------+--------+
# ping 192.168.67.140
PING 192.168.67.140 (192.168.67.140) 56(84) bytes of data.
64 bytes from 192.168.67.140: icmp_seq=1 ttl=64 time=0.050 ms
64 bytes from 192.168.67.140: icmp_seq=2 ttl=64 time=0.049 ms
64 bytes from 192.168.67.140: icmp_seq=3 ttl=64 time=0.054 ms
创建虚拟机
# openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 mini
# openstack security group rule create --proto icmp default
# openstack security group rule create --proto tcp --dst-port 22 default
# openstack server create --flavor mini --image cirros --nic net-id=ae93bdde-9746-4bd2-b876-a424c35e7dd6 --security-group default provider-instance
# nova list
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
| f8bde0d3-aabc-4d66-a88b-65e7cb31cf73 | provider-instance | ACTIVE | - | Running | provider=192.168.67.186 |
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
# ping 192.168.67.186
PING 192.168.67.186 (192.168.67.186) 56(84) bytes of data.
64 bytes from 192.168.67.186: icmp_seq=1 ttl=64 time=13.6 ms
64 bytes from 192.168.67.186: icmp_seq=2 ttl=64 time=0.980 ms
# openstack server create --flavor mini --image cirros --nic net-id=5a1587aa-2c80-4463-b41d-580320311dde --security-group default self-instance
# openstack console url show self-instance
# openstack volume create --size 1 vol
# openstack server add volume provider-instance vol
# openstack server create --flavor mini --image cirros_raw --nic net-id=5a1587aa-2c80-4463-b41d-580320311dde --security-group default --boot-from-volume 1 vol-inst
# nova list
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
| f8bde0d3-aabc-4d66-a88b-65e7cb31cf73 | provider-instance | ACTIVE | - | Running | provider=192.168.67.186 |
| c384e01d-871a-4487-9a52-104bc04b27eb | self-instance | ACTIVE | - | Running | net1=172.16.1.205 |
| 73916b31-0e31-4065-a469-61deafdab3aa | vol-inst | ACTIVE | - | Running | net1=172.16.1.204 |
+--------------------------------------+-------------------+--------+------------+-------------+-------------------------+
配置迁移
在所有计算节点上
# vim /etc/libvirt/libvirtd.conf :
listen_tls = 0 # Disable the listen_tls flag
listen_tcp = 1 # Enable the listen_tcp flag
auth_tcp = "none" # 这里设置为none,意思不需要认证,可以设置sasl方式的认证
listen_addr = "0.0.0.0"
# vim /etc/sysconfig/libvirtd :
LIBVIRTD_ARGS="--listen" #使libvirtd监听TCP/IP连接
# iptables -A INPUT -p tcp -m tcp --dport 16509 -j ACCEPT
# service iptables save
# service libvirtd restart
# virsh
(在virsh交互提示符下测试TCP连接)
virsh # connect qemu+tcp://com1/system
在控制节点上
# nova live-migration self-instance
(迁移完成后,查看self-instance的host是否改变)
附
重启openstack服务
控制节点
systemctl restart chronyd.service
systemctl restart mariadb.service
systemctl restart rabbitmq-server.service
systemctl restart memcached.service
systemctl restart etcd
systemctl restart openstack-glance-api.service
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service
计算节点
systemctl restart libvirtd.service target.service
systemctl restart openstack-nova-compute.service
systemctl restart neutron-linuxbridge-agent.service
systemctl restart openstack-cinder-volume.service