OpenStack
This walkthrough deploys the Queens (Q) release of OpenStack.
1. Environment Setup
Virtualization must be enabled on every server.
Environment:
controller-15: 2 cores, 5 GB RAM
compute-16: 2 cores, 2 GB RAM
compute-17: 2 cores, 2 GB RAM
Set the hostnames:
cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.88.15 controller
192.168.88.16 compute-01
192.168.88.17 compute-02
EOF

hostnamectl set-hostname controller    # on controller
hostnamectl set-hostname compute-01    # on compute-01
hostnamectl set-hostname compute-02    # on compute-02
Fix ASCII/locale problems; run on all three servers:

cat > /etc/environment <<EOF
LANG=en_US.utf-8
LC_ALL=en_US.utf-8
EOF

source /etc/environment
Install chrony on all nodes:
yum install chrony -y
On the controller, optionally change the upstream NTP server:
[root@controller ~]# vim /etc/chrony.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
# Allow NTP client access from local network.
allow 192.168.0.0/16
On compute-01 and compute-02, point chrony at the controller instead:

[root@compute-01 ~]# vim /etc/chrony.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
server 192.168.88.15 iburst

[root@compute-02 ~]# vim /etc/chrony.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
server 192.168.88.15 iburst
Restart chronyd:
systemctl restart chronyd
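To confirm time sync is working (an optional check; output varies), query chrony on each compute node. The controller (192.168.88.15) should eventually appear with a '^*' selected-source marker:

chronyc sources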
Unpack the openstack-queens packages on all three servers:
tar -xf openstack-queens.tar.gz -C /opt/
Configure a local yum repository on all three servers:

cat > /etc/yum.repos.d/openstack.repo <<EOF
[openstack]
name=openstack
baseurl=file:///opt/openstack
gpgcheck=0
EOF

yum makecache
Install the OpenStack client and openstack-selinux on all nodes:
yum install python-openstackclient openstack-selinux -y
Install on the controller node:
yum install mariadb mariadb-server python2-PyMySQL -y
Add the OpenStack MariaDB configuration:

echo '[mysqld]
bind-address = 192.168.88.15
default-storage-engine = innodb
innodb_file_per_table = on    # one tablespace file per table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8' > /etc/my.cnf.d/openstack.cnf

systemctl start mariadb
systemctl enable mariadb
Secure-initialize the database; skipping this can cause problems with the later sync steps:
mysql_secure_installation
Answers: press Enter (blank current root password), n (do not set a root password), then y, y, y, y.
Create the databases, users, and passwords needed by the core services (changing the sample passwords is recommended):

create database keystone;
grant all on keystone.* to 'keystone'@'localhost' identified by 'KEYSTONE_DBPASS';
grant all on keystone.* to 'keystone'@'%' identified by 'KEYSTONE_DBPASS';

create database glance;
grant all on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
grant all on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';

create database nova;
grant all on nova.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all on nova.* to 'nova'@'%' identified by 'NOVA_DBPASS';

create database nova_api;
grant all on nova_api.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all on nova_api.* to 'nova'@'%' identified by 'NOVA_DBPASS';

create database nova_cell0;
grant all privileges on nova_cell0.* to 'nova'@'localhost' identified by 'NOVA_DBPASS';
grant all privileges on nova_cell0.* to 'nova'@'%' identified by 'NOVA_DBPASS';

create database neutron;
grant all on neutron.* to 'neutron'@'localhost' identified by 'NEUTRON_DBPASS';
grant all on neutron.* to 'neutron'@'%' identified by 'NEUTRON_DBPASS';
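As an optional sanity check on the grants, log in with one of the new accounts (the keystone user is used here; the same pattern works for the others):

mysql -ukeystone -pKEYSTONE_DBPASS -e 'show databases;'    # should list the keystone database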
Install the message queue (RabbitMQ). Add an openstack user with the password RABBIT_PASS and grant it read, write, and configure permissions. The management UI's default login is guest/guest.

yum install rabbitmq-server -y
systemctl start rabbitmq-server.service
systemctl enable rabbitmq-server.service
rabbitmqctl add_user openstack RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmq-plugins enable rabbitmq_management
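To confirm the user and permissions took effect (an optional check):

rabbitmqctl list_users
rabbitmqctl list_permissions

With the management plugin enabled, the web UI is at http://controller:15672 (note that recent RabbitMQ versions only allow the guest/guest account to log in from localhost).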
Install memcached:
yum install memcached python-memcached -y
Change the memcached listen address from the default 127.0.0.1 to 0.0.0.0 (think carefully before doing this in production):
sed -i "s#127.0.0.1#0.0.0.0#g" /etc/sysconfig/memcached
systemctl start memcached
systemctl enable memcached
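A quick check that memcached is now listening on all interfaces (optional):

ss -tnlp | grep 11211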
Install etcd:
yum install etcd -y
When building out a larger deployment, always back up a config file before editing it:
cp /etc/etcd/etcd.conf /etc/etcd/etcd.conf.bak

cat > /etc/etcd/etcd.conf <<EOF
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.88.15:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.88.15:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.88.15:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.88.15:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.88.15:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

systemctl start etcd
systemctl enable etcd
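To verify etcd is healthy (an optional check; the CentOS 7 etcd package speaks the v2 API by default):

etcdctl --endpoints http://192.168.88.15:2379 cluster-health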
2. Keystone Identity Service
Install the config-file editing helper (openstack-utils) on all nodes:
yum install openstack-utils -y
Install keystone on the controller node:
yum install openstack-keystone httpd mod_wsgi -y
cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
Edit the keystone configuration:
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
Sync the database:
su -s /bin/sh -c "keystone-manage db_sync" keystone
Initialize the fernet keys:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
Bootstrap keystone. In Queens the separate admin port 35357 is gone; all authentication goes through port 5000:

keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
Configure and start httpd:
echo "ServerName controller" >> /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl start httpd.service
systemctl enable httpd.service
Create the client environment file used to register services (keystone registers its own API), and persist it so every shell loads it:

echo 'export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2' > /root/admin-openrc

source ~/admin-openrc
echo 'source ~/admin-openrc' >> ~/.bashrc
Only the service project needs to be created:

openstack project create --domain default \
  --description "Service Project" service

Test the keystone service:
openstack token issue
openstack user list
3. Glance Image Service
Create the glance user in keystone and grant it the admin role on the service project:
openstack user create --domain default --password GLANCE_PASS glance
openstack role add --project service --user glance admin
Verify:
openstack role assignment list
Register the service and its API endpoints in keystone:
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
Install the glance service:
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
grep -Ev '^$|#' /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf
cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
grep -Ev '^$|#' /etc/glance/glance-registry.conf.bak > /etc/glance/glance-registry.conf
Populate glance-api.conf with openstack-config:

openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:5000    # Queens moved all keystone auth to port 5000, hence this change
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password GLANCE_PASS
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
Populate glance-registry.conf the same way:

openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://controller:5000    # same port change as above
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password GLANCE_PASS
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
Sync the database:
su -s /bin/sh -c "glance-manage db_sync" glance
Start the services:
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl enable openstack-glance-api.service openstack-glance-registry.service
Upload a test image (--public makes it visible to all projects):

openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 \
  --container-format bare --public
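The image should show an active status afterwards (optional check):

openstack image list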
4. Nova Compute Service (core service)
On the controller node:
Create the nova service user in keystone:
openstack user create --domain default --password NOVA_PASS nova
openstack role add --project service --user nova admin
Register the service and its API endpoints in keystone:
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
Create the placement API user in keystone:
openstack user create --domain default --password PLACEMENT_PASS placement
openstack role add --project service --user placement admin
Register the placement service and its API endpoints:
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
Install the nova services on the controller node:

yum install openstack-nova-api openstack-nova-conductor \
  openstack-nova-console openstack-nova-novncproxy \
  openstack-nova-scheduler openstack-nova-placement-api -y
Generate the configuration:
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.88.15
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password PLACEMENT_PASS
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy True
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
Adjust the placement API httpd configuration:

cat >> /etc/httpd/conf.d/00-nova-placement-api.conf <<EOF
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
EOF
Restart httpd:
systemctl restart httpd
Sync the databases and create the cells:
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
Verify that cell0 and cell1 are registered:
nova-manage cell_v2 list_cells
Enable and start the services:

systemctl enable openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service \
  openstack-nova-console.service
systemctl start openstack-nova-api.service \
  openstack-nova-consoleauth.service openstack-nova-scheduler.service \
  openstack-nova-conductor.service openstack-nova-novncproxy.service
Verify:
openstack compute service list
Install nova on the compute nodes
On the compute nodes, add a Virt repo alongside the base repo:

cat >> /etc/yum.repos.d/CentOS-Base.repo <<EOF
[Virt]
name=CentOS-7 - Virt
baseurl=http://mirrors.aliyun.com/centos/7/virt/x86_64/kvm-common/
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

yum makecache
Install the nova compute service:
yum install openstack-nova-compute -y
yum install openstack-utils -y
Configure it (remember to set my_ip for each node):
cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
grep -Ev '^$|#' /etc/nova/nova.conf.bak > /etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.88.16
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html
openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password PLACEMENT_PASS
openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
Enable and start the services:
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
Back on the controller node, verify:
openstack compute service list --service nova-compute
Map the compute node into the cell:
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Verify:
openstack compute service list
openstack catalog list
openstack image list
nova-status upgrade check
# Optional: to discover future compute nodes automatically, add to /etc/nova/nova.conf on the controller:
# [scheduler]
# discover_hosts_in_cells_interval = 300
5. Neutron Networking Service
On the controller node:
Create the neutron user and grant it the admin role:
openstack user create --domain default --password NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
Register the service and its API endpoints:
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
Install the packages:

yum install openstack-neutron openstack-neutron-ml2 \
  openstack-neutron-linuxbridge ebtables ipset -y
Enable the br_netfilter kernel module on all nodes, as follows:

cat >> /etc/sysctl.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

cat > /etc/rc.sysinit <<EOF
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
  [ -x \$file ] && \$file
done
EOF

echo 'modprobe br_netfilter' > /etc/sysconfig/modules/br_netfilter.modules
chmod 755 /etc/sysconfig/modules/br_netfilter.modules
yum install -y libibverbs
modprobe br_netfilter
/sbin/sysctl -p
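To confirm the module loaded and the sysctl keys took effect (optional check):

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables    # should print 1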
Edit the neutron configuration:

cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -E '^[^#]' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf

openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins    # left empty: provider networks only, no routers yet
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes True
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes True
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password NOVA_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
grep -E '^[^#]' /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini

openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types    # left empty: no self-service networks yet
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset True
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -E '^[^#]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

Change ens33 to match your NIC name:

openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
grep -E '^[^#]' /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini

openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true

cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
grep -E '^[^#]' /etc/neutron/metadata_agent.ini.bak > /etc/neutron/metadata_agent.ini

openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET

Add the neutron section to nova.conf on the controller:

openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy True
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret METADATA_SECRET
Create a symlink so the neutron services can find the ML2 plugin configuration:
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
Sync the database:

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
Restart nova-api:
systemctl restart openstack-nova-api.service
Enable and start the neutron services:

systemctl enable neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
systemctl start neutron-server.service \
  neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
  neutron-metadata-agent.service
Verify:
openstack network agent list
6. Compute Nodes
On the compute nodes:
yum install openstack-neutron-linuxbridge ebtables ipset -y
Edit the neutron configuration:
cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
grep -E '^[^#]' /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf

openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
grep -E '^[^#]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens33
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan False
Add the neutron section to nova.conf:

openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS
Restart nova-compute on the compute node:
systemctl restart openstack-nova-compute.service
Enable and start the neutron agent:
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
Verify the network service (on the controller node):
openstack network agent list
7. Horizon Web Dashboard
Install on the controller node:
yum install openstack-dashboard -y
The new_local_settings configuration (filtered to non-comment lines):
[root@controller ~]# grep -Ev '^#|^\s*$|^.*#' new_local_settings
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG

DEBUG = False
TEMPLATE_DEBUG = DEBUG
WEBROOT = '/dashboard/'
ALLOWED_HOSTS = ['*', ]
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
    "compute": 2,
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
LOCAL_PATH = '/tmp'
SECRET_KEY = '65941f1393ea1c265ad7'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_BACKEND = {
    'name': 'native',
    'can_edit_user': True,
    'can_edit_group': True,
    'can_edit_project': True,
    'can_edit_domain': True,
    'can_edit_role': True,
}
OPENSTACK_HYPERVISOR_FEATURES = {
    'can_set_mount_point': False,
    'can_set_password': False,
    'requires_keypair': False,
}
OPENSTACK_CINDER_FEATURES = {
    'enable_backup': False,
}
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': False,
    'enable_quotas': False,
    'enable_ipv6': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,
    'enable_fip_topology_check': False,
    'default_ipv4_subnet_pool_label': None,
    'default_ipv6_subnet_pool_label': None,
    'profile_support': None,
    'supported_provider_types': ['*'],
    'supported_vnic_types': ['*'],
}
OPENSTACK_HEAT_STACK = {
    'enable_user_pass': True,
}
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
DROPDOWN_MAX_ITEMS = 30
TIME_ZONE = "Asia/Shanghai"
POLICY_FILES_PATH = '/etc/openstack-dashboard'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'null': {'level': 'DEBUG', 'class': 'logging.NullHandler'},
        'console': {'level': 'INFO', 'class': 'logging.StreamHandler'},
    },
    'loggers': {
        'django.db.backends': {'handlers': ['null'], 'propagate': False},
        'requests': {'handlers': ['null'], 'propagate': False},
        'horizon': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'openstack_dashboard': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'novaclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'cinderclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'keystoneclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'glanceclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'neutronclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'heatclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'ceilometerclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'swiftclient': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'openstack_auth': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'nose.plugins.manager': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'django': {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False},
        'iso8601': {'handlers': ['null'], 'propagate': False},
        'scss': {'handlers': ['null'], 'propagate': False},
    },
}
SECURITY_GROUP_RULES = {
    'all_tcp': {'name': _('All TCP'), 'ip_protocol': 'tcp', 'from_port': '1', 'to_port': '65535'},
    'all_udp': {'name': _('All UDP'), 'ip_protocol': 'udp', 'from_port': '1', 'to_port': '65535'},
    'all_icmp': {'name': _('All ICMP'), 'ip_protocol': 'icmp', 'from_port': '-1', 'to_port': '-1'},
    'ssh': {'name': 'SSH', 'ip_protocol': 'tcp', 'from_port': '22', 'to_port': '22'},
    'smtp': {'name': 'SMTP', 'ip_protocol': 'tcp', 'from_port': '25', 'to_port': '25'},
    'dns': {'name': 'DNS', 'ip_protocol': 'tcp', 'from_port': '53', 'to_port': '53'},
    'http': {'name': 'HTTP', 'ip_protocol': 'tcp', 'from_port': '80', 'to_port': '80'},
    'pop3': {'name': 'POP3', 'ip_protocol': 'tcp', 'from_port': '110', 'to_port': '110'},
    'imap': {'name': 'IMAP', 'ip_protocol': 'tcp', 'from_port': '143', 'to_port': '143'},
    'ldap': {'name': 'LDAP', 'ip_protocol': 'tcp', 'from_port': '389', 'to_port': '389'},
    'https': {'name': 'HTTPS', 'ip_protocol': 'tcp', 'from_port': '443', 'to_port': '443'},
    'smtps': {'name': 'SMTPS', 'ip_protocol': 'tcp', 'from_port': '465', 'to_port': '465'},
    'imaps': {'name': 'IMAPS', 'ip_protocol': 'tcp', 'from_port': '993', 'to_port': '993'},
    'pop3s': {'name': 'POP3S', 'ip_protocol': 'tcp', 'from_port': '995', 'to_port': '995'},
    'ms_sql': {'name': 'MS SQL', 'ip_protocol': 'tcp', 'from_port': '1433', 'to_port': '1433'},
    'mysql': {'name': 'MYSQL', 'ip_protocol': 'tcp', 'from_port': '3306', 'to_port': '3306'},
    'rdp': {'name': 'RDP', 'ip_protocol': 'tcp', 'from_port': '3389', 'to_port': '3389'},
}
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES',
                              'LAUNCH_INSTANCE_DEFAULTS',
                              'OPENSTACK_IMAGE_FORMATS',
                              'OPENSTACK_KEYSTONE_DEFAULT_DOMAIN',
                              'CREATE_IMAGE_DEFAULTS',
                              'ENFORCE_PASSWORD_CHECK']
Apply the configuration:
cat new_local_settings > /etc/openstack-dashboard/local_settings
Work around the bug that makes the dashboard unreachable:
sed -i '3a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf
Restart apache and memcached:
systemctl restart httpd.service memcached
Log in at:
http://<controller-ip>/dashboard
Domain: default, user: admin, password: ADMIN_PASS
Create resources from the command line
Create the provider network (the neutron CLI warns that it will be removed, but the command still works):
neutron net-create --shared --provider:physical_network provider --provider:network_type flat WAN
Create the subnet. Replace the addresses below with your host network's range; the flat network allocates instance IPs from it:

neutron subnet-create --name subnet-wan --allocation-pool \
  start=192.168.88.100,end=192.168.88.200 --dns-nameserver 223.5.5.5 \
  --gateway 192.168.88.2 WAN 192.168.88.0/24
Create a flavor (this can also be done in the web UI):
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
Generate an SSH key pair and register it as the default key injected into instances launched from the controller:
ssh-keygen -q -N "" -f ~/.ssh/id_rsa
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
Open up the default security group (named default):
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
You can also define your own security groups.
To reach the console URLs by name, add the controller entry to the hosts file of the machine running your browser. A CLI boot test follows below.
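A sketch of the CLI boot test referenced above (the instance name test-vm is arbitrary; it uses the WAN network, m1.nano flavor, cirros image, and mykey keypair created earlier):

NET_ID=$(openstack network show WAN -f value -c id)
openstack server create --flavor m1.nano --image cirros \
  --nic net-id=$NET_ID --security-group default --key-name mykey test-vm
openstack server list    # wait for the instance to reach ACTIVE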
8. Adding Compute Nodes
Scaling out simply repeats the earlier compute-node steps. If you copy existing config files with scp, watch the file ownership and permissions.
If the service fails on boot with:
PMD: net_mlx4: cannot load glue library: libibverbs.so.1: cannot open shared object file: No such file or directory
fix it with:
yum install -y libibverbs
[root@compute-02 ~]# yum install openstack-nova-compute openstack-neutron-linuxbridge ebtables ipset -y
[root@compute-01 ~]# scp /etc/nova/nova.conf compute-02:/etc/nova/
[root@compute-01 ~]# scp /etc/neutron/neutron.conf compute-02:/etc/neutron/neutron.conf
[root@compute-01 ~]# scp /etc/neutron/plugins/ml2/linuxbridge_agent.ini compute-02:/etc/neutron/plugins/ml2/linuxbridge_agent.ini
Update my_ip on the new node:
[root@compute-02 ~]# vim /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.88.17
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
Back on the controller node, map the new host:
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
9. Migrating the Glance Image Service
On the new node (192.168.88.17), install MariaDB and python2-PyMySQL:
yum install mariadb mariadb-server python2-PyMySQL -y

echo '[mysqld]
bind-address = 192.168.88.17
default-storage-engine = innodb
innodb_file_per_table    # one tablespace file per table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8' > /etc/my.cnf.d/openstack.cnf

systemctl enable mariadb
systemctl start mariadb
Secure-initialize the database (answers: Enter, n, y, y, y, y):
mysql_secure_installation
Stop the glance services on the controller node:
systemctl stop openstack-glance-api.service openstack-glance-registry.service
Dump the glance database on the controller node:
mysqldump -B glance > glance.sql
Copy the dump to the new node:
[root@controller ~]# scp glance.sql 192.168.88.17:`pwd`
Import it on the new node:
mysql < glance.sql
Create the glance user and grants (the -B dump already recreates the database itself):
grant all on glance.* to 'glance'@'localhost' identified by 'GLANCE_DBPASS';
grant all on glance.* to 'glance'@'%' identified by 'GLANCE_DBPASS';
Install and configure the glance service on the new node:
yum install openstack-glance -y
cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
cp /etc/glance/glance-registry.conf /etc/glance/glance-registry.conf.bak
Copy the configs over from the controller:
[root@controller ~]# scp -r /etc/glance/glance-api.conf 192.168.88.17:/etc/glance/
[root@controller ~]# scp -r /etc/glance/glance-registry.conf 192.168.88.17:/etc/glance/
Point both configs at the local database:
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@192.168.88.17/glance
vim /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@192.168.88.17/glance
Start the services:
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
Copy the images from the controller node:
scp -r /var/lib/glance/images/* 192.168.88.17:/var/lib/glance/images/
On the new node, fix the ownership:
chown glance.glance -R /var/lib/glance/images/
On the controller node, rewrite the image endpoints registered in keystone:
mysqldump keystone endpoint > endpoint.sql
cp endpoint.sql endpoint.sql.bak
[root@controller ~]# vim endpoint.sql
:%s#http://controller:9292#http://192.168.88.17:9292#gc
MariaDB [(none)]> use keystone
MariaDB [keystone]> source ~/endpoint.sql
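To confirm the rewrite took effect (optional check; the image endpoints should now show 192.168.88.17):

openstack endpoint list --service image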
Update the glance endpoint in every nova node's configuration:
[root@controller ~]# vim /etc/nova/nova.conf
[glance]
api_servers = http://192.168.88.17:9292
Make the same change on the compute nodes:
[root@compute-01 ~]# vim /etc/nova/nova.conf
[root@compute-02 ~]# vim /etc/nova/nova.conf
Restart the services.
On the controller node:
systemctl restart openstack-nova-api
On the compute nodes:
systemctl restart openstack-nova-compute
Log out of the web UI, log back in, and test creating an instance. The image migration is complete.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
10. Troubleshooting
Truncate all .log files under a directory (/var/log as the example):
find /var/log/ -type f | awk '{print ">" $0}' | bash
Search all logs for the keyword "error":
find /var/log/ -type f | xargs grep -i error
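For a single service it is often quicker to follow its journal live (nova-compute is just an example here):

journalctl -u openstack-nova-compute -f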
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
11. Cinder Block Storage
Attach two extra disks to a compute node for this test.
On the controller node:
Create the database and grants:
create database cinder;
grant all privileges on cinder.* to 'cinder'@'localhost' identified by 'CINDER_DBPASS';
grant all privileges on cinder.* to 'cinder'@'%' identified by 'CINDER_DBPASS';
Create the cinder service user in keystone (as was done for glance, nova, and neutron) and grant it the admin role:
openstack user create --domain default --password CINDER_PASS cinder
openstack role add --project service --user cinder admin
Register the services and API endpoints in keystone:
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
Install the packages and configure:
yum install openstack-cinder -y
cp /etc/cinder/cinder.conf{,.bak}
grep -Ev '^$|#' /etc/cinder/cinder.conf.bak >/etc/cinder/cinder.conf
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 192.168.88.15
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292

Because glance was migrated to 192.168.88.17 in section 9, point cinder at the new endpoint instead:
[root@controller ~]# vim /etc/cinder/cinder.conf
glance_api_servers = http://192.168.88.17:9292
Sync the database:
su -s /bin/sh -c "cinder-manage db sync" cinder
Point nova at cinder's region, then start the services:
openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
Verify:
openstack volume service list
Use compute-01 as a part-time cinder storage node (an LVM block-storage demo):
yum install lvm2 device-mapper-persistent-data -y
Rescan the SCSI buses so the new disks are detected:
echo '- - -' > /sys/class/scsi_host/host0/scan
echo '- - -' > /sys/class/scsi_host/host1/scan
echo '- - -' > /sys/class/scsi_host/host2/scan
[root@compute-01 ~]# ls /sys/class/scsi_host/
host0 host1 host2
Create the PVs and VGs:
pvcreate /dev/sdb
pvcreate /dev/sdc
vgcreate cinder-ssd /dev/sdb
vgcreate cinder-sata /dev/sdc
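Confirm the physical volumes and volume groups exist (optional check):

pvs
vgs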
Edit /etc/lvm/lvm.conf and insert a filter line (around line 130, inside the devices section) so LVM only scans the cinder disks:
filter = [ "a/sdb/", "a/sdc/", "r/.*/" ]
Install:
yum install openstack-cinder targetcli python-keystone -y
Configure (the empty boilerplate sections of the stock file are omitted here):
cp /etc/cinder/cinder.conf{,.bak}

[root@compute-01 ~]# vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 192.168.88.16
glance_api_servers = http://192.168.88.17:9292
enabled_backends = ssd,sata
[database]
connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = CINDER_PASS
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[ssd]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-ssd
iscsi_protocol = iscsi
iscsi_helper = lioadm
volume_backend_name = ssd
[sata]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-sata
iscsi_protocol = iscsi
iscsi_helper = lioadm
volume_backend_name = sata
Start the services:
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
Verify:
[root@controller ~]# openstack volume service list
Attach the volume to an instance (web UI), write some data into it, then umount it inside the guest and detach the volume.
To read the volume's data directly on the storage node, look up the volume ID, copy the LV out with dd, and loop-mount the result:
dd if=/dev/mapper/cinder--sata-volume--0191bfd0--d408--489e--91f7--36912ad400d3 of=/tmp/test.txt
mount -o loop /tmp/test.txt /mnt/
[root@compute-01 ~]# ls /mnt/
1
==============
Create an instance and confirm it gets the volume.
12. Backing Cinder with NFS
Prepare an NFS server; the remaining steps run on the storage node unless noted.
Install the NFS client tools on the compute (storage) node:
yum install -y nfs-utils
NFS server export configuration:
[root@host-192-168-88-109 ~]# vi /etc/exports
/data 192.168.88.0/24(rw,async,no_root_squash,no_all_squash)
Edit /etc/cinder/cinder.conf: add nfs to enabled_backends and define the [nfs] backend:
[DEFAULT]
transport_url = rabbit://openstack:RABBIT_PASS@controller
auth_strategy = keystone
my_ip = 192.168.88.16
glance_api_servers = http://controller:9292
enabled_backends = ssd,sata,nfs
[nfs]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config = /etc/cinder/nfs_shares
volume_backend_name = nfs
List the share in /etc/cinder/nfs_shares:
[root@compute-01 ~]# vim /etc/cinder/nfs_shares
192.168.88.109:/data
chown root.cinder /etc/cinder/nfs_shares
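Before restarting cinder-volume, it is worth confirming the export is visible from the storage node (showmount ships with nfs-utils; the NFS server IP is the one configured above):

showmount -e 192.168.88.109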
Restart cinder-volume:
systemctl restart openstack-cinder-volume.service
Verify:
[root@controller ~]# openstack volume service list
Create a volume and attach it to an instance (web UI).
13. Cold Migration of Instances
Set up passwordless SSH trust between the nova users on all compute nodes.
On every nova node, change the nova user's shell to /bin/bash:
usermod -s /bin/bash nova
Start on compute-01: su to the nova user and generate a key pair:
[root@compute-01 ~]# su nova
bash-4.2$ cd ~
bash-4.2$ ssh-keygen -t rsa -q -N ''
Set up passwordless login to itself:
bash-4.2$ ssh nova@192.168.88.16
In the .ssh directory, install the public key:
bash-4.2$ cd .ssh/
bash-4.2$ cp -fa id_rsa.pub authorized_keys
Send the .ssh directory to the other compute nodes' /var/lib/nova; mind the owner and group (nova):
bash-4.2$ exit
[root@compute-01 ~]# scp -rp /var/lib/nova/.ssh/ compute-02:/var/lib/nova/
Fix the ownership on the other compute nodes:
[root@compute-02 ~]# chown nova.nova -R /var/lib/nova/.ssh/
Test the passwordless login:
[root@compute-01 ~]# su nova
bash-4.2$ ssh nova@compute-02
Last login: Tue Nov 1 20:50:16 2022 from compute-01
-bash-4.2$ exit
logout
Connection to compute-02 closed.
Back on the controller node, set the scheduler filters needed for migration:
vim /etc/nova/nova.conf
[DEFAULT]
scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
Restart the scheduler:
systemctl restart openstack-nova-scheduler.service
On all compute nodes, edit the nova configuration:
vim /etc/nova/nova.conf
[DEFAULT]
allow_resize_to_same_host = True
Restart nova-compute:
systemctl restart openstack-nova-compute.service
Migrate from the UI. Note that if this configuration is added later, instances created before it cannot be migrated.
Create an instance, shut it off, start the cold migration, confirm the migration, and verify that it succeeded.
14. VXLAN Layer-3 Networking
VLAN supports IDs 1-4094; VXLAN supports 4096 x 4096 - 2 VNIs (about 16.7 million).
1. Delete all instances that use the flat provider network.
2. Add a LAN segment: add a new NIC to every node and configure the segment on it.
Add the ens36 NIC on all nodes and bring it up:
cat > /etc/sysconfig/network-scripts/ifcfg-ens36 <<EOF
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
NAME=ens36
DEVICE=ens36
ONBOOT=yes
IPADDR=172.16.1.15
NETMASK=255.255.255.0
EOF
# IPADDR shown for the controller; use a unique address per node (e.g. 172.16.1.16 on compute-01)
ifup ens36
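Verify the new interface and the overlay connectivity between nodes (optional check; the compute address follows the local_ip values used below):

ip addr show ens36
ping -c 2 172.16.1.16    # from the controller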
Edit the configuration on the controller node:

vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true

vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
[ml2_type_vxlan]
vni_ranges = 1:1000

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[vxlan]
enable_vxlan = True
local_ip = 172.16.1.15
l2_population = True

cp /etc/neutron/l3_agent.ini{,.bak}
grep -E '^[^#]' /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini

vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
Restart the services and enable the L3 agent:
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
systemctl enable neutron-l3-agent.service
Verify:
openstack network agent list
Edit the compute-node configuration:
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
grep -E '^[^#]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[vxlan]
enable_vxlan = True
local_ip = 172.16.1.16
l2_population = True
Restart the agent:
systemctl restart neutron-linuxbridge-agent.service
Back on the controller node, enable routers in the dashboard:
vim /etc/openstack-dashboard/local_settings
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
systemctl restart httpd.service memcached.service
In the web UI:
Create a VPC (self-service) network.
Create two instances for testing.
Create a router.
Attach the networks to it.
Review the network topology.
Test that the networks are connected.
Associate a floating IP with a server.
Try the connection; it should succeed. A CLI equivalent of the floating-IP step follows below.
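A rough CLI equivalent of the floating-IP step (the server name test-vm is illustrative; the allocated address will differ):

openstack floating ip create WAN
openstack server add floating ip test-vm 192.168.88.101    # use the address just allocated
ssh cirros@192.168.88.101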