OpenStack Train script walkthrough (cloud computing competition)
Building OpenStack Train with the scripts designated for the 2022 national cloud computing competition
iaas-pre.sh
Initialization script that prepares both nodes: it sets the hostnames, host mappings, passwordless SSH, and so on.
#!/bin/bash
source /etc/openstack/openrc.sh
#Load the environment variables
#Welcome page
cat > /etc/motd <<EOF
################################
# Welcome to OpenStack #
################################
EOF
#Print a welcome banner on every login by writing it to /etc/motd
#selinux
sed -i 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
#Turn off SELinux now and disable it permanently in /etc/selinux/config
#firewalld
systemctl stop firewalld
systemctl disable firewalld >> /dev/null 2>&1
#Stop and disable firewalld, sending any output to /dev/null
#NetworkManager
systemctl stop NetworkManager >> /dev/null 2>&1
systemctl disable NetworkManager >> /dev/null 2>&1
yum remove -y NetworkManager firewalld
systemctl restart network
#Stop and disable NetworkManager, remove the NetworkManager and firewalld packages, then restart the network service.
#iptables
yum install iptables-services -y
if [ 0 -ne $? ]; then
echo -e "\033[31mThe installation source configuration errors\033[0m"
exit 1
fi
systemctl restart iptables
iptables -F
iptables -X
iptables -Z
/usr/sbin/iptables-save
systemctl stop iptables
systemctl disable iptables
#Install iptables-services, flush all firewall rules, then stop and disable iptables
# install package
sed -i -e 's/#UseDNS yes/UseDNS no/g' -e 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/g' /etc/ssh/sshd_config
#Edit /etc/ssh/sshd_config to disable DNS lookups and GSSAPI authentication, which speeds up SSH logins.
yum upgrade -y
yum install python-openstackclient openstack-selinux openstack-utils crudini expect lsof net-tools vim -y
#Upgrade the system and install the packages OpenStack needs; crudini and expect are tools the scripts rely on later
#hosts
if [[ `ip a |grep -w $HOST_IP ` != '' ]];then
hostnamectl set-hostname $HOST_NAME
elif [[ `ip a |grep -w $HOST_IP_NODE ` != '' ]];then
hostnamectl set-hostname $HOST_NAME_NODE
else
hostnamectl set-hostname $HOST_NAME
fi
sed -i -e "/$HOST_NAME/d" -e "/$HOST_NAME_NODE/d" /etc/hosts
echo "$HOST_IP $HOST_NAME" >> /etc/hosts
echo "$HOST_IP_NODE $HOST_NAME_NODE" >> /etc/hosts
#Set the hostname based on the node's IP address, then write the hostname-to-IP mappings into /etc/hosts
#ssh
if [[ ! -s ~/.ssh/id_rsa.pub ]];then
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q -b 2048
fi
name=`hostname`
if [[ $name == $HOST_NAME ]];then
expect -c "set timeout -1;
spawn ssh-copy-id -i /root/.ssh/id_rsa $HOST_NAME_NODE;
expect {
*password:* {send -- $HOST_PASS_NODE\r;
expect {
*denied* {exit 2;}
eof}
}
*(yes/no)* {send -- yes\r;exp_continue;}
eof {exit 1;}
}
"
else
expect -c "set timeout -1;
spawn ssh-copy-id -i /root/.ssh/id_rsa $HOST_NAME;
expect {
*password:* {send -- $HOST_PASS\r;
expect {
*denied* {exit 2;}
eof}
}
*(yes/no)* {send -- yes\r;exp_continue;}
eof {exit 1;}
}
"
fi
#Set up passwordless SSH between the two nodes so later scripts can run commands across them
#chrony
yum install -y chrony
if [[ $name == $HOST_NAME ]];then
sed -i '3,6s/^/#/g' /etc/chrony.conf
sed -i '7s/^/server controller iburst/g' /etc/chrony.conf
echo "allow $network_segment_IP" >> /etc/chrony.conf
echo "local stratum 10" >> /etc/chrony.conf
else
sed -i '3,6s/^/#/g' /etc/chrony.conf
sed -i '7s/^/server controller iburst/g' /etc/chrony.conf
fi
systemctl restart chronyd
systemctl enable chronyd
#Time synchronization: the controller serves time for the network segment defined by $network_segment_IP in the environment file, and the compute node syncs from it
#DNS
if [[ $name == $HOST_NAME ]];then
yum install bind -y
sed -i -e '13,14s/^/\/\//g' \
-e '19s/^/\/\//g' \
-e '37,42s/^/\/\//g' \
-e 's/recursion yes/recursion no/g' \
-e 's/dnssec-enable yes/dnssec-enable no/g' \
-e 's/dnssec-validation yes/dnssec-validation no/g' /etc/named.conf
systemctl start named.service
systemctl enable named.service
fi
printf "\033[35mPlease Reboot or Reconnect the terminal\n\033[0m"
#DNS resolution service; bind is the usual choice here
After the pre script has run on both nodes, refresh the session: either reboot (and re-check that the repo is still mounted afterwards) or simply reconnect over SSH; once you can see the last-login time, you are good to go!
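A quick way to check the pre script on both nodes (my own verification sketch, not part of the script; it assumes the hostnames and IPs defined in /etc/openstack/openrc.sh):
source /etc/openstack/openrc.sh
hostnamectl status                                   # hostname should match HOST_NAME / HOST_NAME_NODE
grep -E "$HOST_NAME|$HOST_NAME_NODE" /etc/hosts      # both mappings should be present
ssh $HOST_NAME_NODE hostname                         # on the controller: should log in without a password
chronyc sources -v                                   # on the compute node: the controller should be listed as the time source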
iaas-install-mysql.sh
This script installs the database, RabbitMQ, Memcached, and related services
#!/bin/bash
source /etc/openstack/openrc.sh
ping $HOST_IP -c 4 >> /dev/null 2>&1
if [ 0 -ne $? ]; then
echo -e "\033[31m Warning\nPlease make sure the network configuration is correct!\033[0m"
exit 1
fi
#Load the environment variables first, then ping the local IP to confirm the network is configured; if not, print a warning and exit
# MariaDB
yum install -y mariadb-10.3.20 mariadb-server-10.3.20 python2-PyMySQL
sed -i "/^symbolic-links/a\default-storage-engine = innodb\ninnodb_file_per_table\ncollation-server = utf8_general_ci\ninit-connect = 'SET NAMES utf8'\ncharacter-set-server = utf8\nmax_connections=10000" /etc/my.cnf
crudini --set /usr/lib/systemd/system/mariadb.service Service LimitNOFILE 10000
crudini --set /usr/lib/systemd/system/mariadb.service Service LimitNPROC 10000
systemctl daemon-reload
systemctl enable mariadb.service
systemctl restart mariadb.service
expect -c "
spawn /usr/bin/mysql_secure_installation
expect \"Enter current password for root (enter for none):\"
send \"\r\"
expect \"Set root password?\"
send \"y\r\"
expect \"New password:\"
send \"$DB_PASS\r\"
expect \"Re-enter new password:\"
send \"$DB_PASS\r\"
expect \"Remove anonymous users?\"
send \"y\r\"
expect \"Disallow root login remotely?\"
send \"n\r\"
expect \"Remove test database and access to it?\"
send \"y\r\"
expect \"Reload privilege tables now?\"
send \"y\r\"
expect eof
"
#Install MariaDB, tune it in /etc/my.cnf, then run mysql_secure_installation non-interactively to set the database root password defined in the environment file
# RabbitMQ
yum install rabbitmq-server -y
systemctl start rabbitmq-server.service
systemctl enable rabbitmq-server.service
rabbitmqctl add_user $RABBIT_USER $RABBIT_PASS
rabbitmqctl set_permissions $RABBIT_USER ".*" ".*" ".*"
#RabbitMQ handles messaging between all components; the script creates the $RABBIT_USER account (openstack, password 000000 by default) and grants it full permissions
# Memcache
yum install memcached python-memcached -y
sed -i -e 's/OPTIONS.*/OPTIONS="-l 127.0.0.1,::1,'$HOST_NAME'"/g' /etc/sysconfig/memcached
systemctl start memcached.service
systemctl enable memcached.service
#Install the Memcached caching service and set its listen addresses in /etc/sysconfig/memcached
# ETCD
yum install etcd -y
cp -a /etc/etcd/etcd.conf{,.bak}
sed -i -e 's/#ETCD_LISTEN_PEER_URLS.*/ETCD_LISTEN_PEER_URLS="http:\/\/'$HOST_IP':2380"/g' \
-e 's/^ETCD_LISTEN_CLIENT_URLS.*/ETCD_LISTEN_CLIENT_URLS="http:\/\/'$HOST_IP':2379"/g' \
-e 's/^ETCD_NAME="default"/ETCD_NAME="'$HOST_NAME'"/g' \
-e 's/#ETCD_INITIAL_ADVERTISE_PEER_URLS.*/ETCD_INITIAL_ADVERTISE_PEER_URLS="http:\/\/'$HOST_IP':2380"/g' \
-e 's/^ETCD_ADVERTISE_CLIENT_URLS.*/ETCD_ADVERTISE_CLIENT_URLS="http:\/\/'$HOST_IP':2379"/g' \
-e 's/#ETCD_INITIAL_CLUSTER=.*/ETCD_INITIAL_CLUSTER="'$HOST_NAME'=http:\/\/'$HOST_IP':2380"/g' \
-e 's/#ETCD_INITIAL_CLUSTER_TOKEN.*/ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"/g' \
-e 's/#ETCD_INITIAL_CLUSTER_STATE.*/ETCD_INITIAL_CLUSTER_STATE="new"/g' /etc/etcd/etcd.conf
systemctl start etcd
#etcd provides service registration and discovery: it records which services are running and their addresses and ports,
#so OpenStack components can find and connect to each other and work together for high availability and scalability.
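Before moving on to Keystone, it is worth confirming that the base services are actually up. A small sanity-check sketch (not part of the script):
source /etc/openstack/openrc.sh
mysql -uroot -p"$DB_PASS" -e "show databases;"       # MariaDB accepts the new root password
rabbitmqctl list_users                               # the $RABBIT_USER account should be listed
systemctl is-active memcached etcd                   # both should report "active"
ss -tnlp | grep -E '3306|5672|11211|2379'            # MariaDB, RabbitMQ, Memcached, and etcd listening ports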
iaas-install-keystone.sh
Install the Keystone identity service
#!/bin/bash
source /etc/openstack/openrc.sh
#keystone mysql
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS keystone ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '$KEYSTONE_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '$KEYSTONE_DBPASS' ;"
#Load the environment variables, then create the keystone database and grant local and remote access
#install keystone
yum install openstack-keystone httpd mod_wsgi -y
#Install the Keystone packages
#/etc/keystone/keystone.conf
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:$KEYSTONE_DBPASS@$HOST_NAME/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
#Use openstack-config to edit /etc/keystone/keystone.conf: the database connection and the Fernet token provider
su -s /bin/sh -c "keystone-manage db_sync" keystone
#Populate the keystone database, running as the keystone user
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
#Generate the Fernet keys used to encrypt and decrypt identity tokens, managed by the keystone user and group
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
#Initialize the credential key repository Keystone uses to encrypt stored credentials, again using the keystone user and group
keystone-manage bootstrap --bootstrap-password $ADMIN_PASS \
--bootstrap-admin-url http://$HOST_NAME:5000/v3/ \
--bootstrap-internal-url http://$HOST_NAME:5000/v3/ \
--bootstrap-public-url http://$HOST_NAME:5000/v3/ \
--bootstrap-region-id RegionOne
#Bootstrap the Keystone admin account and register the Identity API endpoints and region ID; $ADMIN_PASS is the admin password
sed -i "s/#ServerName www.example.com:80/ServerName $HOST_NAME/g" /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service
systemctl restart httpd.service
#Set Apache's ServerName to the controller hostname, link in the Keystone WSGI config, then enable and restart httpd
export OS_USERNAME=admin
export OS_PASSWORD=$ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://$HOST_NAME:5000/v3
export OS_IDENTITY_API_VERSION=3
#Export temporary environment variables holding the bootstrap admin user's credentials
openstack domain create --description "Default Domain" $DOMAIN_NAME
openstack project create --domain $DOMAIN_NAME --description "Admin project" myadmin
openstack user create --domain $DOMAIN_NAME --password $ADMIN_PASS myadmin
openstack role add --project myadmin --user myadmin admin
#Create the default domain, a temporary myadmin project, and a myadmin user, and grant that user the admin role
export OS_USERNAME=myadmin
export OS_PASSWORD=$ADMIN_PASS
export OS_PROJECT_NAME=myadmin
export OS_USER_DOMAIN_NAME=$DOMAIN_NAME
export OS_PROJECT_DOMAIN_NAME=$DOMAIN_NAME
export OS_AUTH_URL=http://$HOST_NAME:5000/v3
export OS_IDENTITY_API_VERSION=3
# Switch the CLI environment variables over to the myadmin user
openstack project delete admin
#Delete the temporary bootstrap admin project
openstack project set --name admin --domain $DOMAIN_NAME --description "Admin Project" --enable myadmin
#Rename the myadmin project to admin within the $DOMAIN_NAME domain
export OS_PROJECT_NAME=admin
#Point the CLI at the renamed admin project
openstack user delete admin
openstack user set --name admin --domain $DOMAIN_NAME --project admin --project-domain $DOMAIN_NAME --password $ADMIN_PASS --enable myadmin
#Rename the myadmin user to admin, bind it to the admin project and domain, set its password, and enable it
export OS_USERNAME=admin
#Switch the CLI to the renamed admin user
openstack role add --project admin --user admin admin
#Grant the admin role to the admin user on the admin project
openstack project create --domain $DOMAIN_NAME --description "Service Project" service
openstack project create --domain $DOMAIN_NAME --description "Demo Project" demo
#Create the service project and the demo project
openstack user create --domain $DOMAIN_NAME --password $DEMO_PASS demo
openstack role create user
openstack role add --project demo --user demo user
#Create a regular demo user and grant it the user role
cat > /etc/keystone/admin-openrc.sh <<-EOF
export OS_PROJECT_DOMAIN_NAME=$DOMAIN_NAME
export OS_USER_DOMAIN_NAME=$DOMAIN_NAME
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=$ADMIN_PASS
export OS_AUTH_URL=http://$HOST_NAME:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
#Write the administrator's credentials to /etc/keystone/admin-openrc.sh for use as the platform admin
cat > /etc/keystone/demo-openrc.sh <<-EOF
export OS_PROJECT_DOMAIN_NAME=$DOMAIN_NAME
export OS_USER_DOMAIN_NAME=$DOMAIN_NAME
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=$DEMO_PASS
export OS_AUTH_URL=http://$HOST_NAME:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
#Write the regular user's credentials to /etc/keystone/demo-openrc.sh
source /etc/keystone/admin-openrc.sh
#Source the admin credentials; if you run into permission problems later, re-source this file
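To confirm Keystone works end to end, you can request a token and list the users and endpoints (a verification sketch, not part of the script):
source /etc/keystone/admin-openrc.sh
openstack token issue          # should return a token without errors
openstack user list            # admin and demo should both appear
openstack endpoint list        # identity endpoints registered under RegionOne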
iaas-install-glance.sh
Install the image service
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#glance mysql
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS glance ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost'
IDENTIFIED BY '$GLANCE_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIF
IED BY '$GLANCE_DBPASS' ;"
#创建glance数据库,并授权远程登录
#glance user role service endpoint
openstack user create --domain $DOMAIN_NAME --password $GLANCE_PASS glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://$HOST_NAME:9292
openstack endpoint create --region RegionOne image internal http://$HOST_NAME:9292
openstack endpoint create --region RegionOne image admin http://$HOST_NAME:9292
#Create the glance user, grant it the admin role on the service project, and register the image service and its endpoints
#glance install: install the packages
yum install -y openstack-glance
#/etc/glance/glance-api.conf
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:$GLANCE_DBPASS@$HOST_NAME/glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password $GLANCE_PASS
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
#Configure Glance's database connection and Keystone authentication in /etc/glance/glance-api.conf
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store $DOMAIN_NAME'_store' file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
#Configure the image store backend and the directory where image files are kept
#/etc/glance/glance-registry.conf
openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:$GLANCE_DBPASS@$HOST_NAME/glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password $GLANCE_PASS
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
#Configure glance-registry's database connection and Keystone authentication in /etc/glance/glance-registry.conf
#su glance mysql
su -s /bin/sh -c "glance-manage db_sync" glance
#Populate the glance database, running as the glance user
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl restart openstack-glance-api.service openstack-glance-registry.service
#Enable the Glance services at boot and restart them
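A simple way to verify Glance is to upload a small test image (not part of the script; the CirrOS file name below is only an example and assumes the image file is already on the controller):
source /etc/keystone/admin-openrc.sh
openstack image create --disk-format qcow2 --container-format bare \
  --file /root/cirros-0.3.4-x86_64-disk.img --public cirros
openstack image list           # the new image should show status "active"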
iaas-install-placement.sh
Install the resource tracking and scheduling (Placement) service
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#placement mysql
mysql -uroot -p$DB_PASS -e "CREATE DATABASE placement;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'loca
lhost' IDENTIFIED BY '$PLACEMENT_DBPASS';"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' I
DENTIFIED BY '$PLACEMENT_DBPASS';"
#创建placement数据库,并授权远程访问
#placement user role service endpoint
openstack user create --domain $DOMAIN_NAME --password $PLACEMENT_PASS placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://$HOST_NAME:8778
openstack endpoint create --region RegionOne placement internal http://$HOST_NAME:8778
openstack endpoint create --region RegionOne placement admin http://$HOST_NAME:8778
#Create the placement user, grant it the admin role, and register the Placement API service and endpoints
#placement install: install the Placement packages
yum install openstack-placement-api python2-pip -y
#/etc/placement/placement.conf
openstack-config --set /etc/placement/placement.conf api auth_strategy keystone
openstack-config --set /etc/placement/placement.conf keystone_authtoken auth_url http://$HOST_NAME:5000/v3
openstack-config --set /etc/placement/placement.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/placement/placement.conf keystone_authtoken auth_type password
openstack-config --set /etc/placement/placement.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/placement/placement.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/placement/placement.conf keystone_authtoken project_name service
openstack-config --set /etc/placement/placement.conf keystone_authtoken username placement
openstack-config --set /etc/placement/placement.conf keystone_authtoken password $PLACEMENT_PASS
openstack-config --set /etc/placement/placement.conf placement_database connection mysql+pymysql://placement:$PLACEMENT_DBPASS@$HOST_NAME/placement
#Configure Keystone authentication and the database connection in /etc/placement/placement.conf
#su placement mysql
su -s /bin/sh -c "placement-manage db sync" placement
#Populate the placement database, running as the placement user
#/etc/httpd/conf.d/00-placement-api.conf
cat >> /etc/httpd/conf.d/00-placement-api.conf <<EOF
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
EOF
#Grant Apache access to /usr/bin; without this <Directory> block, Apache 2.4 and later denies access to the Placement API
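A quick sanity check for Placement (not part of the script; restarting httpd first makes sure the new 00-placement-api.conf is loaded):
source /etc/openstack/openrc.sh
systemctl restart httpd                  # reload the Apache config, including 00-placement-api.conf
placement-status upgrade check           # every check should report "Success"
curl http://$HOST_NAME:8778              # the API root should return a JSON version document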
iaas-install-nova-con/com.sh
Compute service components
iaas-install-nova-controller.sh
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#nova mysql
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS nova ;"
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS nova_api ;"
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS nova_cell0 ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDE
NTIFIED BY '$NOVA_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED
BY '$NOVA_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '$NOVA_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '$NOVA_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_DBPASS' ;"
#Create the nova, nova_api, and nova_cell0 databases and grant local and remote access
#nova user role service endpoint
openstack user create --domain $DOMAIN_NAME --password $NOVA_PASS nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://$HOST_NAME:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://$HOST_NAME:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://$HOST_NAME:8774/v2.1
#Create the nova user, grant it the admin role, and register the compute service and endpoints
#nova install: install the Nova controller packages
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
#/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip $HOST_IP
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:$RABBIT_PASS@$HOST_NAME
#Nova uses a single config file, /etc/nova/nova.conf; these lines enable the APIs, set the management IP and firewall driver, and configure the RabbitMQ transport URL
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:$NOVA_DBPASS@$HOST_NAME/nova_api
#Database connection for the nova_api database
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:$NOVA_DBPASS@$HOST_NAME/nova
#Database connection for the nova database
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://$HOST_NAME:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password $NOVA_PASS
#Configure the Keystone authentication settings
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc server_listen $HOST_IP
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address $HOST_IP
#VNC settings for the web console of instances managed by Nova
openstack-config --set /etc/nova/nova.conf glance api_servers http://$HOST_NAME:9292
#Point Nova at the Glance image service
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
#Lock files serialize access to shared resources to avoid concurrency conflicts; they live in /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf placement auth_url http://$HOST_NAME:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password $PLACEMENT_PASS
#Credentials Nova uses to talk to the Placement service
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
#Host discovery: this interval controls how often the scheduler looks for new compute nodes, here every 300 seconds (5 minutes)
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
#Populate the API, cell0, and nova databases and register cell1, running as the nova user
#su nova mysql
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
#Restart the Nova controller services and enable them at boot
openstack flavor create --id 1 --vcpus 1 --ram 512 --disk 10 m1.tiny
openstack flavor create --id 2 --vcpus 1 --ram 1024 --disk 20 m1.small
openstack flavor create --id 3 --vcpus 2 --ram 2048 --disk 40 m1.medium
#Create three flavors (instance types)
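Before installing the compute side, you can confirm the controller services and cells (a verification sketch, not part of the script):
source /etc/keystone/admin-openrc.sh
openstack compute service list                            # nova-conductor and nova-scheduler should be "up"
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova    # cell0 and cell1 should be registered
openstack flavor list                                     # the three flavors created above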
iaas-install-nova-compute.sh
#!/bin/bash
source /etc/openstack/openrc.sh
#Load the environment variables
#nova-compute install: install the nova-compute package
yum install openstack-nova-compute -y
#/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip $HOST_IP
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://$RABBIT_USER:$RABBIT_PASS@$HOST_NAME
#Configure the RabbitMQ transport URL
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
#Set the authentication strategy to Keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://$HOST_NAME:5000/
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password $NOVA_PASS
#Configure the Keystone authentication settings
openstack-config --set /etc/nova/nova.conf vnc enabled True
openstack-config --set /etc/nova/nova.conf vnc server_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address $HOST_IP_NODE
#Configure the VNC listen address and the proxy client address
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://$HOST_IP:6080/vnc_auto.html
#Base URL of the noVNC web console served by the controller
openstack-config --set /etc/nova/nova.conf glance api_servers http://$HOST_NAME:9292
#Point Nova at the Glance image service
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
#Set the lock file path
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf placement auth_url http://$HOST_NAME:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password $PLACEMENT_PASS
#Configure the Placement connection settings
virt_num=`egrep -c '(vmx|svm)' /proc/cpuinfo`
if [ $virt_num = '0' ];then
crudini --set /etc/nova/nova.conf libvirt virt_type qemu
fi
#Check whether the CPU supports hardware virtualization (vmx/svm); if the count is 0, fall back to the qemu virt type
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
#Enable libvirtd and nova-compute at boot and restart them
ssh $HOST_IP "source /etc/keystone/admin-openrc.sh && openstack compute service list --service nova-compute"
ssh $HOST_IP 'source /etc/keystone/admin-openrc.sh && su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova'
#SSH to the controller to confirm the compute service is registered and to run host discovery
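After discovery has run, the new node should be mapped as a hypervisor; you can check this from the controller (not part of the script):
source /etc/keystone/admin-openrc.sh
openstack hypervisor list                                 # the compute node should appear as a hypervisor
su -s /bin/sh -c "nova-manage cell_v2 list_hosts" nova    # the host should be mapped into cell1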
iaas-install-neutron-con/com.sh
Network component installation
iaas-install-neutron-controller.sh
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#neutron mysql
mysql -uroot -p$DB_PASS -e "create database IF NOT EXISTS neutron ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhos
t' IDENTIFIED BY '$NEUTRON_DBPASS' ;"
mysql -uroot -p$DB_PASS -e "GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENT
IFIED BY '$NEUTRON_DBPASS' ;"
#创建neutron数据库,并授权远程访问
#neutron user role service endpoint
openstack user create --domain $DOMAIN_NAME --password $NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://$HOST_NAME:9696
openstack endpoint create --region RegionOne network internal http://$HOST_NAME:9696
openstack endpoint create --region RegionOne network admin http://$HOST_NAME:9696
#Create the neutron user, grant it the admin role, and register the network service and endpoints
#neutron install: install the Neutron packages
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
#network
if [[ `ip a |grep -w $INTERFACE_IP |grep -w $INTERFACE_NAME` = '' ]];then
cat > /etc/sysconfig/network-scripts/ifcfg-$INTERFACE_NAME <<EOF
DEVICE=$INTERFACE_NAME
TYPE=Ethernet
BOOTPROTO=none
ONBOOT=yes
EOF
systemctl restart network
fi
#Bring up the second NIC, which provides the external (provider) network for instances
#/etc/neutron/neutron.conf
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:$NEUTRON_DBPASS@$HOST_NAME/neutron
#Configure the database connection
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips true
#Enable the ML2 core plugin, the router service plugin, and overlapping IPs
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://$RABBIT_USER:$RABBIT_PASS@$HOST_NAME
#Configure the RabbitMQ transport URL
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
#Authentication strategy: Keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password $NEUTRON_PASS
#Configure the Keystone authentication settings
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
#When set to true, Neutron notifies Nova whenever the status or data of a port it manages changes
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://$HOST_NAME:5000
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password $NOVA_PASS
#Credentials Neutron uses to send those notifications to Nova
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
#Set the lock file path
#/etc/neutron/plugins/ml2/ml2_conf.ini
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan,gre,local
#Network type drivers to load: flat, vlan, vxlan, gre, and local
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
#Tenant (self-service) networks use VXLAN
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
#Use the Linux bridge and L2 population mechanism drivers for ML2
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
#Enable the port security extension driver
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks $Physical_NAME
#Map flat networks to the physical network named $Physical_NAME
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vlan network_vlan_ranges $Physical_NAME:$minvlan:$maxvlan
#VLAN networks use the same physical network, with VLAN IDs from $minvlan to $maxvlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges $minvlan:$maxvlan
#VXLAN networks use VNIs in the range $minvlan to $maxvlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true
#Enable ipset for security groups, which uses the kernel's ipset facility to make iptables rules more efficient
#/etc/neutron/plugins/ml2/linuxbridge_agent.ini
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:$INTERFACE_NAME
#Map the provider physical network to the second NIC for the Linux bridge agent
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
#Enable VXLAN overlay networks
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip $HOST_IP
#Local endpoint IP for VXLAN tunnels (the controller's management IP)
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
#Enable L2 population for VXLAN, which improves performance and scalability
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
#Enable security groups, the access-control mechanism that limits network traffic to and from instances
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Use the iptables firewall driver, the component that actually enforces security group rules
#br_netfilter
modprobe br_netfilter
#Load the br_netfilter kernel module so bridged traffic can be handled by the netfilter framework
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Persist the two kernel parameters in /etc/sysctl.conf
#so that iptables rules are applied to bridged network traffic
sysctl -p
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
#Apply the settings and confirm the values
#/etc/neutron/l3_agent.ini
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge
#The L3 agent uses the Linux bridge interface driver
#/etc/neutron/dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
#The DHCP agent also uses the Linux bridge interface driver to talk to the kernel network stack
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
#The DHCP agent uses dnsmasq to hand out addresses
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
#Enable isolated metadata so instances on networks without a router can still reach the metadata service
#/etc/neutron/metadata_agent.ini
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host $HOST_NAME
#Tell the metadata agent which host runs the Nova metadata API
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret $METADATA_SECRET
#Shared secret that secures traffic between the metadata agent and the Nova metadata service
#/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf neutron auth_url http://$HOST_NAME:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf neutron user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password $NEUTRON_PASS
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy true
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret $METADATA_SECRET
#Configure Nova's [neutron] section so Nova can reach Neutron and proxy metadata requests
#su neutron mysql
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
#Symlink the ML2 config as the plugin config Neutron expects
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
#Populate the neutron database, running as the neutron user
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent neutron-l3-agent
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent neutron-l3-agent
#Restart nova-api, then enable and start the Neutron services
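At this point the Neutron server and its agents on the controller can be checked (a verification sketch, not part of the script):
source /etc/keystone/admin-openrc.sh
openstack network agent list             # metadata, DHCP, L3, and Linux bridge agents should show Alive = :-)
ss -tnlp | grep 9696                     # neutron-server listening on the API port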
iaas-install-neutron-compute.sh
#!/bin/bash
source /etc/openstack/openrc.sh
#Load the environment variables
#neutron install: install the Neutron compute-node packages
yum install -y openstack-neutron-linuxbridge ebtables ipset
#network
if [[ `ip a |grep -w $INTERFACE_IP |grep -w $INTERFACE_NAME` = '' ]];then
cat > /etc/sysconfig/network-scripts/ifcfg-$INTERFACE_NAME <<EOF
DEVICE=$INTERFACE_NAME
TYPE=Ethernet
BOOTPROTO=none
ONBOOT=yes
EOF
systemctl restart network
fi
#Bring up the second NIC as the provider network interface for instances
#/etc/neutron/neutron.conf
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://$RABBIT_USER:$RABBIT_PASS@$HOST_NAME
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password $NEUTRON_PASS
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
#Same settings as on the controller
#/etc/neutron/plugins/ml2/linuxbridge_agent.ini
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:$INTERFACE_NAME
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip $HOST_IP_NODE
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group True
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Same as on the controller, but the VXLAN local endpoint is the compute node's IP
#br_netfilter
modprobe br_netfilter
#Load the br_netfilter kernel module so bridged traffic can be handled by the netfilter framework
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Persist the two kernel parameters in /etc/sysctl.conf
#so that iptables rules are applied to bridged network traffic
sysctl -p
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
#Apply the settings and confirm the values
#/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf neutron url http://$HOST_NAME:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://$HOST_NAME:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf neutron user_domain_name $DOMAIN_NAME
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password $NEUTRON_PASS
#Configure Neutron authentication in Nova on the compute node
systemctl restart openstack-nova-compute.service
systemctl restart neutron-linuxbridge-agent.service
systemctl enable neutron-linuxbridge-agent.service
#Restart nova-compute and the Linux bridge agent, and enable the agent at boot
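With both agents up, you can create networks. The commands below are only an example of a flat provider (external) network, run on the controller; the subnet range, gateway, and allocation pool are placeholders to replace with values for your environment:
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
openstack network create --share --external \
  --provider-network-type flat --provider-physical-network $Physical_NAME ext-net
openstack subnet create --network ext-net --subnet-range 192.168.100.0/24 \
  --gateway 192.168.100.1 --allocation-pool start=192.168.100.100,end=192.168.100.200 ext-subnet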
iaas-install-dashboard.sh
OpenStack web management interface (Horizon)
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#dashboard install: install the Dashboard package
yum install openstack-dashboard -y
#/etc/openstack-dashboard/local_settings
sed -i '/^OPENSTACK_HOST/s#127.0.0.1#'$HOST_NAME'#' /etc/openstack-dashboard/local_settings
#Point OPENSTACK_HOST at the controller hostname
sed -i "/^ALLOWED_HOSTS/s#\[.*\]#['*']#" /etc/openstack-dashboard/local_settings
#Allow access from any host
sed -i '/TIME_ZONE/s#UTC#Asia/Shanghai#' /etc/openstack-dashboard/local_settings
#Set the time zone to Asia/Shanghai
sed -i '/^#SESSION_ENGINE/s/#//' /etc/openstack-dashboard/local_settings
#Uncomment SESSION_ENGINE to enable it
sed -i "/^SESSION_ENGINE/s#'.*'#'django.contrib.sessions.backends.cache'#" /etc/openstack-dashboard/local_settings
#Switch the session backend to the cache, which improves session performance
cat >> /etc/openstack-dashboard/local_settings <<EOF
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "$DOMAIN_NAME"
#API versions, default role, multidomain support, and default domain for Horizon
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '$HOST_NAME:11211',
}
}
#Cache configuration for the Dashboard: Memcached is the backend, reached at $HOST_NAME:11211,
#where $HOST_NAME is the controller hostname from the environment file.
WEBROOT = '/dashboard/'
#Web root of the Dashboard: it is served under /dashboard/ on the web server.
EOF
#/etc/httpd/conf.d/openstack-dashboard.conf
sed -e '4iWSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf
#Ensure the Dashboard runs in the global WSGI application group so it behaves correctly
#rebuild dashboard
cd /usr/share/openstack-dashboard && python manage.py make_web_conf --apache > /etc/httpd/conf.d/openstack-dashboard.conf
#Regenerate the Apache configuration for the Dashboard so Horizon is served correctly
ln -s /etc/openstack-dashboard /usr/share/openstack-dashboard/openstack_dashboard/conf
#Symlink the configuration directory into the Dashboard source tree
sed -i "s:WSGIScriptAlias / :WSGIScriptAlias /dashboard :" /etc/httpd/conf.d/openstack-dashboard.conf
#Serve the Dashboard application under /dashboard
sed -i "s:Alias /static:Alias /dashboard/static:" /etc/httpd/conf.d/openstack-dashboard.conf
#Make sure static files are served under /dashboard/static
systemctl restart httpd.service memcached.service
#Restart httpd and Memcached
#/root/logininfo.txt
printf "\033[35mThe horizon service is ready,Now you can visit the following;\n\033[0m"
echo "Browser access: http://$HOST_IP/dashboard"
echo "Domain: $DOMAIN_NAME"
echo "Username: admin"
echo "Password: ${ADMIN_PASS}"
echo "The information above has also been written to /root/logininfo.txt."
#Print the access information to the screen
cat >> /root/logininfo.txt << EOF
Browser access: http://$HOST_IP/dashboard
Domain: $DOMAIN_NAME
Username: admin
Password: "${ADMIN_PASS}"
EOF
#Write the same information to /root/logininfo.txt
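A quick check that Horizon is reachable (not part of the script):
source /etc/openstack/openrc.sh
systemctl is-active httpd memcached                       # both should report "active"
curl -sL http://$HOST_IP/dashboard | grep -i "<title>"    # should return the login page title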
iaas-install-cinder-con/com.sh
iaas-install-cinder-controller.sh
Install the block storage (volume) service
#!/bin/bash
source /etc/openstack/openrc.sh
source /etc/keystone/admin-openrc.sh
#Load the environment variables
#cinder mysql
mysql -uroot -p$DB_PASS -e "create database cinder;"
mysql -uroot -p$DB_PASS -e "grant all privileges on cinder.* to 'cinder'@'%' identif
ied by '$CINDER_DBPASS';"
mysql -uroot -p$DB_PASS -e "grant all privileges on cinder.* to 'cinder'@'localhost'
identified by '$CINDER_DBPASS';"
#创建cinder数据库,并授权远程访问
#cinder user role service endpoint
openstack user create --domain $DOMAIN_NAME --password $CINDER_PASS cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
#Create the cinder user, grant it the admin role, and register the v2 and v3 volume services
openstack endpoint create --region RegionOne volumev2 public http://$HOST_NAME:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://$HOST_NAME:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://$HOST_NAME:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://$HOST_NAME:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://$HOST_NAME:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://$HOST_NAME:8776/v3/%\(project_id\)s
#Create the endpoints for both volume API versions
#cinder install: install the Cinder controller packages
yum install openstack-cinder -y
#/etc/cinder/cinder.conf
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:$CINDER_DBPASS@$HOST_NAME/cinder
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://$RABBIT_USER:$RABBIT_PASS@$HOST_NAME
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password $CINDER_PASS
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip $HOST_IP
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
#Configure the database connection, RabbitMQ transport, Keystone authentication, management IP, and lock path
#/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
#Region name Nova uses when calling the Cinder block storage service
#su cinder mysql
su -s /bin/sh -c "cinder-manage db sync" cinder
#Populate the cinder database, running as the cinder user
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service httpd
#Restart nova-api, then enable and restart the Cinder API and scheduler services (plus httpd)
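Before running the compute-side script, you can confirm the scheduler is registered (not part of the script):
source /etc/keystone/admin-openrc.sh
openstack volume service list     # cinder-scheduler should be "up"; cinder-volume appears only after the storage node is configured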
iaas-install-cinder-compute.sh
#!/bin/bash
source /etc/openstack/openrc.sh
#Load the environment variables
#cinder install: install the packages
yum install lvm2 device-mapper-persistent-data openstack-cinder targetcli python-keystone -y
systemctl enable lvm2-lvmetad.service
systemctl restart lvm2-lvmetad.service
#Enable and restart the LVM metadata service
#Create a disk for cinder volumes
pvcreate /dev/$BLOCK_DISK
#Create the LVM physical volume
vgcreate cinder-volumes /dev/$BLOCK_DISK
#Create the cinder-volumes volume group
partprobe
#Re-read the partition table so the kernel picks up the changes
#sed -i '/^ filter/d' /etc/lvm/lvm.conf
#sed -i '/^devices/a\ filter = ["a/sdb/", "a/sda/", "r/.*/"]' /etc/lvm/lvm.conf
#sed -i "s/sdz/$BLOCK_DISK/g" /etc/lvm/lvm.conf
#partprobe
#/etc/cinder/cinder.conf
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:$CINDER_DBPASS@$HOST_NAME/cinder
#Database connection
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://$RABBIT_USER:$RABBIT_PASS@$HOST_NAME
#RabbitMQ transport URL
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
#Use Keystone as the authentication service
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip $HOST_IP_NODE
#Management IP of this storage node
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
#Enable the lvm storage backend
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://$HOST_NAME:9292
#Glance API endpoint, used when creating volumes from images
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken www_authenticate_uri http://$HOST_NAME:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://$HOST_NAME:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers $HOST_NAME:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name $DOMAIN_NAME
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name $DOMAIN_NAME
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password $CINDER_PASS
#Configure the Keystone authentication settings
openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
#Use the LVM driver to manage Cinder volumes
openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder-volumes
#Name of the LVM volume group that stores Cinder volumes
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
#Export volumes over the iSCSI protocol
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
#Use LIO (Linux-IO Target, managed with lioadm) as the iSCSI helper
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
#Set the lock file path
systemctl enable openstack-cinder-volume.service target.service
systemctl restart openstack-cinder-volume.service target.service
#Enable the cinder-volume and target services at boot and restart them
ssh $HOST_IP "source /etc/keystone/admin-openrc.sh && cinder service-list"
#SSH to the controller and list the Cinder services to confirm the volume backend is up
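As an end-to-end check from the controller, you can create and delete a small test volume (example only, not part of the script):
source /etc/keystone/admin-openrc.sh
openstack volume create --size 1 test-volume
openstack volume list              # the volume should reach status "available"
openstack volume delete test-volume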