测试
宿主机配置网络以及安装kvm
配置root默认登录
vi /etc/gdm/custom.conf
[daemon]
AutomaticLoginEnable=True
AutomaticLogin=root
宿主机用户
用户名:liao
密码:2022y01m02d
用户名:root
密码:123456
宿主机配置网络
vi /etc/sysconfig/network-scripts/ifcfg-网卡名
IPADDR=10.31.82.141
NETMASK=255.255.255.0
GATEWAY=10.31.82.254
DNS1=114.114.114.114
systemctl restart network
宿主机硬件
主机型号:戴尔 PRECISION 7920
英特尔至强铂金 8260 2.4GHz,(3.9GHz Turbo, 24C, 10.4GT/s 3UPI, 35.75MB 缓存, HT (165W), DDR4-2933, 第二代)
128GB (8x16GB) DDR4 2666MHz RDIMM ECC
无硬盘
3.5英寸 4TB 7200rpm SATA AG-企业硬盘
2.5英寸 512GB SATA Class 20 固态硬盘
集成英特尔AHCI SATA 芯片控制器 (8x 6.0Gb/s), SW RAID 0,1,5,10
Nvidia RTX 3090, 24GB, 4DP 显卡
# 内存48g,12核
virt-install --name=controller --memory=49152 --vcpus=12 --location=/tmp/CentOS-7-x86_64-Minimal-1908.iso --disk path=/kvm_data/controller_save.img,size=200 --bridge=br0 --graphics=none --extra-args=console=ttyS0
virt-install --virt-type=kvm --name=v1 --vcpus=1 --memory=2048 --location=/tmp/CentOS-7-x86_64-Minimal-1908.iso --disk path=/kvm_data/v1.img,size=20 --network bridge=br0 --graphics none --extra-args='console=ttyS0' --force
安装kvm
# 启动模块
modprobe kvm
modprobe -r kvm_intel
modprobe kvm_intel nested=1
lsmod |grep kvm
# 安装kvm
yum -y install libvirt
yum -y install epel*
yum -y install virt-manager
yum -y install kvm
yum -y install qemu*
创建虚拟机并安装centos7
# 启动前,在虚拟机cpu设置中,选择复制cpu设置
# 创建kvm数据文件夹
mkdir kvm_data
# centos.iso路径
/tmp/
关闭NetworkManager服务
systemctl stop NetworkManager
systemctl disable NetworkManager
安装网桥工具
yum -y install bridge-utils
网卡配置
# 先编辑自带网卡
cat ifcfg-名字
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
NAME=名字
DEVICE=名字
ONBOOT=yes
BRIDGE=br0
# 编辑网桥
cat ifcfg-br0
TYPE=Bridge
BOOTPROTO=static
NAME=br0
DEVICE=br0
ONBOOT=yes
IPADDR=10.31.82.141
NETMASK=255.255.255.0
GATEWAY=10.31.82.254
DNS1=114.114.114.114
# 创建网桥br0
brctl addbr br0
# 将物理网卡端口eth0加入网桥
brctl addif br0 eth0
# 之后会断网,无法远程,需要在服务器上重启网络
systemctl restart network
创建虚拟机后,在两个虚机上修改网络
cd /etc/sysconfig/network-scripts/
vi ifcfg-名字
# 按照下方信息进行修改
ONBOOT=yes
BOOTPROTO=static
IPADDR=10.31.82.14x # controller142,compute143
NETMASK=255.255.255.0
GATEWAY=10.31.82.254
DNS1=114.114.114.114 # 同宿主机DNS相同,不加也可以
# 关闭NetworkManager
systemctl stop NetworkManager
# 重启虚拟机网卡
systemctl restart network
双节点openstack搭建手册
1.初始化配置
安装完centos后,配置网络
vi /etc/sysconfig/network-scripts/ifcfg-网卡名
ipaddr和gateway可在:编辑,虚拟网络编辑器中查询
保存后重启network
systemctl restart network
初始化配置(双节点)
vi /etc/selinux/config
修改:SELINUX=disabled
setenforce 0
关防火墙(双节点)
systemctl stop firewalld; systemctl disable firewalld
配置hostname(controller)
hostnamectl set-hostname controller
配置hostname(compute)
hostnamectl set-hostname compute
配置时间同步(双节点)
yum -y install chrony
修改chrony配置(controller)
vi /etc/chrony.conf
修改chrony配置(compute)
vi /etc/chrony.conf
重启chrony服务(双节点)
systemctl restart chronyd
检查
systemctl status chronyd
chronyc sources -v
域名解析(双节点)
hostname
hostname -i
vi /etc/hosts
增加:
192.168.227.3 controller
192.168.227.4 compute
安装openstack源(双节点)
yum install centos-release-openstack-train -y
继续安装(双节点)
yum install python-openstackclient openstack-selinux -y
2.数据库
数据库(controller)
yum install mariadb mariadb-server python2-PyMySQL -y
修改配置文件(controller)
vi /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.227.3
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
启动服务(controller)
systemctl enable mariadb.service; systemctl start mariadb.service
初始化数据库(controller)
mysql_secure_installation
回车
y # 密码123456
y
n
y
y
3.消息队列
安装(controller)
yum install rabbitmq-server -y
启动自启(controller)
systemctl enable rabbitmq-server.service; systemctl start rabbitmq-server.service
创建openstack用户,和密码123456(controller)
rabbitmqctl add_user openstack 123456
# 报错的话
ps -ef | grep rabbitmq | grep -v grep | awk '{print $2}' | xargs kill -9 #将mq的进程杀掉
rabbitmq-server -detached #启动mq
# 再创建
开放所有权限(controller)
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
查看需要启动的服务(controller)
rabbitmq-plugins list
开启图形化界面(controller)
rabbitmq-plugins enable rabbitmq_management rabbitmq_management_agent
测试图形化界面,账号密码都是guest
访问http://controller的ip:15672/
4.缓存服务(controller)
安装
yum install memcached python-memcached -y
修改配置文件
vi /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="1024"
OPTIONS="-l 127.0.0.1,::1,controller"
自启动
systemctl enable memcached.service; systemctl start memcached.service
5.keystone(controller)
登录mysql
mysql -p123456
数据库操作
# 创建库
CREATE DATABASE keystone;
# 授权,密码123456
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
# 退出
exit;
安装服务
yum install openstack-keystone httpd mod_wsgi -y
修改配置文件
vi /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:123456@controller/keystone
[token]
provider = fernet
同步数据库
su -s /bin/sh -c "keystone-manage db_sync" keystone
测试
# 登录mysql后
use keystone;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [keystone]> show tables;
+------------------------------------+
| Tables_in_keystone |
+------------------------------------+
| access_rule |
| access_token |
| application_credential |
| application_credential_access_rule |
| application_credential_role |
48 rows in set (0.001 sec)
MariaDB [keystone]> quit
Bye
创建令牌
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
关键步骤
# 密码admin
keystone-manage bootstrap --bootstrap-password admin --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
配置apache
vim /etc/httpd/conf/httpd.conf
ServerName controller:80
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service; systemctl start httpd.service
检查
systemctl status httpd
配置环境变量
vim admin.sh
#!/bin/bash
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
立即生效
source admin.sh
检查
openstack endpoint list
openstack token issue
创建域、项目、角色
# 创建域
openstack domain create --description "An Example Domain" example
# 创建service项目
openstack project create --domain default --description "Service Project" service
# 创建myproject项目
openstack project create --domain default --description "Demo Project" myproject
# 创建myuser用户(密码:myuser)
openstack user create --domain default --password-prompt myuser
# 创建myrole角色
openstack role create myrole
# 把myrole角色添加到myproject项目和myuser用户中
openstack role add --project myproject --user myuser myrole
验证
unset OS_AUTH_URL OS_PASSWORD
# 密码:admin
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
# 密码:myuser
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue
配置admin环境变量
vim admin.sh
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
配置myuser环境变量
vim myuser.sh
#!/bin/bash
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
生效
source admin.sh
# 验证
openstack token issue
source myuser.sh
# 验证
openstack token issue
# 再回到admin
source admin.sh
6.glance(controller)
登录mysql
mysql -p123456
CREATE DATABASE glance;
# 密码glance
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance';
quit
# 需要设置密码,统一glance
openstack user create --domain default --password-prompt glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
验证
openstack endpoint list
安装服务
yum install openstack-glance -y
配置glance文件(openstack配置文件不能有中文,注释的也不行)
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:glance@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
同步数据库
su -s /bin/sh -c "glance-manage db_sync" glance
启动自启服务
systemctl enable openstack-glance-api.service; systemctl start openstack-glance-api.service
检查日志
tail -f /var/log/glance/api.log
上传镜像
# 先用ftp将从qq下载的img文件上传到root目录,再执行下面的语句
glance image-create --name "cirros4" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public
验证
openstack image list
检查日志
tail -f /var/log/glance/api.log
7.placement(controller)
mysql -p123456
CREATE DATABASE placement;
# up主密码为:placement123,我的密码为placement
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement';
quit
创建账号、域、用户等配置
# 设置密码,统一placement
openstack user create --domain default --password-prompt placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
安装服务
yum install openstack-placement-api -y
配置placement文件
# 配置placement文件
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:placement@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement
同步数据库
su -s /bin/sh -c "placement-manage db sync" placement
查看apache版本
httpd -v
如果大于等于2.4
vim /etc/httpd/conf.d/00-placement-api.conf
# 在最后加入
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
重启http
systemctl restart httpd
验证
placement-status upgrade check
8.nova(controller和compute)
创库授权(controller)
mysql -p123456
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
# 我的密码是nova,up是nova123
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova';
quit
创建计算服务凭证(controller)
# 密码nova
openstack user create --domain default --password-prompt nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
安装服务(controller)
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
# openstack-nova-conductor 负责数据库
# openstack-nova-novncproxy 负责云主机连接
# openstack-nova-scheduler 负责调度
修改配置文件(controller)
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
[api_database]
connection = mysql+pymysql://nova:nova@controller/nova_api
[database]
connection = mysql+pymysql://nova:nova@controller/nova
[DEFAULT]
transport_url = rabbit://openstack:123456@controller:5672/
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[DEFAULT]
my_ip = 192.168.227.3
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
同步数据库(controller)
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
验证(controller)
mysql -p123456
show databases;
use nova; show tables;
use nova_api; show tables;
use nova_cell0; show tables;
quit
验证 nova cell0 和 cell1 是否正确注册(controller)
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
启动自启服务(controller)
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service; systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
安装服务(compute)
yum install openstack-nova-compute -y
修改配置文件(compute)
# 10.31.82.142 controller
# 10.31.82.143 compute
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[DEFAULT]
my_ip = 192.168.227.4
[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://192.168.227.3:6080/vnc_auto.html # controller的ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement
查看是否支持cpu虚拟化(compute)
egrep -c '(vmx|svm)' /proc/cpuinfo
若不行,则用以下命令(compute)
vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu
启动自启服务(compute)
systemctl enable libvirtd.service openstack-nova-compute.service; systemctl start libvirtd.service openstack-nova-compute.service
验证(controller)
openstack compute service list --service nova-compute
主机发现(controller)每添加一个节点,都要执行一下这个命令
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
控制节点配置自动主机发现(controller)
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
编写nova服务重新启动脚本(controller)
vim restart-nova.sh
#!/bin/bash
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
bash restart-nova.sh
9.neutron(controller)
创库授权
mysql -p123456
CREATE DATABASE neutron;
# 密码neutron
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';
quit
创建域、项目用户
# 密码统一neutron
openstack user create --domain default --password-prompt neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
安装服务
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
配置文件
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:neutron@controller/neutron
[DEFAULT]
core_plugin = ml2
service_plugins =
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[DEFAULT]
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
# 没有就放在最后
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
继续配置文件
去下面网址中,复制配置文件,替换掉ml2_conf.ini
vim快捷键,gg去首行,v视图,G尾行,d删除
vim /etc/neutron/plugins/ml2/ml2_conf.ini
https://docs.openstack.org/ocata/config-reference/networking/samples/ml2_conf.ini.html
再修改
[ml2]
type_drivers = flat,vlan
[ml2]
tenant_network_types =
[ml2]
mechanism_drivers = linuxbridge
[ml2]
extension_drivers = port_security
[ml2_type_flat]
flat_networks = extnet
[securitygroup]
enable_ipset = true
配置文件,同理先替换
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
https://docs.openstack.org/ocata/config-reference/networking/samples/linuxbridge_agent.ini.html
# extnet为[ml2_type_flat]中设置的名称;ens33为网卡名称,可用 ip a 查看本机网卡名称
[linux_bridge]
physical_interface_mappings = extnet:ens33
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
配置内核
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
加载模块
modprobe br_netfilter
sysctl -p
继续配置
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
vim /etc/neutron/metadata_agent.ini
# 记住此处密码123456
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = 123456
继续配置
vim /etc/nova/nova.conf
# 上面配置的密码
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
配置软链接
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
同步数据库
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
验证
mysql -p123456
use neutron;
show tables;
quit
创建neutron重启脚本
vim restart-neutron.sh
#!/bin/bash
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
重启计算服务
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service; systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
检查
tail -f /var/log/neutron/*.log
安装(compute)
yum install openstack-neutron-linuxbridge ebtables ipset -y
配置文件(compute)
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:123456@controller
[DEFAULT]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
继续(compute)
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# 末尾加上
[linux_bridge]
physical_interface_mappings = extnet:ens33
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
继续(compute)
vim /etc/sysctl.conf
# 末尾加上
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
sysctl -p
继续(compute)
# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
重启服务(compute)
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service; systemctl start neutron-linuxbridge-agent.service
验证(controller)
openstack network agent list
创建实例(controller)
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
ssh-keygen -q -N "" # 一直回车
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list
openstack security group rule create --proto icmp default
openstack network create --share --external --provider-physical-network extnet --provider-network-type flat flat-extnet
# 分配池取controller所在网段内的一段空闲IP(此处为 .44~.100),gateway与controller的gateway相同,subnet-range 取IP前三段加/24
openstack subnet create --network flat-extnet --allocation-pool start=10.31.82.44,end=10.31.82.100 --dns-nameserver 114.114.114.114 --gateway 10.31.82.254 --subnet-range 10.31.82.0/24 flat-subnet
# 获取net-id
openstack network list
openstack server create --flavor m1.nano --image cirros4 --nic net-id=37c65d60-ebfb-4ffc-8d6a-fc2f95f34006 --security-group default --key-name mykey vm1
# 验证
openstack server list
# 获取链接
openstack console url show vm1
链接无法访问时(compute)
# 查看服务器支持的虚拟化类型
virsh capabilities
<machine maxCpus='240'>pc-i440fx-rhel7.5.0</machine>
<machine maxCpus='240'>pc-i440fx-rhel7.1.0</machine>
<machine maxCpus='240'>pc-i440fx-rhel7.2.0</machine> #要这个
# 编辑nova配置
vim /etc/nova/nova.conf
[libvirt]
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0 # 更改虚拟化类型
cpu_mode = host-passthrough # 直接使用宿主机的cpu
# 重启nova服务
systemctl restart openstack-nova-compute.service
登录,并查询ip(控制台)
用户名:cirros
密 码:gocubsgo
sudo su - root
ifconfig
10.dashboard
安装服务
yum install openstack-dashboard -y
yum -y install net-tools
修改配置文件
vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
ALLOWED_HOSTS = ['*']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {
...
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
继续
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}
# 之后重启服务
systemctl restart httpd.service memcached.service
继续
vim /etc/openstack-dashboard/local_settings
WEBROOT = '/dashboard'
# 之后重启服务
systemctl restart httpd.service memcached.service
登录
# 网址
http://controller的ip/dashboard
# 域
default
# 用户
admin
# 密码
admin
ssh远程
# 密码gocubsgo
ssh cirros@实例ip
常用指令
systemctl stop chronyd # 停止chronyd
systemctl status ntpd # ntpd状态
yum remove tomcat # 删除指定软件
提示
Missing value auth-url required for auth plugin password
需要:source admin.sh
vim 中'/'查找,'\'转义,n查找下一个
记录
nova重启脚本
bash restart-nova.sh
neutron重启脚本
bash restart-neutron.sh
删除openstack虚拟机
# 查看虚拟机id
nova list --all-tenants
# 根据id删除
nova delete 4ce3eed4-ed37-43aa-bcc4-9f9fd5ee55e4
# 在对应的计算节点重启
systemctl restart openstack-nova-compute.service
查询openstack的安全组
openstack security group list
删除安全组
openstack security group delete id