OpenStack HA Platform (First Edition)

Install base packages (all nodes)

yum install vim iotop bc gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl openssl-devel zip unzip zlib-devel net-tools lrzsz tree ntpdate telnet lsof tcpdump wget libevent libevent-devel systemd-devel bash-completion traceroute bridge-utils vsftpd -y

Node plan

Hostname IP address
openstack-controller1.ws.local 172.31.7.101
openstack-controller2.ws.local 172.31.7.102
openstack-mysql1.ws.local 172.31.7.103
openstack-mysql2.ws.local 172.31.7.104
openstack-haproxy1.ws.local 172.31.7.105
openstack-haproxy2.ws.local 172.31.7.106
openstack-node1.ws.local 172.31.7.107
openstack-node2.ws.local 172.31.7.108
openstack-node3.ws.local 172.31.7.109
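
For reference only (not in the original notes): a hypothetical /etc/hosts block matching the plan above; the VIP entry for 172.31.7.248 openstack-vip.ws.local is added later in the keystone section.
cat >> /etc/hosts <<'EOF'
172.31.7.101 openstack-controller1.ws.local
172.31.7.102 openstack-controller2.ws.local
172.31.7.103 openstack-mysql1.ws.local
172.31.7.104 openstack-mysql2.ws.local
172.31.7.105 openstack-haproxy1.ws.local
172.31.7.106 openstack-haproxy2.ws.local
172.31.7.107 openstack-node1.ws.local
172.31.7.108 openstack-node2.ws.local
172.31.7.109 openstack-node3.ws.local
EOF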

keystone(controller)

  • Database node
# Install the OpenStack base packages
yum install -y centos-release-openstack-train
yum install -y python-openstackclient openstack-selinux

# Install MariaDB
yum install mariadb mariadb-server

# Configure the MariaDB settings for OpenStack
vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 172.31.7.103		# listen address of the mysql1 node

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8

# Start and enable the service
systemctl start mariadb; systemctl enable mariadb

# Secure the installation
mysql_secure_installation
Press Enter (the current root password is empty)
n
y
n
y
y

mysql -u root -p

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'keystone123';

# Deploy and configure RabbitMQ
yum install -y rabbitmq-server

systemctl enable rabbitmq-server.service; systemctl start rabbitmq-server.service

rabbitmqctl add_user openstack openstack123

rabbitmqctl set_permissions openstack ".*" ".*" ".*"

rabbitmq-plugins enable rabbitmq_management  rabbitmq_management_agent
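
A quick sanity check, not part of the original steps; these rabbitmqctl subcommands and the 15672 management port are standard RabbitMQ behaviour.
rabbitmqctl list_users				# the openstack user should be listed
rabbitmqctl list_permissions -p /		# it should have ".*" ".*" ".*" on the default vhost
ss -tnlp | grep 15672				# the management UI listens on 15672 once the plugin is enabled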

# Install and configure memcached
yum install -y memcached

vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="1024"
OPTIONS="-l 0.0.0.0,::1"

systemctl enable memcached.service; systemctl start memcached.service

ha1

# Install the packages
yum -y install haproxy keepalived

cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_iptables
vrrp_garp_interval 0
vrrp_gna_interval 0
}

vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 58
priority 100
advert_int 1
authentication {
  auth_type PASS
  auth_pass 1111
}
virtual_ipaddress {
  172.31.7.248 dev eth0 label eth0:0
}
}
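
The original does not show starting keepalived; assuming the service name and VIP above, one way to check that the VIP came up on the MASTER might be:
systemctl enable keepalived; systemctl start keepalived
ip addr show eth0 | grep 172.31.7.248		# the eth0:0 label should carry the VIP
ping -c 2 172.31.7.248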

# Configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events.  This is done
#    by adding the '-r' option to the SYSLOGD_OPTIONS in
#    /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
#   file. A line like the following can be added to
#   /etc/sysconfig/syslog
#
#    local2.*                       /var/log/haproxy.log
#
log         127.0.0.1 local2

chroot      /var/lib/haproxy
pidfile     /var/run/haproxy.pid
maxconn     4000
user        haproxy
group       haproxy
daemon

# turn on stats unix socket
stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode                    http
log                     global
option                  httplog
option                  dontlognull
option http-server-close
option forwardfor       except 127.0.0.0/8
option                  redispatch
retries                 3
timeout http-request    10s
timeout queue           1m
timeout connect         10s
timeout client          1m
timeout server          1m
timeout http-keep-alive 10s
timeout check           10s
maxconn                 3000

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
listen openstack-mysql-3306
bind 172.31.7.248:3306
mode tcp
server 172.31.7.103 172.31.7.103:3306 check inter 3s fall 3 rise 5

listen openstack-rabbitmq-5672
bind 172.31.7.248:5672
mode tcp
server 172.31.7.103 172.31.7.103:5672 check inter 3s fall 3 rise 5

listen openstack-memcache-11211
bind 172.31.7.248:11211
mode tcp
server 172.31.7.103 172.31.7.103:11211 check inter 3s fall 3 rise 5
  • Controller node
# Controller node (python2-PyMySQL and python-memcached are needed to reach the database and the cache)
yum -y install mariadb python2-PyMySQL python-memcached
# Verify that the database is reachable through 172.31.7.248:3306
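
A minimal connectivity test, assuming the keystone account created above and the haproxy listener on 172.31.7.248:3306:
mysql -h 172.31.7.248 -ukeystone -pkeystone123 -e 'SHOW DATABASES;'
# once /etc/hosts contains the VIP entry, the domain name should work the same way
mysql -h openstack-vip.ws.local -ukeystone -pkeystone123 -e 'SHOW DATABASES;'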

yum install openstack-keystone httpd mod_wsgi

# Edit the config file
vim /etc/keystone/keystone.conf
[database]
# ...
connection = mysql+pymysql://keystone:keystone123@openstack-vip.ws.local/keystone	# use the openstack-vip.ws.local domain name everywhere

[token]		# token provider; tokens expire after one hour by default
# ...
provider = fernet

# Configure name resolution
vim /etc/hosts
172.31.7.248 openstack-vip.ws.local

# Sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone

# Initialize the Fernet key repositories:
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# Bootstrap the Identity service:
keystone-manage bootstrap --bootstrap-password admin \
--bootstrap-admin-url http://openstack-vip.ws.local:5000/v3/ \
--bootstrap-internal-url http://openstack-vip.ws.local:5000/v3/ \
--bootstrap-public-url http://openstack-vip.ws.local:5000/v3/ \
--bootstrap-region-id RegionOne

admin: management network
192.168.0.0/16
internal: internal network
10.10.0.0/16
public: public network
172.31.0.0/16

# Configure the httpd service
vim /etc/httpd/conf/httpd.conf
ServerName 172.31.7.101:80

# Create the symlink
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Start and enable httpd
systemctl restart httpd; systemctl enable httpd

# Test availability
curl the local IP / the VIP address / the VIP domain name, for example:
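
Keystone listens on port 5000; each of these URLs should return the same version JSON:
curl http://172.31.7.101:5000/v3
curl http://172.31.7.248:5000/v3
curl http://openstack-vip.ws.local:5000/v3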

# Configure the admin account via environment variables
vim admin.sh
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://openstack-vip.ws.local:5000/v3
export OS_IDENTITY_API_VERSION=3

# Load the variables
source admin.sh

# Create a domain (roughly analogous to a data center / machine room)
openstack domain create --description "An Example Domain" example

# Create a project inside the domain
openstack project create --domain default --description "Service Project" service

# Create a demo project
openstack project create --domain default --description "Demo Project" myproject

# Create a user in the default domain
openstack user create --domain default --password-prompt myuser	# you will be prompted for a password; set it to myuser

# Create a role used for authorization
openstack role create myrole

# Add the myrole role to the myproject project and the myuser user
openstack role add --project myproject --user myuser myrole

# Unset the environment variables to test authentication explicitly
unset OS_AUTH_URL OS_PASSWORD

# Request an authentication token as the admin user
openstack --os-auth-url http://openstack-vip.ws.local:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue	   # enter the password set earlier: admin

# Request an authentication token as the myuser user created above
openstack --os-auth-url http://openstack-vip.ws.local:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue   # enter the password set earlier: myuser

# Create client environment scripts for the admin and demo accounts
vim admin-openrc.sh
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_AUTH_URL=http://openstack-vip.ws.local:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

vim demo-openrc.sh
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_AUTH_URL=http://openstack-vip.ws.local:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

# Request a token under each account
source admin-openrc.sh   # or: source demo-openrc.sh
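
For example, a token request under each account might look like this:
source admin-openrc.sh
openstack token issue
source demo-openrc.sh
openstack token issue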

glance(controller)

  • Database node: set up the shared (NFS) directory
yum -y install nfs-utils

mkdir /data/glance -p

vim /etc/exports
/data/glance *(rw,no_root_squash)

systemctl start nfs; systemctl enable nfs

# Create the database and grant privileges
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'glance123';
  • controller1: prepare to mount the shared image storage
yum -y install nfs-utils

systemctl start nfs; systemctl enable nfs

# Check the export
showmount -e 172.31.7.103

# Create the glance user
openstack user create --domain default --password-prompt glance		# you will be prompted for a password; set it to glance

# Add the admin role to the glance user in the service project
openstack role add --project service --user glance admin

# Create the glance service entity (similar in spirit to a Kubernetes Service)
openstack service create --name glance --description "OpenStack Image" image

# Register the API endpoints for the glance service
openstack endpoint create --region RegionOne image public http://openstack-vip.ws.local:9292	# public network
openstack endpoint create --region RegionOne image internal http://openstack-vip.ws.local:9292	# internal network
openstack endpoint create --region RegionOne image admin http://openstack-vip.ws.local:9292		# admin network
  • ha1: configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
listen openstack-glance-9292
bind 172.31.7.248:9292
mode tcp
server 172.31.7.101 172.31.7.101:9292 check inter 3s fall 3 rise 5

systemctl restart haproxy
  • controller1: continue configuring the glance service
# Install the package
yum install openstack-glance

# Edit the config file (the file must not contain Chinese characters, not even in comments)
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:glance123@openstack-vip.ws.local/glance

[keystone_authtoken]		# keystone authentication settings
www_authenticate_uri  = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance			# the password set when creating the glance user

[paste_deploy]		# use keystone for authentication
flavor = keystone

[glance_store]		# image storage backend
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/


# Populate the database
su -s /bin/sh -c "glance-manage db_sync" glance

# Enable and start the service
systemctl enable openstack-glance-api.service; systemctl start openstack-glance-api.service

# Configure the shared storage; stop glance first (otherwise there will be ownership problems)
systemctl stop openstack-glance-api.service

# Mount the share
mount -t nfs 172.31.7.103:/data/glance /var/lib/glance/images

# Make the mount persistent
vim /etc/fstab
172.31.7.103:/data/glance /var/lib/glance/images nfs defaults,_netdev 0 0

PS: _netdev delays the mount until the network is up, to avoid long boot times or boot failures.

# On the database (NFS) node, fix ownership using glance's UID/GID
id glance    # look up the UID/GID
chown -R 161:161 /data/glance
mount -a

# Start glance
systemctl start openstack-glance-api.service

# Test availability
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

glance image-create --name "cirros-0.4.0" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public

glance image-list
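
A quick check, not in the original notes, that the image really landed on the shared storage:
openstack image list
ls -lh /var/lib/glance/images/		# on controller1 (the NFS mount)
ls -lh /data/glance/			# on the NFS server (172.31.7.103)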

Placement(controller)

  • A component split out of nova in the Stein release
  • Purpose
  • Tracks the resource inventory and usage of the compute nodes
  • Database node: create the database and grant privileges
CREATE DATABASE placement;

GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'placement123';
  • controller1
# Create the user
openstack user create --domain default --password-prompt placement		# set the password to placement

# Add the placement user to the service project with the admin role
openstack role add --project service --user placement admin

# Create the Placement service entity
openstack service create --name placement --description "Placement API" placement

# Register the API endpoints for the Placement service
openstack endpoint create --region RegionOne placement public http://openstack-vip.ws.local:8778	# public
openstack endpoint create --region RegionOne placement internal http://openstack-vip.ws.local:8778	# internal
openstack endpoint create --region RegionOne placement admin http://openstack-vip.ws.local:8778		# admin
  • ha1: configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
listen openstack-placement-8778
bind 172.31.7.248:8778
mode tcp
server 172.31.7.101 172.31.7.101:8778 check inter 3s fall 3 rise 5

systemctl restart haproxy
  • controller1 (continue configuring the service)
# Install the package
yum install openstack-placement-api

# Edit the config file
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:placement123@openstack-vip.ws.local/placement

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://openstack-vip.ws.local:5000/v3
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement

# Populate the database
su -s /bin/sh -c "placement-manage db sync" placement

# Work around a packaging bug; the current docs do not mention the fix (the Rocky docs carry the bug notice)
# Access must be allowed for Apache 2.4 and newer; Train already ships a newer Apache, so without this the API returns errors
# Due to a packaging error, access to the Placement API must be enabled by adding the following to the placement Apache config (00-placement-api.conf on Train; older releases used 00-nova-placement-api.conf)

vim /etc/httpd/conf.d/00-placement-api.conf   # append
....
<Directory /usr/bin>
<IfVersion >= 2.4>
  Require all granted
</IfVersion>
<IfVersion < 2.4>
  Order allow,deny
  Allow from all		# allow everyone
</IfVersion>
</Directory>

# httpd -v shows the version is already above 2.4, so this configuration is required

# Restart the service
systemctl restart httpd

# Verify
placement-status upgrade check
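
As an extra check (not in the original notes), the Placement API root should answer through the VIP with a small JSON document listing the available API versions:
curl http://openstack-vip.ws.local:8778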

nova-controller

  • Database node
# Create the databases and grant privileges
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'nova123';
  • Controller node
source admin-openrc

# Create the user
openstack user create --domain default --password-prompt nova		# set the password to nova

# Give the nova user the admin role in the service project
openstack role add --project service --user nova admin

# Create the service entity
openstack service create --name nova --description "OpenStack Compute" compute

# Register the three endpoints
openstack endpoint create --region RegionOne compute public http://openstack-vip.ws.local:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://openstack-vip.ws.local:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://openstack-vip.ws.local:8774/v2.1
# openstack endpoint list   # check that the endpoints registered successfully
  • ha1: configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
listen openstack-nova-controller-8774
bind 172.31.7.248:8774
mode tcp
server 172.31.7.101 172.31.7.101:8774 check inter 3s fall 3 rise 5

# Restart
systemctl restart haproxy
  • Controller: continue configuring
# Install the nova packages
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

openstack-nova-conductor	# database access proxy for the compute nodes
openstack-nova-novncproxy	# VNC console access to instances
openstack-nova-scheduler	# instance scheduling

# Edit the config file
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[api_database]
connection = mysql+pymysql://nova:nova123@openstack-vip.ws.local/nova_api

[database]
connection = mysql+pymysql://nova:nova123@openstack-vip.ws.local/nova

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local:5672/

[api]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.ws.local:5000/
auth_url = http://openstack-vip.ws.local:5000/
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[vnc]
enabled = true
server_listen = 172.31.7.101
server_proxyclient_address = 172.31.7.101

[glance]
api_servers = http://openstack-vip.ws.local:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack-vip.ws.local:5000/v3
username = placement
password = placement

# Populate the nova_api database
su -s /bin/sh -c "nova-manage api_db sync" nova

# Register the cell0 database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

# Create the cell1 cell
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

# Populate the nova database
su -s /bin/sh -c "nova-manage db sync" nova

# Verify that cell0 and cell1 are registered correctly (cells are used to map discovered compute hosts so they can be managed separately)
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

# Enable and start the services
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  • Configure ha1
vim /etc/haproxy/haproxy.cfg
defaults
  mode                    http
  log                     global
  option                  httplog
  option                  dontlognull
  option http-server-close
  option forwardfor       except 127.0.0.0/8
  option                  redispatch
  retries                 3
  timeout http-request    10s
  timeout queue           1m
  timeout connect         10s
  timeout client          5m
  timeout server          5m
  timeout http-keep-alive 5m
  timeout check           10s
  maxconn                 30000

# ... existing listen sections unchanged ...
listen openstack-nova-novncproxy-6080
bind 172.31.7.248:6080
mode tcp
server 172.31.7.101 172.31.7.101:6080 check inter 3s fall 3 rise 5

# Restart
systemctl restart haproxy
  • controller
# Restart the nova services
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

# The scheduler log may show database connection timeout errors at first; the connection goes through the haproxy proxy and is a little slower, so this can be ignored, it connects shortly afterwards

nova-compute

  • node1
# Install the yum repos (the official repo plus the Train repo is enough)
yum install centos-release-openstack-train
yum install https://rdoproject.org/repos/rdo-release.rpm

# Install the base packages
yum install -y python-openstackclient openstack-selinux
yum install -y openstack-nova-compute

# Edit the config file
vim  /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://openstack-vip.ws.local:5000/v3
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova

[DEFAULT]
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver

[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = 172.31.7.107		# the compute host that runs the instances
novncproxy_base_url = http://openstack-vip.ws.local:6080/vnc_auto.html	# the noVNC web console served via the controller/VIP

[glance]
api_servers = http://openstack-vip.ws.local:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://openstack-vip.ws.local:5000/v3
username = placement
password = placement

# Check whether the CPU supports hardware virtualization (if the result is 0, add the config below)
egrep -c '(vmx|svm)' /proc/cpuinfo
# If not supported, add:
vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu

# Configure name resolution
vim /etc/hosts
# ... existing entries ...
172.31.7.248 openstack-vip.ws.local

# Enable and start the services
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service




==============================================controller===================================================
# On the controller node, verify the compute node has registered
openstack compute service list --service nova-compute

# On the controller, run host discovery
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova

# Running discovery manually after every new host is tedious; configure an interval so it runs automatically
vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300		# run host discovery every 300 seconds

# Restart the services
systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

# Verify
openstack compute service list
+----+--------------------+------------+----------+---------+-------+----------------------------+
| Id | Binary             | Host       | Zone     | Status  | State | Updated At                 |
+----+--------------------+------------+----------+---------+-------+----------------------------+
|  1 | nova-scheduler     | controller | internal | enabled | up    | 2016-02-09T23:11:15.000000 |
|  2 | nova-conductor     | controller | internal | enabled | up    | 2016-02-09T23:11:16.000000 |
|  3 | nova-compute       | compute1   | nova     | enabled | up    | 2016-02-09T23:11:20.000000 |
+----+--------------------+------------+----------+---------+-------+----------------------------+

# List all registered API endpoints
openstack catalog list

# Check the API status
nova-status upgrade check

neutron-controller

  • Network types
  • Provider network (bridged): instances are bridged onto the physical network and must sit in the same network range as the physical hosts
  • Self-service network: tenants can create their own networks, which reach the outside world through virtual routers (not widely used here)
  • Create the database and grant privileges
CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron123';
  • Configuration steps
# Load the environment variables
source  admin-openrc

# Create the user
openstack user create --domain default --password-prompt neutron		# set the password to neutron

# Give the neutron user the admin role in the service project
openstack role add --project service --user neutron admin

# Create the service entity
openstack service create --name neutron --description "OpenStack Networking" network

# Register API endpoints under the network service created above
openstack endpoint create --region RegionOne network public http://openstack-vip.ws.local:9696
openstack endpoint create --region RegionOne network internal http://openstack-vip.ws.local:9696
openstack endpoint create --region RegionOne network admin http://openstack-vip.ws.local:9696
  • ha1: configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
listen openstack-neutron-9696
bind 172.31.7.248:9696
mode tcp
server 172.31.7.101 172.31.7.101:9696 check inter 3s fall 3 rise 5

# Restart
systemctl restart haproxy
  • Networking Option 1: Provider networks (bridged)
  • Networking Option 2: Self-service networks
  • Controller: continue with the provider (bridged) network option
# Install the packages
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
ebtables	# security groups with user-defined rules, works like iptables
openstack-neutron-ml2	# layer-2 plugin, mainly used for bridging
openstack-neutron-linuxbridge	# Linux bridge agent

vim  /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:neutron123@openstack-vip.ws.local/neutron

[DEFAULT]
core_plugin = ml2
service_plugins =		# disable layer-3 network plugins

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[DEFAULT]
auth_strategy = keystone		# use keystone for authentication

[keystone_authtoken]			# keystone credentials
www_authenticate_uri = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[DEFAULT]				# notify nova of network/port changes
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[nova]			# not present in the stock file, append it at the end; credentials neutron uses when talking to nova via keystone
auth_url = http://openstack-vip.ws.local:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[oslo_concurrency]		# lock path
lock_path = /var/lib/neutron/tmp
  • Controller: configure the ML2 (layer-2) plugin
vim /etc/neutron/plugins/ml2/ml2_conf.ini
# The stock file is missing many options; copy a full sample from another release: https://docs.openstack.org/ocata/config-reference/networking/samples/ml2_conf.ini.html
[DEFAULT]

#
# From oslo.log
#

# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false

# DEPRECATED: If set to false, the logging level will be set to WARNING instead
# of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true

# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>

# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S

# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>

# (Optional) The base directory used for relative log_file  paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>

# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false

# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false

# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER

# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true

# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s

# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO

# Enables or disables publication of error events. (boolean value)
#publish_errors = false

# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "

# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "

# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false


[ml2]

#
# From neutron.ml2
#

# List of network type driver entrypoints to be loaded from the
# neutron.ml2.type_drivers namespace. (list value)
#type_drivers = local,flat,vlan,gre,vxlan,geneve

# Ordered list of network_types to allocate as tenant networks. The default
# value 'local' is useful for single-box testing but provides no connectivity
# between hosts. (list value)
#tenant_network_types = local

# An ordered list of networking mechanism driver entrypoints to be loaded from
# the neutron.ml2.mechanism_drivers namespace. (list value)
#mechanism_drivers =

# An ordered list of extension driver entrypoints to be loaded from the
# neutron.ml2.extension_drivers namespace. For example: extension_drivers =
# port_security,qos (list value)
#extension_drivers =

# Maximum size of an IP packet (MTU) that can traverse the underlying physical
# network infrastructure without fragmentation when using an overlay/tunnel
# protocol. This option allows specifying a physical network MTU value that
# differs from the default global_physnet_mtu value. (integer value)
#path_mtu = 0

# A list of mappings of physical networks to MTU values. The format of the
# mapping is <physnet>:<mtu val>. This mapping allows specifying a physical
# network MTU value that differs from the default global_physnet_mtu value.
# (list value)
#physical_network_mtus =

# Default network type for external networks when no provider attributes are
# specified. By default it is None, which means that if provider attributes are
# not specified while creating external networks then they will have the same
# type as tenant networks. Allowed values for external_network_type config
# option depend on the network type values configured in type_drivers config
# option. (string value)
#external_network_type = <None>

# IP version of all overlay (tunnel) network endpoints. Use a value of 4 for
# IPv4 or 6 for IPv6. (integer value)
#overlay_ip_version = 4


[ml2_type_flat]

#
# From neutron.ml2
#

# List of physical_network names with which flat networks can be created. Use
# default '*' to allow flat networks with arbitrary physical_network names. Use
# an empty list to disable flat networks. (list value)
#flat_networks = *


[ml2_type_geneve]

#
# From neutron.ml2
#

# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
# Geneve VNI IDs that are available for tenant network allocation (list value)
#vni_ranges =

# Geneve encapsulation header size is dynamic, this value is used to calculate
# the maximum MTU for the driver. This is the sum of the sizes of the outer ETH
# + IP + UDP + GENEVE header sizes. The default size for this field is 50,
# which is the size of the Geneve header without any additional option headers.
# (integer value)
#max_header_size = 30


[ml2_type_gre]

#
# From neutron.ml2
#

# Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE
# tunnel IDs that are available for tenant network allocation (list value)
#tunnel_id_ranges =


[ml2_type_vlan]

#
# From neutron.ml2
#

# List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network>
# specifying physical_network names usable for VLAN provider and tenant
# networks, as well as ranges of VLAN tags on each available for allocation to
# tenant networks. (list value)
#network_vlan_ranges =


[ml2_type_vxlan]

#
# From neutron.ml2
#

# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
# VXLAN VNI IDs that are available for tenant network allocation (list value)
#vni_ranges =

# Multicast group for VXLAN. When configured, will enable sending all broadcast
# traffic to this multicast group. When left unconfigured, will disable
# multicast VXLAN mode. (string value)
#vxlan_group = <None>


[securitygroup]

#
# From neutron.ml2
#

# Driver for security groups firewall in the L2 agent (string value)
#firewall_driver = <None>

# Controls whether the neutron security group API is enabled in the server. It
# should be false when using no security groups or using the nova security
# group API. (boolean value)
#enable_security_group = true

# Use ipset to speed-up the iptables based security groups. Enabling ipset
# support requires that ipset is installed on L2 agent node. (boolean value)
#enable_ipset = true



=====================================configuration=======================================
[ml2]		# the stock file has no [ml2] settings; append these at the end
type_drivers = flat,vlan

[ml2]
tenant_network_types =			# no tenant (self-service) networks

[ml2]
mechanism_drivers = linuxbridge		# enable the Linux bridge mechanism

[ml2]
extension_drivers = port_security		# enable the port security extension driver

[ml2_type_flat]
flat_networks = external	# name of the flat provider network; it points at the external network and is bound to a NIC later

[securitygroup]
enable_ipset = true				# enable ipset for more efficient security group rules
  • Map the provider network to a physical NIC
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# The stock file is incomplete; copy a full sample from another release: https://docs.openstack.org/ocata/config-reference/networking/samples/linuxbridge_agent.ini
[DEFAULT]

#
# From oslo.log
#

# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false

# DEPRECATED: If set to false, the logging level will be set to WARNING instead
# of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true

# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>

# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S

# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>

# (Optional) The base directory used for relative log_file  paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>

# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false

# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false

# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER

# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true

# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s

# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO

# Enables or disables publication of error events. (boolean value)
#publish_errors = false

# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "

# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "

# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false


[agent]

#
# From neutron.ml2.linuxbridge.agent
#

# The number of seconds the agent will wait between polling for local device
# changes. (integer value)
#polling_interval = 2

# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
# value is set to 0, rpc timeout won't be changed (integer value)
#quitting_rpc_timeout = 10

# DEPRECATED: Enable suppression of ARP responses that don't match an IP
# address that belongs to the port from which they originate. Note: This
# prevents the VMs attached to this agent from spoofing, it doesn't protect
# them from other devices which have the capability to spoof (e.g. bare metal
# or VMs attached to agents without this flag set to True). Spoofing rules will
# not be added to any ports that have port security disabled. For LinuxBridge,
# this requires ebtables. For OVS, it requires a version that supports matching
# ARP headers. This option will be removed in Ocata so the only way to disable
# protection will be via the port security extension. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#prevent_arp_spoofing = true

# Extensions list to use (list value)
#extensions =


[linux_bridge]

#
# From neutron.ml2.linuxbridge.agent
#

# Comma-separated list of <physical_network>:<physical_interface> tuples
# mapping physical network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical networks
# listed in network_vlan_ranges on the server should have mappings to
# appropriate interfaces on each agent. (list value)
#physical_interface_mappings =

# List of <physical_network>:<physical_bridge> (list value)
#bridge_mappings =


[securitygroup]

#
# From neutron.ml2.linuxbridge.agent
#

# Driver for security groups firewall in the L2 agent (string value)
#firewall_driver = <None>

# Controls whether the neutron security group API is enabled in the server. It
# should be false when using no security groups or using the nova security
# group API. (boolean value)
#enable_security_group = true

# Use ipset to speed-up the iptables based security groups. Enabling ipset
# support requires that ipset is installed on L2 agent node. (boolean value)
#enable_ipset = true


[vxlan]

#
# From neutron.ml2.linuxbridge.agent
#

# Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin
# using linuxbridge mechanism driver (boolean value)
#enable_vxlan = true

# TTL for vxlan interface protocol packets. (integer value)
#ttl = <None>

# TOS for vxlan interface protocol packets. (integer value)
#tos = <None>

# Multicast group(s) for vxlan interface. A range of group addresses may be
# specified by using CIDR notation. Specifying a range allows different VNIs to
# use different group addresses, reducing or eliminating spurious broadcast
# traffic to the tunnel endpoints. To reserve a unique group for each possible
# (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on
# all the agents. (string value)
#vxlan_group = 224.0.0.1

# IP address of local overlay (tunnel) network endpoint. Use either an IPv4 or
# IPv6 address that resides on one of the host network interfaces. The IP
# version of this value must match the value of the 'overlay_ip_version' option
# in the ML2 plug-in configuration file on the neutron server node(s). (IP
# address value)
#local_ip = <None>

# Extension to use alongside ml2 plugin's l2population mechanism driver. It
# enables the plugin to populate VXLAN forwarding table. (boolean value)
#l2_population = false

# Enable local ARP responder which provides local responses instead of
# performing ARP broadcast into the overlay. Enabling local ARP responder is
# not fully compatible with the allowed-address-pairs extension. (boolean
# value)
#arp_responder = false


==================================================configuration==================================================
[linux_bridge]
physical_interface_mappings = external:eth0		# bind the named provider network to a physical NIC

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


# Kernel configuration
# enable bridge netfilter support in the Linux kernel
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

# Load the module
modprobe br_netfilter

# Apply the sysctl settings
sysctl -p
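
Not in the original notes: the modprobe above does not survive a reboot; on CentOS 7 one way to make it persistent is a modules-load.d entry.
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
lsmod | grep br_netfilter		# confirm the module is loaded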
  • Configure the DHCP agent
# lets instances obtain IP addresses via DHCP
vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
  • Configure the metadata agent
# Configure the metadata host and the shared secret
vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = openstack-vip.ws.local		# the nova metadata API is reverse-proxied through the ha1 node
metadata_proxy_shared_secret = xier123
  • Configure the Compute service to use the Networking service
vim /etc/nova/nova.conf
[neutron]
auth_url = http://openstack-vip.ws.local:5000		# keystone address
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = xier123
  • Remaining neutron setup
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Restart the nova API service
systemctl restart openstack-nova-api.service

# Enable and start the neutron services
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service  neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service  neutron-metadata-agent.service

neutron-compute

  • Install the packages on the compute node
yum install -y openstack-neutron-linuxbridge ebtables ipset
  • Edit the config file
vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
  • Configure the bridge network
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# The stock file is incomplete; copy a full sample from another release
[DEFAULT]

#
# From oslo.log
#

# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false

# DEPRECATED: If set to false, the logging level will be set to WARNING instead
# of the default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true

# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>

# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S

# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>

# (Optional) The base directory used for relative log_file  paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>

# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false

# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false

# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER

# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true

# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s

# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s

# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d

# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s

# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s

# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO

# Enables or disables publication of error events. (boolean value)
#publish_errors = false

# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "

# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "

# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false


[agent]

#
# From neutron.ml2.linuxbridge.agent
#

# The number of seconds the agent will wait between polling for local device
# changes. (integer value)
#polling_interval = 2

# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
# value is set to 0, rpc timeout won't be changed (integer value)
#quitting_rpc_timeout = 10

# DEPRECATED: Enable suppression of ARP responses that don't match an IP
# address that belongs to the port from which they originate. Note: This
# prevents the VMs attached to this agent from spoofing, it doesn't protect
# them from other devices which have the capability to spoof (e.g. bare metal
# or VMs attached to agents without this flag set to True). Spoofing rules will
# not be added to any ports that have port security disabled. For LinuxBridge,
# this requires ebtables. For OVS, it requires a version that supports matching
# ARP headers. This option will be removed in Ocata so the only way to disable
# protection will be via the port security extension. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#prevent_arp_spoofing = true

# Extensions list to use (list value)
#extensions =


[linux_bridge]

#
# From neutron.ml2.linuxbridge.agent
#

# Comma-separated list of <physical_network>:<physical_interface> tuples
# mapping physical network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical networks
# listed in network_vlan_ranges on the server should have mappings to
# appropriate interfaces on each agent. (list value)
#physical_interface_mappings =

# List of <physical_network>:<physical_bridge> (list value)
#bridge_mappings =


[securitygroup]

#
# From neutron.ml2.linuxbridge.agent
#

# Driver for security groups firewall in the L2 agent (string value)
#firewall_driver = <None>

# Controls whether the neutron security group API is enabled in the server. It
# should be false when using no security groups or using the nova security
# group API. (boolean value)
#enable_security_group = true

# Use ipset to speed-up the iptables based security groups. Enabling ipset
# support requires that ipset is installed on L2 agent node. (boolean value)
#enable_ipset = true


[vxlan]

#
# From neutron.ml2.linuxbridge.agent
#

# Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin
# using linuxbridge mechanism driver (boolean value)
#enable_vxlan = true

# TTL for vxlan interface protocol packets. (integer value)
#ttl = <None>

# TOS for vxlan interface protocol packets. (integer value)
#tos = <None>

# Multicast group(s) for vxlan interface. A range of group addresses may be
# specified by using CIDR notation. Specifying a range allows different VNIs to
# use different group addresses, reducing or eliminating spurious broadcast
# traffic to the tunnel endpoints. To reserve a unique group for each possible
# (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on
# all the agents. (string value)
#vxlan_group = 224.0.0.1

# IP address of local overlay (tunnel) network endpoint. Use either an IPv4 or
# IPv6 address that resides on one of the host network interfaces. The IP
# version of this value must match the value of the 'overlay_ip_version' option
# in the ML2 plug-in configuration file on the neutron server node(s). (IP
# address value)
#local_ip = <None>

# Extension to use alongside ml2 plugin's l2population mechanism driver. It
# enables the plugin to populate VXLAN forwarding table. (boolean value)
#l2_population = false

# Enable local ARP responder which provides local responses instead of
# performing ARP broadcast into the overlay. Enabling local ARP responder is
# not fully compatible with the allowed-address-pairs extension. (boolean
# value)
#arp_responder = false


==================================================configuration==============================================
[linux_bridge]
physical_interface_mappings = external:eth0

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver



# Configure the bridge kernel parameters
vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
  • Configure nova-to-neutron communication
vim /etc/nova/nova.conf
[neutron]
auth_url = http://openstack-vip.ws.local:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
  • Start and enable the services
# Restart nova so it picks up the neutron settings
systemctl restart openstack-nova-compute.service

# Enable and start the neutron agent
systemctl enable neutron-linuxbridge-agent.service
systemctl restart neutron-linuxbridge-agent.service

# Apply the sysctl settings
sysctl -p

# On the controller, restart the nova and neutron services and check the logs for error messages
systemctl restart neutron-*
systemctl restart openstack-nova*
  • Verify on the controller
openstack network agent list

Create an instance (known bugs below)

ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

# Create the network
openstack network create  --share --external --provider-physical-network external --provider-network-type flat external-net

# Create the subnet
openstack subnet create --network external-net --allocation-pool start=172.31.7.50,end=172.31.7.100 --dns-nameserver 223.6.6.6 --gateway 172.31.7.254 --subnet-range 172.31.7.0/24 external-sub

# Create a flavor
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano


# Allow ICMP in the default security group
openstack security group rule create --proto icmp default

# Allow TCP port 22 in the default security group
openstack security group rule create --proto tcp --dst-port 22 default

# Create the instance
openstack server create --flavor m1.nano --image cirros --nic net-id=PROVIDER_NET_ID --security-group default --key-name mykey train-mv1
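
A quick status check after creation (not in the original notes); train-mv1 is the instance name used above:
openstack server list				# wait for the status to become ACTIVE
openstack console log show train-mv1		# boot log of the instance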


=============================================bug workarounds================================================================
# Manually attach the physical NIC to the instance bridge; do this on both controller and compute nodes (bug workaround)
brctl addif brqxxxxxxx-xx eth0

# Remote console access to the instance (the command prints a URL; open it in a browser)
openstack console url show train-mv1

#############################################important###################################################
# The instance may fail to initialize; fix it as follows (bug workaround):
# Check which virtualization types the host supports
virsh capabilities

# Edit the config file
vim /etc/nova/nova.conf
[libvirt]
# ... existing settings ...
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0	# change the machine type
cpu_mode = host-passthrough		# pass the host CPU straight through to the guest

# Restart nova
systemctl restart openstack-nova-compute.service


===========================================if the steps above already fixed it, the following is not needed====================================
# Patch the source to work around the bug
vim /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py
# comment out lines 399 and 400:
if 'metric' in gateway:
	metric = gateway['metric'] - 1

# Enable debug logging
vim /etc/neutron/neutron.conf
debug = true

# Check whether the interface is attached to the bridge
brctl show

dashboard(controller)

  • Install the package
yum install openstack-dashboard -y
  • Configure the service
vim  /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "172.31.7.101"	# host running the Identity service
ALLOWED_HOSTS = ['172.31.7.101', 'openstack-vip.ws.local']	# allowed hosts whitelist
  • ha1: configure the reverse proxy
vim /etc/haproxy/haproxy.cfg
listen openstack-dashboard-80
bind 172.31.7.248:80
mode tcp
server 172.31.7.101 172.31.7.101:80 check inter 3s fall 3 rise 5
  • Continue configuring the dashboard
# Configure sessions
# https://docs.openstack.org/horizon/latest/admin/sessions.html
vim /etc/openstack-dashboard/local_settings
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

WEBROOT = '/dashboard'

CACHES = {
  'default': {
       'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
       'LOCATION': 'openstack-vip.ws.local:11211',
  }
}

# Enable Identity API version 3
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

# Enable support for domains
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

# Configure API versions
OPENSTACK_API_VERSIONS = {
  "identity": 3,
  "image": 2,
  "volume": 3,
}

# Configure Default as the default domain for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

# Configure user as the default role for users created through the dashboard
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

# Only layer-2 (provider) networking is used, so disable all layer-3 features
OPENSTACK_NEUTRON_NETWORK = {
  ...
  'enable_router': False,
  'enable_quotas': False,
  'enable_distributed_router': False,
  'enable_ha_router': False,
  'enable_lb': False,
  'enable_firewall': False,
  'enable_vpn': False,
  'enable_fip_topology_check': False,
}


TIME_ZONE = "Asia/"


vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}

# Restart httpd
systemctl restart httpd.service
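
A simple availability check, assuming the /dashboard URL prefix configured above:
curl -I http://172.31.7.101/dashboard/
curl -I http://openstack-vip.ws.local/dashboard/		# through the haproxy VIP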
  • Inspect the cached session data
telnet 172.31.7.248 11211

stats items		# list the slab item stats (keys per slab)

stats cachedump ID 0   # dump the keys in slab ID; 0 means list them all

get key		# read a value

Add a compute node (node2)

  • More compute nodes are likely to be added later; a one-shot script (see the script section below) makes this easy
  • node2 (nova)
# Install the base packages
yum install centos-release-openstack-train python-openstackclient openstack-selinux

# Install nova
yum install openstack-nova-compute

# Copy the working config from node1 to node2
# Run on node1
cd /etc/nova/

tar zcvf nova-computer.tar.gz ./

scp /etc/nova/nova-computer.tar.gz 172.31.7.108:/etc/nova/

# Back on node2
cd /etc/nova/

tar xvf nova-computer.tar.gz

vim nova.conf
server_proxyclient_address = 172.31.7.108

vim /etc/hosts
172.31.7.248 openstack-vip.ws.local

# Restart the services
systemctl restart openstack-nova-compute libvirtd
systemctl enable openstack-nova-compute libvirtd

# On the controller, check that node2 has registered
nova service-list
  • node2 (neutron)
# Install neutron
yum install -y openstack-neutron-linuxbridge ebtables ipset

# On the controller node: enable the security group and copy over the patched python file
vim  /etc/neutron/plugins/ml2/ml2_conf.ini
enable_security_group = true

scp /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py 172.31.7.108:/usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py

# 到node1拷贝neutron配置文件
cd /etc/neutron/

tar zcvf neutron-compute.tar.gz ./*

scp neutron-compute.tar.gz 172.31.7.108:/etc/neutron/

# 到node2节点
cd /etc/neutron/

tar zxvf neutron-compute.tar.gz

vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service

sysctl -p

# 到controller节点验证是否已经加入
neutron agent-list

编写脚本快速添加openstack node节点 (node3)

  • 编写脚本
# 环境准备
mkdir openstack-compute

cd openstack-compute

# 在脚本当前目录
1.需要有nova和neutron的配置压缩包
2.内核参数(sysctl.conf)和资源限制(limits.conf)
cat /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

cat /etc/security/limits.conf
root    soft    core    unlimited
root    hard    core    unlimited
root    soft    nproc    1000000
root    hard    nproc    1000000
root    soft    nofile    1000000
root    hard    nofile    1000000
root    soft    memlock    32000
root    hard    memlock    32000
root    soft    msgqueue    8192000
root    hard    msgqueue    8192000

vim profile
export HISTTIMEFORMAT="%F %T `whoami` "

==========================================================================================================
# 环境准备好编写脚本
vim openstack-compute-install.sh
#!/bin/bash

echo "即将替换yum源"
sleep 3
echo "yum替换完成"

echo "即将开始时间同步"
/usr/sbin/ntpdate time1.aliyun.com && hwclock -w
echo "时间同步完成"
echo "当前服务器时间--->". date "+%Y年%m月%d日 %H时%M分%S秒"
sleep 1

echo "即将开始系统优化,包括内核参数、资源限制以及历史命令格式"
sleep 1
\cp limits.conf /etc/security/limits.conf
\cp sysctl.conf /etc/sysctl.conf
echo "export HISTTIMEFORMAT="%F %T `whoami`"" >> /etc/profile
sleep 1
echo "172.31.7.248 openstack-vip.ws.local" >> /etc/hosts
sleep 1
echo "系统参数优化完成"
sleep 1

yum install -y centos-release-openstack-train python-openstackclient openstack-selinux

# install nova
echo "即将开始安装nova"
sleep 1
yum install -y openstack-nova-compute

echo "nova 安装完成,即将开始替换配置文件"
sleep 1
tar xvf nova-computer.tar.gz -C /etc/nova
echo "开始替换server_proxyclient_address监听地址"
sleep 1
NODE_IP=`ifconfig eth0 | grep -w inet | awk '{print $2}'`
echo "当前计算节点IP是 $NODE_IP,即将替换nova.conf文件"
sleep 1
sed -i "s/server_proxyclient_address = 172.31.7.107/server_proxyclient_address = $NODE_IP/g" /etc/nova/nova.conf

systemctl enable libvirtd.service openstack-nova-compute.service; systemctl start libvirtd.service openstack-nova-compute.service

# install neutron
echo "即将开始安装neutron"
sleep 1
yum install -y openstack-neutron-linuxbridge ebtables ipset
echo "neutron安装完成,即将开始替换配置文件"
sleep 1
tar xvf neutron-compute.tar.gz -C /etc/neutron
\cp  linuxbridge_neutron_agent.py  /usr/lib/python2.7/site-packages/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py

systemctl enable neutron-linuxbridge-agent.service; systemctl start neutron-linuxbridge-agent.service

echo "当前计算节点nova和neutron服务安装配置完成,一分钟后即将重启当前服务器,请在服务器重启后查看nova和neutron是否有异常日志或到controller验证"
sleep 3
shutdown -r +1 "系统将在一分钟后重启,请重启后验证服务是否正常"
  • 打包脚本
tar zcvf openstack-compute-install.tar.gz openstack-compute/

# 删除在controller节点上创建neutron的注册信息
neutron agent-list	# 可查看ID
neutron agent-delete ID

# 删除在controller节点上创建nova的注册信息
nova service-list    # 可查看ID
nova service-delete ID
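# A minimal sketch of rolling the package out to a brand-new compute node (172.31.7.110 is a hypothetical address; adjust paths to the environment):
scp openstack-compute-install.tar.gz 172.31.7.110:/root/
ssh 172.31.7.110 "cd /root && tar zxvf openstack-compute-install.tar.gz && cd openstack-compute && bash openstack-compute-install.sh"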

controller高可用(controller2)

keystone

# 安装openstack基本服务
yum install -y centos-release-openstack-train 
yum install -y python-openstackclient openstack-selinux

# 安装连接数据模块
yum -y install mariadb python2-PyMySQL python-memcached

# 安装配置keystone服务
yum install -y openstack-keystone httpd mod_wsgi


# 作用: 提供这些是为了允许在另一个操作系统用户/组下运行 keystone
# 由于是高可用备节点,使用基本配置在已经搭好的 controller1 节点拷贝即可
# controller1节点
cd /etc/keystone/

tar zcvf keystone-controller1.tar.gz ./*

scp keystone-controller1.tar.gz 172.31.7.102:/etc/keystone/

# 回到controller2节点
cd /etc/keystone/

tar zxvf keystone-controller1.tar.gz

# 查看IP有没有需要改变的
grep 172 ./* -R

# 配置hosts解析
vim /etc/hosts
172.31.7.248 openstack-vip.ws.local

# 因为数据端是单独的,所以数据不需要初始化。keystone也不用初始化。授权创建账号都不需要

# 配置http
vim /etc/httpd/conf/httpd.conf
''''
ServerName 172.31.7.102:80

# 配置软链接与启动自启http
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

systemctl enable httpd.service; systemctl start httpd.service

# 验证有没有5000端口或者将haproxy1中的172.31.7.101:5000注释打开172.31.7.102反向代理验证openstack是否可用
ss -tnl
  • ha1配置反向代理
vim /etc/haproxy/haproxy.cfg
listen openstack-keystone-5000
bind 172.31.7.248:5000
mode tcp
server 172.31.7.101 172.31.7.101:5000 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:5000 check inter 3s fall 3 rise 5

systemctl restart haproxy
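# Before each haproxy restart in this document, the edited config can be syntax-checked first; haproxy's -c flag only validates the file and does not touch the running service:
haproxy -c -f /etc/haproxy/haproxy.cfg && systemctl restart haproxy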

glance

# 创建数据库,创建账号,同步数据库都不需要操作,已经在openstack里已经注册好了,数据库也初始化好了,能访问即可

# 安装服务
yum install -y openstack-glance

# 配置共享存储
mkdir /var/lib/glance/images

chown -R glance.glance /var/lib/glance/images

vim /etc/fstab
172.31.7.105:/data/glance /var/lib/glance/images nfs defaults,_netdev 0 0

mount -a
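# A quick check that both controllers share the same NFS-backed image store (a sketch; the test file name is arbitrary):
df -h /var/lib/glance/images          # should show 172.31.7.105:/data/glance
su -s /bin/sh -c "touch /var/lib/glance/images/.rw-test && rm -f /var/lib/glance/images/.rw-test" glance    # confirms the glance user can write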

# 在controller1节点打包配置文件拷贝过来
# controller1
cd /etc/glance

tar zcvf glance-controller1.tar.gz ./*

scp glance-controller1.tar.gz 172.31.7.102:/etc/glance

# 回到controller2
cd /etc/glance
tar zxvf glance-controller1.tar.gz

# 启动自启
systemctl enable openstack-glance-api.service; systemctl start openstack-glance-api.service
  • ha1
vim /etc/haproxy/haproxy.cfg
listen openstack-glance-9292
bind 172.31.7.248:9292
mode tcp
server 172.31.7.101 172.31.7.101:9292 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:9292 check inter 3s fall 3 rise 5

systemctl restart haproxy

placement

# 因为openstack已经注册了账号和端点等信息,不需要配置。数据库已经有,不需要创建和初始化操作

# 安装服务
yum install -y openstack-placement-api

# 打包controller1节点的配置文件
# controller1节点
cd /etc/placement/

tar zcvf placement-controller1.tar.gz ./*

scp placement-controller1.tar.gz 172.31.7.102:/etc/placement/

# 回到controller2节点
cd /etc/placement/

tar zxvf placement-controller1.tar.gz

# 检查是否有IP以前值,有则改为本机ip
grep 172 ./* -R

# 解决bug(允许apache大于2.4版本的访问,目前t版已经大于了,如果不写会报错)
vim /etc/httpd/conf.d/00-placement-api.conf   # 追加进去
....
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>

# 重启服务
systemctl restart httpd
  • ha1配置反向代理
vim /etc/haproxy/haproxy.cfg
listen openstack-placement-8878
bind 172.31.7.248:8878
mode tcp
server 172.31.7.101 172.31.7.101:8878 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:8878 check inter 3s fall 3 rise 5

systemctl restart haproxy

# 到controller2验证
placement-status upgrade check

nova

# 安装基本服务
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler

# 在controller1配置文件打包拷贝
cd /etc/nova/

tar zcvf nova-controller.tar.gz ./*

scp nova-controller.tar.gz 172.31.7.102:/etc/nova/

# 回到controller2解压验证是否有之前的ip配置
scp /root/admin.openrc.sh /root/demo.openrc.sh 172.31.7.102:/root/

cd /etc/nova/

tar zxvf nova-controller.tar.gz

grep 172 ./* -R

# 修改配置文件
vim nova.conf
server_listen = 172.31.7.102
server_proxyclient_address = 172.31.7.102

# 自启启动服务
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
  • ha1配置反向代理
vim /etc/haproxy/haproxy.cfg
listen openstack-nova-8774
bind 172.31.7.248:8774
mode tcp
server 172.31.7.101 172.31.7.101:8774 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:8774 check inter 3s fall 3 rise 5

listen openstack-nova-novncproxy-6080
bind 172.31.7.248:6080
mode tcp
server 172.31.7.101 172.31.7.101:6080 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:6080 check inter 3s fall 3 rise 5

systemctl restart haproxy

# 到controller2验证
source admin.openrc.sh

nova service-list

neutron

# 安装服务
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

# 到controller1上打包拷贝配置文件
# controller1
cd /etc/neutron

tar zcvf neutron-controller1.tar.gz ./

scp neutron-controller1.tar.gz 172.31.7.102:/etc/neutron

# 回到controller2解压并配置文件
cd /etc/neutron

tar zxvf neutron-controller1.tar.gz

# 查看有没有拷贝前的ip地址,有则还本机ip
grep 172 ./* -R

# 配置桥接内核参数
vim /etc/sysctl.conf
'''
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

# 启动自启服务
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

# 生效内核
sysctl -p

# 验证
neutron agent-list
  • h1配置反向代理
vim /etc/haproxy/haproxy.cfg
listen openstack-neutron-9696
bind 172.31.7.248:9696
mode tcp
server 172.31.7.101 172.31.7.101:9696 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:9696 check inter 3s fall 3 rise 5

systemctl reload haproxy

dashboard

# 安装服务
yum install -y openstack-dashboard

# 到controller1节点打包配置文件拷贝
# controller1
cd /etc/openstack-dashboard/

tar zcvf openstack-dashboard-controller1.tar.gz ./

scp openstack-dashboard-controller1.tar.gz 172.31.7.102:/etc/openstack-dashboard/

# 回到controller2上解压并配置文件
cd /etc/openstack-dashboard/

tar zxvf openstack-dashboard-controller1.tar.gz

# 查看是否有之前配置信息
grep 172 ./* -R

# 修改配置文件
vim local_settings
'''
ALLOWED_HOSTS = [ '172.31.7.102', 'openstack-vip.ws.local' ]
OPENSTACK_HOST = '172.31.7.102'

# 重启服务
systemctl restart httpd.service
  • 配置反向代理
# dashboard
vim /etc/haproxy/haproxy.cfg
listen openstack-dashboard-80
bind 172.31.7.248:80
mode tcp
server 172.31.7.101 172.31.7.101:80 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:80 check inter 3s fall 3 rise 5

镜像制作

  • 制作流程
# 启动一个虚拟机,kvm等虚拟化安装好,添加两张网卡,都为桥接,为了远程连接云主机

# curl openstack的api
curl http://169.254.169.254

# 检查是否支持虚拟化
grep -E '(vmx|svm)' /proc/cpuinfo


# 创建镜像
qemu-img create -f qcow2 /var/lib/libvirt/images/Centos-7-x86_64.qcow2 10G

virt-install --virt-type kvm --name centos7-x86_64 --ram 1024 --cdrom=/data/isos/CentOS-7-x86_64-NetInstall-1611.iso --disk path=/var/lib/libvirt/images/Centos-7-x86_64.qcow2  --network bridge=br0 --graphics vnc,listen=0.0.0.0 --noautoconsole

virt-manager


# 虚拟机初始化时修改网卡名称
net.ifnames=0 biosdevname=0

# 虚拟机基本配置
vim eth0
ip为dhcp
ONBOOT=yes

yum -y install net-tools

关闭selinux和防火墙

reboot

# 利用远程连接工具连接虚拟机配置安装基础服务
ssh root@虚拟机IP

yum install  vim iotop bc gcc gcc-c++ glibc glibc-devel pcre pcre-devel openssl  openssl-devel zip unzip zlib-devel  net-tools lrzsz tree ntpdate telnet lsof tcpdump wget libevent libevent-devel bc  systemd-devel bash-completion traceroute bridge-utils  -y

# Compress the image (write to a new file so the source image is not overwritten)
qemu-img convert -c -O qcow2 Centos-7-x86_64.qcow2 Centos-7-x86_64-compressed.qcow2
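# To confirm the compression helped, compare the two files (a sketch; the compressed filename follows the command above):
qemu-img info Centos-7-x86_64.qcow2
qemu-img info Centos-7-x86_64-compressed.qcow2
ls -lh Centos-7-x86_64*.qcow2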

# 配置controller公钥免密(一般是拷贝ansible的公钥,用于批量部署)
ssh-keygen

ssh-copy-id 虚拟机ip

# 关机
poweroff

======================================利用镜像启动虚拟机======================================
# 宿主机安装acpid,宿主机可以重启虚拟机
yum install acpid -y
systemctl enable acpid; systemctl start acpid

# 宿主机利用镜像启动实例
scp kvm机ip:/var/lib/libvirt/images/Centos7-x86_64.qcow2 /root/

# 上传镜像启动虚拟机即可

iotop 查看磁盘读写

磁盘拉伸(ubuntu)

# Now build an Ubuntu image as the example (on the KVM host)
Upload the Ubuntu ISO to the host

# 创建镜像(宿主机操作)
qemu-img create -f qcow2 /var/lib/libvirt/images/ubuntu.qcow2 10G

# Faster package mirror
http://mirrors.aliyun.com/ubuntu/

# 创建虚拟机(宿主机操作)
virt-install --virt-type kvm --name ubuntu-1804 --ram 1024 --cdrom=/data/isos/ubuntu-1804-server-amd64.iso --disk path=/var/lib/libvirt/images/ubuntu.qcow2  --network bridge=br0 --graphics vnc,listen=0.0.0.0 --noautoconsole

# Rename the NICs during installation (inside the VM)
Choose English, Enter--->F6--->Esc--->append net.ifnames=0 biosdevname=0--->Enter to install--->language: default, Enter--->location: default, Enter--->No (skip keyboard detection)--->default, Enter--->default, Enter--->set the hostname, Enter--->create a user, Enter--->set the password--->timezone: default--->Manual partitioning--->pick the third entry (Virtual disk 1 (vda) - 10.7 GB Virtio Block Device)--->Yes--->select the free space (pri/log ...)--->Create a new partition--->allocate all of it (Continue)--->Primary--->Done setting up the partition (only this one partition)--->Finish partitioning and write changes to disk--->write changes? Yes--->next--->first option, no automatic updates--->select OpenSSH server (toggle with space), Continue--->Yes--->Continue

# 配置sshd文件(虚拟机操作)
sudo su - root
vim /etc/ssh/sshd_config
PermitRootLogin yes

systemctl restart sshd

# 设置root密码(虚拟机操作)
passwd

# Connect over SSH and install the base packages (inside the VM; note these are Debian/Ubuntu package names)
apt install -y vim iotop bc gcc g++ make zip unzip zlib1g-dev libpcre3 libpcre3-dev openssl libssl-dev net-tools lrzsz tree ntpdate telnet lsof tcpdump wget libevent-dev bash-completion traceroute bridge-utils

==================================ha1节点==================================
# 配置nova元数据的代理(磁盘拉伸工具需要访问openstack-API拿到磁盘数据配置到云主机)
vim /etc/haproxy/haproxy.cfg
listen openstack-nova-8775
bind 172.31.7.248:8775
mode tcp
server 172.31.7.101 172.31.7.101:8775 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:8775 check inter 3s fall 3 rise 5

systemctl restart haproxy
=========================================================================


===============================磁盘拉伸(kvm虚拟机操作)=========================
# 特点:只能增,不能减
# 安装磁盘拉伸工具
apt install cloud-init -y

# 检测是否能获取到openstack的api(因为cloud_init会在创建云主机时调用openstack-API获取磁盘信息来加载的创建的云主机中)
curl http://169.254.169.254

# 配置文件
vim /etc/cloud/cloud.cfg
'''
users:
- root
disable_root: false

# To set the metadata source the image will use, run dpkg-reconfigure against the cloud-init package
dpkg-reconfigure cloud-init

# A selection screen appears: keep only Ec2 selected and deselect everything else with the space bar

# 继续配置cloud.cfg模块(只保留磁盘拉伸的配置),模块太多影响虚拟机启动速度
vim /etc/cloud/cloud.cfg
# The modules that run in 'init' stage
cloud_init_modules:
- growpart		# 磁盘拉伸
- resizefs		# 磁盘生效

# - package-update-upgrade-install 以下全部注释

=================关闭安全组====================
# 所有节点关闭安全组(openstack节点)
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
'''
enable_security_group = false
'''
=====================================

# Enable the disk-stretch service (cloud-init calls the OpenStack metadata API to get the flavor's disk size and applies it to the instance)
# If it fails to start, the usual causes are a broken config file or an unreachable OpenStack metadata API
systemctl start cloud-init; systemctl enable cloud-init
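# Once an instance is booted from this image with a larger flavor disk, the stretch can be verified inside the guest (a sketch; cloud-init status exists on newer cloud-init releases such as the one shipped with Ubuntu 18.04):
lsblk               # the root disk should show the flavor's size
df -h /             # the root filesystem should have been grown by growpart + resizefs
cloud-init status   # should report done without errors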


# Copy the public key of any host that needs passwordless access into the KVM VM (run on that host)
ssh-copy-id <KVM-VM-IP>

# 关机
poweroff

# Copy the image to the OpenStack host and upload it to boot instances (in short, disk stretching only needs cloud-init installed and enabled)
# Boot a qcow2 image together with an ISO image
virt-install --virt-type kvm --name centos-1511 --ram 1024 --cdrom=/data/isos/centos-Min.iso --disk path=/var/lib/libvirt/images/centos-7-x86_64-1511.qcow2  --network bridge=br0 --graphics vnc,listen=0.0.0.0 --noautoconsole

# 立马关机

# 到kvm宿主机重置密码(宿主机)
yum -y install libguestfs-tools

virt-customize -a /os/images/Centos-1511.qcow2 --root-password password:123456


# 官方下载的镜像一般会关闭密码验证导致远程登录不上
vim /etc/ssh/sshd_config
'''
PasswordAuthentication yes

systemctl restart sshd


vim /etc/cloud/cloud.cfg
# 只留下cloud_init_modules中的磁盘伸缩配置,其余如下配置全部注释
cloud_init_modules:
- growpart
- resizefs

cloud_config_modules:
# 注释

cloud_final_modules:
# 注释


sync  将内存数据写到磁盘

制作centos6.9镜像并实现磁盘拉伸及网卡重命名

# 创建镜像(宿主机操作)
qemu-img create -f qcow2 /var/lib/libvirt/images/centos6.9.qcow2 10G

# 创建虚拟机(宿主机操作)
virt-install --virt-type kvm --name centos6.9 --ram 1024 --cdrom=/data/isos/centos6.9-x86_64.iso --disk path=/var/lib/libvirt/images/centos6.9.qcow2  --network bridge=br0 --graphics vnc,listen=0.0.0.0 --noautoconsole

# 启动云主机时由于mac地址会变,导致ip拿不到,做以下配置
vim /etc/udev/rules.d/70-persistent-net.rules
# 删除源eth0的配置信息,将下面eth1改为eth0重启云主机即可

# 磁盘拉伸
cd /opt
rpm -ivh http://ftp-stud.hs-esslingen.de/pub/epel/6/i386/epel-release-6-8.noarch.rpm
yum install git parted cloud-utils
git clone https://github.com/flegmatik/linux-rootfs-resize.git
cd linux-rootfs-resize
./install

reboot

openstack多网卡与指定IP创建主机

  • 功能
  • 数据中心
  • 内部的api调用
  • 所有节点
# 启动内网网卡(所有节点,再相互ping是否能通)
ifup eth1
  • 控制节点
# 编辑配置文件 ml2_conf.ini
vim /etc/neutron/plugins/ml2/ml2_conf.ini
172 flat_networks = internal,external

# 编辑配置文件 linuxbridge_agent.ini
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
155 physical_interface_mappings = external:eth0,internal:eth1
  • 计算节点
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
155 physical_interface_mappings = external:eth0,internal:eth1
  • 控制节点创建网络并实现多网卡
  • 多网卡的作用:
    • 数据端与服务端分离分担流量压力与保证数据安全,免受攻击
    • 可动态分配内网外网。如:内网作为mysql,外网作为nginx,灵活分配
# 创建网络
openstack network create  --share --internal --provider-physical-network internal --provider-network-type flat internal

# Create the subnet (attached to the internal network created above)
openstack subnet create --network internal --allocation-pool start=10.10.0.50,end=10.10.0.100 --dns-nameserver 223.6.6.6 --gateway 10.10.0.1 --subnet-range 10.10.0.0/21 internal-sub

# Add a second NIC (virtio) to the KVM VM, shut it down, upload the image and boot instances from it
Note for CentOS 6: the MAC address must be set in the NIC config, otherwise the interface is not picked up on the OpenStack platform
Note for CentOS 7: keep NetworkManager away from the interface by adding NM_CONTROLLED=no to the NIC config file (ens33 or eth0)
Restart networking, confirm connectivity, and copy in the public keys of the hosts that need passwordless access to the KVM VM

# 在dashboard创建实例时,先推内网再推外网,因为公司服务一般都是先有内网再有外网

# 完成后就达到 172外网(nginx,tomcat等)服务访问 10内网服务(mysql,redis等)互通了,接下来就是将web要访问数据库的地址指向 10内网网段即可,即保证了安全又得到了流量压力分担缓解,数据和api通过内网调用,业务通过外网调用
  • 自定义IP
# 给云主机指定单个IP
nova boot --image 镜像名称 --flavor 1C-1G-50G --availability-zone 域:计算节点 --nic net-name=external-net(neutron定义的网络名称),v4-fixed-ip=172.31.7.88 云主机名称

# 给云主机指定多个IP
nova boot --image 镜像名称 --flavor 1C-1G-50G --availability-zone 域:计算节点 --nic net-id=$ID,v4-fixed-ip=10.10.7.50 --nic net-id=$ID,v4-fixed-ip=172.31.7.88 云主机名称
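# To confirm which ports and fixed IPs an instance actually received, filter the port list by server (a verification sketch; <instance-name> stands for the name used above):
openstack server show <instance-name> -c addresses
openstack port list --server <instance-name>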

OpenStack虚拟机类型调整与虚拟机跨主机迁移

  • 查看当前配额
neutron quota-show admin
network  100
port     500		# port决定可以创建多少个云主机
rbac_policy  10
subnet   100
subnetpool  -1
  • 修改配额(重点,这个是web也调整不了的值,这是openstack内部限制,所有节点都需要加)
# 如果有多个控制节点,需要保持一致
vim /etc/neutron/neutron.conf
[quotas]
quota_network = 10
quota_subnet = 10
quota_port = 5000
quota_driver = neutron.db.quota.driver.DbQuotaDriver
quota_router = 10
quota_floatingip = 1000
quota_security_group = 10
quota_security_group_rule = 100


systemctl restart neutron*
  • WEB配置文件开启配额限制
vim /etc/openstack-dashboard/local_settings
'''
'enable_quotas': True,
'''

openstack项目实战

keepalived+haproxy

  • 在云主机中关闭磁盘伸缩初始化配置(所有节点)
# 进入云主机界面
systemctl disable cloud-init

# 开启密码验证
vim /etc/ssh/sshd_config
'''
PasswordAuthentication yes
  • 准备四台主机,两台做负载均衡和vip等,另外两台做web
# 10.10.7.88 和 172.31.7.88    负载机一
# 10.10.7.89 和 172.31.7.89    负载机二

# 负载机上创建两个vip:172.31.7.99,172.31.7.98

# 10.10.7.78 和 172.31.7.78    web机一
# 10.10.7.79 和 172.31.7.79    web机二


nova boot --image centos7.6_v1 --flavor 1C-1G-50G --availability-zone projectB:openstack-node2.ws.local --nic net-id=$ID,v4-fixed-ip=172.31.7.88 --nic net-id=$ID,v4-fixed-ip=10.10.7.88 projectB-web1

nova boot --image centos7.6_v1 --flavor 1C-1G-50G --availability-zone projectB:openstack-node3.ws.local --nic net-id=$ID,v4-fixed-ip=172.31.7.89 --nic net-id=$ID,v4-fixed-ip=10.10.7.89 projectB-web2

nova boot --image centos7.6_v1 --flavor 1C-1G-50G --availability-zone projectB:openstack-node2.ws.local --nic net-id=$ID,v4-fixed-ip=172.31.7.78 --nic net-id=$ID,v4-fixed-ip=10.10.7.78 projectB-lb1

nova boot --image centos7.6_v1 --flavor 1C-1G-50G --availability-zone projectB:openstack-node3.ws.local --nic net-id=$ID,v4-fixed-ip=172.31.7.79 --nic net-id=$ID,v4-fixed-ip=10.10.7.79 projectB-lb2
# java解压环境变量自己配置

# tomcat解压配置
vim conf/server.xml
appBase="/data/tomcat/webapps"

mkdir -p /data/tomcat/webapps/myapp

vim /data/tomcat/webapps/myapp/index.html
<h1>172.31.7.88/89</h1>

# 启动tomcat
/apps/apache-tomcat-10/bin/catalina.sh start

# 访问
172.31.7.78:8080/myapp
172.31.7.79:8080/myapp
  • ha1
]# cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_iptables
vrrp_garp_interval 0
vrrp_gna_interval 0
}

vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 58
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.31.7.248 dev eth0 label eth0:0
172.31.7.188 dev eth0 label eth0:1
}
}

systemctl reload keepalived



vim /etc/haproxy/haproxy.cfg
listen guangdon-nginx-80
bind 172.31.7.188:80
mode tcp
server 172.31.7.78 172.31.7.78:80 check inter 3s fall 3 rise 5
server 172.31.7.79 172.31.7.79:80 check inter 3s fall 3 rise 5

systemctl reload haproxy
  • keepalived+haproxy VIP配置(172.31.7.88、172.31.7.89)
# openstack中做vip需要打开安全组(只需要controller打开即可)
vim /etc/neutron/plugins/ml2/ml2_conf.ini
enable_security_group = true

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
167 enable_security_group = true

# 安装
yum install -y haproxy keepalived

# 配置keepalived主(172.31.7.88)
vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
notification_email {
 acassen@firewall.loc
 failover@firewall.loc
 sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_iptables			# 不生成iptables策略
vrrp_garp_interval 0
vrrp_gna_interval 0
}

vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 55
priority 100
advert_int 1
authentication {
    auth_type PASS
    auth_pass 1111
}
virtual_ipaddress {
    172.31.7.16 dev eth0 label eth0:0
}
}

systemctl reload keepalived

scp /etc/keepalived/keepalived.conf 172.31.7.89:/etc/keepalived/keepalived.conf


# 配置keepalived从(172.31.7.89)
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
notification_email {
 acassen@firewall.loc
 failover@firewall.loc
 sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_iptables
vrrp_garp_interval 0
vrrp_gna_interval 0
}

vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 55        # id要和主对应,相同
priority 80				# 优先级越高,vip地址就在那台机器,默认为100
advert_int 1
authentication {
    auth_type PASS
    auth_pass 1111
}
virtual_ipaddress {
    172.31.7.16 dev eth0 label eth0:0
}
}

PS: the BACKUP node does not hold the VIP, so haproxy cannot bind to it and will refuse to start unless non-local binds are allowed via a kernel parameter
sysctl -a | grep local
# default value: net.ipv4.ip_nonlocal_bind = 0

vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1

sysctl -p

systemctl restart keepalived

# 安装配置haproxy(172.31.7.88)
yum install -y haproxy

vim /etc/haproxy/haproxy.cfg
listen guangdon-nginx-80
bind 172.31.7.16:80
mode http
server 172.31.7.78 172.31.7.78:8080 check inter 3s fall 3 rise 5
server 172.31.7.79 172.31.7.79:8080 check inter 3s fall 3 rise 5

listen guizhou-nginx-80
bind 172.31.7.26:80    # a second VIP; two listens cannot bind the same ip:port (this VIP must also be added to keepalived and registered as a port below)
mode http
server 172.31.7.78 172.31.7.78:8080 check inter 3s fall 3 rise 5
server 172.31.7.79 172.31.7.79:8080 check inter 3s fall 3 rise 5


# Install and configure haproxy on 172.31.7.89 (same package and the same haproxy.cfg as on 172.31.7.88)
yum install -y haproxy
scp 172.31.7.88:/etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg

systemctl restart haproxy


================================================controller操作============================================
# If split-brain occurs (both nodes end up holding the VIP)
Open IP protocol 112 (VRRP) in the security group via the OpenStack dashboard, for both ingress and egress, so the keepalived peers can see each other's advertisements

# 查看安全组
openstack security group rule list

# 在openstack中注册vip地址,不然无法通讯,需要vip给ha做负载均衡(注意vip地址网段,根据网段分配相应网络 id )
openstack network list    # 获取网络id

neutron port-create --fixed-ip ip_address=172.31.7.16 --security-group 安全组id 网段对应的网络id

# 查看是否注册成功
neutron port-list


# 注册成功绑定到相应的云主机上
openstack port list | grep 172.31.7.78		# 取出id备份
openstack port list | grep 172.31.7.79		# 取出id备份

# 注册的vip地址绑定到相应的云主机中,但是是单个绑定,如果有多个会被新的覆盖掉
neutron port-update 172.31.7.78的port-id --allowed_address_pairs list=true type=dict ip_address=172.31.7.16
neutron port-update 172.31.7.79的port-id --allowed_address_pairs list=true type=dict ip_address=172.31.7.16

# 多个地址绑定
neutron port-update 172.31.7.78的port-id --allowed_address_pairs list=true type=dict ip_address={"172.31.7.16","172.31.7.26"}
neutron port-update 172.31.7.79的port-id --allowed_address_pairs list=true type=dict ip_address={"172.31.7.16","172.31.7.26"}
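# To confirm the pairs were applied, inspect each port (a sketch; substitute the real port IDs):
openstack port show <port-id-of-172.31.7.78> -c allowed_address_pairs
openstack port show <port-id-of-172.31.7.79> -c allowed_address_pairs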

lvs

# 先停掉上面的 keepalived 和 haproxy(172.31.7.88,172.31.7.89)
systemctl stop haproxy keepalived

# 安装服务
yum install -y ipvsadm

sysctl -p

# 配置keepalived
vim /etc/keepalived/keepalived.conf
'''
# Tomcat service virtual_server
virtual_server 172.31.7.16 8080 {
delay_loop 6    # health-check interval
lb_algo wrr     # scheduling algorithm
lb_kind DR      # LVS mode
# persistence_timeout 60    # session persistence time
protocol TCP    # forward TCP

real_server 172.31.7.78 8080 {
  weight 1    # weight
  TCP_CHECK {
  connect_timeout 5     # timeout with no response
  nb_get_retry 3        # number of retries
  delay_before_retry 3  # delay between retries
  connect_port 8080     # port to health-check (matches the real server port)
  }
}
real_server 172.31.7.79 8080 {
  weight 1
  TCP_CHECK {
  connect_timeout 5
  nb_get_retry 3
  delay_before_retry 3
  connect_port 8080
  }
}
}

systemctl restart keepalived


# lvs初始化启动脚本(172.31.7.88,172.31.7.89)
vim /root/lvs-dr.sh
#!/bin/bash
# LVS DR默认初始化脚本
LVS_VIP=172.31.7.16
source /etc/rc.d/init.d/functions
case "$1" in
start)
/sbin/ifconfig lo:0 $LVS_VIP netmask 255.255.255.255 broadcast $LVS_VIP
/sbin/route add -host $LVS_VIP dev lo:0
echo "1" > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" > /proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" > /proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" > /proc/sys/net/ipv4/conf/all/arp_announce
sysctl -p > /dev/null 2>&1
echo "RealServer Start OK"
;;
stop)
/sbin/ifconfig lo:0 down
/sbin/route del $LVS_VIP > /dev/null 2>&1
echo "0" > /proc/sys/net/ipv4/conf/lo/arp_ignore
echo "0" > /proc/sys/net/ipv4/conf/lo/arp_announce
echo "0" > /proc/sys/net/ipv4/conf/all/arp_ignore
echo "0" > /proc/sys/net/ipv4/conf/all/arp_announce
sysctl -p > /dev/null 2>&1
echo "RealServer Stoped"
;;
*)
echo "Usage: $0 {start|stop}"
exit 1
esac
exit 0


bash lvs-dr.sh start

# 查看lvs策略(172.31.7.88,172.31.7.89)
ipvsadm -Ln
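# A quick functional check of the DR setup from any client on the same network (a sketch; reuses the myapp page deployed on the tomcat nodes earlier):
for i in $(seq 1 4); do curl -s http://172.31.7.16:8080/myapp/index.html; done
ipvsadm -Ln --stats    # the connection counters for both real servers should increase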

openstack优化配置

  • 配置虚拟机自启动(必须加)
# 计算节点 /etc/nova/nova.conf 进行以下配置:
resume_guests_state_on_host_boot=true
  • 配置CPU超限使用
# 默认为16,即允许开启16倍于物理cpu的虚拟cpu个数
cpu_allocation_ratio=16
  • 配置内存超限使用
# 配置允许1.5倍于物理内存的虚拟内存
ram_allocation_ratio=1.5
  • 配置硬盘超限使用(不建议启动,会导致数据丢失)
# 硬盘尽量不要超限,可能会导致数据出现丢失
disk_allocation_ratio=1.0
  • 配置保留磁盘空间
# 即会预留一部分磁盘空间给系统使用
reserved_host_disk_mb=20480
  • 配置预留内存给系统使用
# 即预留一定的内存给系统使用
reserved_host_memory_mb=4096
  • 配置虚拟机类型动态调整
# Instances sometimes need more CPU, memory, or disk after creation, so allow flavor resize (a combined example of all the options above follows below)
allow_resize_to_same_host=true		# allow resize
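# Collected in one place, the tuning options above sit in the [DEFAULT] section of /etc/nova/nova.conf on each compute node (a sketch; the values are the examples used above and should be adjusted to the environment):
[DEFAULT]
resume_guests_state_on_host_boot = true
cpu_allocation_ratio = 16
ram_allocation_ratio = 1.5
disk_allocation_ratio = 1.0
reserved_host_disk_mb = 20480
reserved_host_memory_mb = 4096
allow_resize_to_same_host = true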

openstack跨主机迁移/类型调整

  • During a host migration, the source host copies the instance's directory to the destination host (over SSH as the nova user), and the instance is then started on the destination host
  • A flavor resize follows the same process as a migration
  • 开启nova用户登录(所有主机)
usermod -s /bin/bash nova
  • 配置nova登录密码(所有主机)
echo 123456 | passwd --stdin nova
  • 生成密钥对,配置免密登录(所有主机)
su - nova

ssh-keygen		# 一直回车即可

ssh-copy-id nova@172.31.7.107

ssh-copy-id nova@172.31.7.108

ssh-copy-id nova@172.31.7.109
  • Then perform the resize and cross-host migration from the dashboard or CLI as needed; a quick check of the key distribution follows below
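# Verify the key distribution above (run as root on any compute node; it should print the remote hostname without a password prompt):
su - nova -c "ssh nova@172.31.7.108 hostname"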

cinder(云硬盘)

  • 数据端创库授权
CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'cinder123';
  • controller(1、2)
# 创建cinder账号(密码统一cinder)
openstack user create --domain default --password-prompt cinder

# 将管理员角色添加到cinder用户
openstack role add --project service --user cinder admin

# 创建service
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2

openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3

# 注册api
openstack endpoint create --region RegionOne volumev2 public http://openstack-vip.ws.local:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne volumev2 internal http://openstack-vip.ws.local:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne volumev2 admin http://openstack-vip.ws.local:8776/v2/%\(project_id\)s

openstack endpoint create --region RegionOne volumev3 public http://openstack-vip.ws.local:8776/v3/%\(project_id\)s

openstack endpoint create --region RegionOne volumev3 internal http://openstack-vip.ws.local:8776/v3/%\(project_id\)s

openstack endpoint create --region RegionOne volumev3 admin http://openstack-vip.ws.local:8776/v3/%\(project_id\)s
  • ha1配置反向代理
vim /etc/haproxy/haproxy.cfg
listen openstack-cinder-8776
bind 172.31.7.248:8776
mode tcp
server 172.31.7.101 172.31.7.101:8776 check inter 3s fall 3 rise 5
server 172.31.7.102 172.31.7.102:8776 check inter 3s fall 3 rise 5

systemctl reload haproxy
  • 配置cinder(控制节点)
# 安装服务
yum install openstack-cinder -y

vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.ws.local/cinder

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[DEFAULT]
my_ip = 10.0.0.11

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

# 同步数据库
su -s /bin/sh -c "cinder-manage db sync" cinder


# 配置nova
vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne

# 重启、启动相关服务
systemctl restart openstack-nova-api.service

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service; systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
  • compute配置
# 添加一块磁盘

# Rescan the SCSI bus so the new disk is detected without a reboot
ls /sys/class/scsi_host/

echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan

# 安装卷服务并启动
yum install lvm2 device-mapper-persistent-data

systemctl enable lvm2-lvmetad.service; systemctl start lvm2-lvmetad.service

# 创建卷
pvcreate /dev/sdb

# 创建卷组(注意创建的名称,后面需要用 cinder-volumes )
vgcreate cinder-volumes /dev/sdb

# 只允许虚拟机访问 sdb 盘,其它盘全部拒绝
vim  /etc/lvm/lvm.conf
'''
devices {
filter = [ "a/sdb/", "r/.*/"]

systemctl restart lvm2-lvmetad.service

# 安装cinder相关服务
yum install openstack-cinder targetcli python-keystone

vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@openstack-vip.ws.local/cinder

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder

[DEFAULT]
my_ip = 172.31.7.106

[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm
#volume_backend_name = Openstack-lvm

[DEFAULT]
enabled_backends = lvm

[DEFAULT]
glance_api_servers = http://openstack-vip.ws.local:9292

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

systemctl enable openstack-cinder-volume.service target.service; systemctl start openstack-cinder-volume.service target.service
  • 验证(控制节点)
openstack volume service list
  • 重新识别磁盘大小
resize2fs /dev/vdb
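# A sketch of the full grow flow for an existing volume: extend it from the controller, then grow the filesystem inside the guest (ext4 uses resize2fs as above, xfs uses xfs_growfs on the mount point). The volume ID and sizes are placeholders, and depending on the cinder version the volume may need to be detached before extending:
cinder extend <volume-id> 20        # volumes can only grow, never shrink
# inside the guest, once the new size is visible:
resize2fs /dev/vdb                  # ext4
xfs_growfs /mount/point             # xfs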

cinder配置nfs存储

  • NFS server (172.31.7.105)
yum -y install nfs-utils

mkdir /data/cinder

vim /etc/exports
'''
/data/cinder *(rw,no_root_squash)

systemctl start nfs
  • controller
# 配置controller的cinder主配置文件(controller1、controller2)
vim /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = nfs
[nfs]
volume_backend_name = openstack-nfs   # 定义名称,后面做关联使用
volume_driver = cinder.volume.drivers.nfs.NfsDriver   # 驱动
nfs_shares_config = /etc/cinder/nfs_shares    # 定义nfs挂载的配置文件路径
nfs_mount_point_base = $state_path/mnt     # 定义nfs挂载点

vim /etc/cinder/nfs_shares		# 由于上面的配置会自动读取这个配置文件
172.31.7.105:/data/cinder

chown root.cinder /etc/cinder/nfs_shares

systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service openstack-cinder-volume.service
  • compute
# 配置compute的cinder主配置文件(compute)
vim /etc/cinder/cinder.conf
[lvm]
volume_backend_name = openstack-lvm		# names this LVM backend, distinct from the NFS backend defined on the controller

systemctl restart openstack-cinder-volume.service target.service

# 验证(controller)
openstack volume service list

# 将磁盘类型与磁盘关联(controller)
cinder type-create lvm
cinder type-create nfs
cinder type-key lvm set volume_backend_name=openstack-lvm
cinder type-key nfs set volume_backend_name=openstack-nfs
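# With the types associated, a volume can be created on a specific backend and checked (a sketch; names and sizes are placeholders):
openstack volume create --type lvm --size 2 test-lvm-vol
openstack volume create --type nfs --size 2 test-nfs-vol
openstack volume list --long    # the Type column shows which backend each volume landed on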

自服务网络

  • 控制节点
# 安装服务
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

# 配置文件
vim  /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:neutron123@openstack-vip.ws.local/neutron

[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true		# 不同用户可以创建相同网段

[DEFAULT]
transport_url = rabbit://openstack:openstack123@openstack-vip.ws.local

[DEFAULT]
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://openstack-vip.ws.local:5000
auth_url = http://openstack-vip.ws.local:5000
memcached_servers = openstack-vip.ws.local:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron


[DEFAULT]
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[nova]
auth_url = http://openstack-vip.ws.local:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp




vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan

[ml2]
tenant_network_types = vxlan

[ml2]
mechanism_drivers = linuxbridge,l2population
# l2population通知mac地址,优化网络,减少广播报文

[ml2]
extension_drivers = port_security   # 端口安全,如果不跑vip可以直接关闭

[ml2_type_flat]
flat_networks = external          # 定义网络名称,后面需要用来绑定网卡

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_security_group = true    # 打开安全组
enable_ipset = true




vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = external:eth0,internal:eth1    # the flat network name must match flat_networks in ml2_conf.ini and the compute nodes

[vxlan]
enable_vxlan = true
local_ip = 172.31.7.101   # 云主机要做地址转换才能通外网或可访问网,这里配置云主机做地址转换时走哪个ip,写本机ip即可
l2_population = true

[securitygroup]
enable_security_group = true     # 打开安全组(如果不开不能使用vip)
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver




vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge


vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true



vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

sysctl -p


systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service  neutron-metadata-agent.service

systemctl enable neutron-l3-agent.service; systemctl start neutron-l3-agent.service

# 验证
neutron agent-list
  • compute
yum install openstack-neutron-linuxbridge ebtables ipset

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = external:eth0,internal:eth1

[vxlan]
enable_vxlan = true
local_ip = 172.31.7.107
l2_population = true

[securitygroup]
enable_security_group = false     # node不启动安全组,避免报文,iptables影响服务
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver




vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

sysctl -p

systemctl restart neutron-linuxbridge-agent.service
  • 开启dashboard的router功能
vim /etc/openstack-dashboard/local_settings
OPENSTACK_NEUTRON_NETWORK = {
 ...
 'enable_router': True,
 'enable_quotas': True,
 'enable_distributed_router': True,
 'enable_ha_router': True,
 'enable_lb': True,
 'enable_firewall': True,
 'enable_vpn': True,
 'enable_fip_topology_check': True,
}

systemctl restart httpd

# Three common subnet masks
/22 = 255.255.252.0 (about 1000 addresses)    /21 = 255.255.248.0 (about 2000 addresses)    /24 = 255.255.255.0 (254 addresses)

# How internal networks reach each other
Attach a router interface to each internal subnet so the router handles traffic between them, then set an external network as the router's gateway for full outside access (a CLI sketch follows below)
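# A CLI sketch of that wiring; the router name is a placeholder and the subnet/network names follow the ones created earlier in this document (adjust to the actual names):
openstack router create project-router
openstack router add subnet project-router internal-sub
openstack router set project-router --external-gateway external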