14. Using Open vSwitch to Connect Containers Across Hosts
Introduction to Open vSwitch
Open vSwitch (Open Virtual Switch) is a production-quality, multilayer virtual switch released under the open-source Apache 2.0 license. It implements the switching function entirely in software.
Compared with a traditional physical switch, a virtual switch has several advantages. First, configuration is far more flexible: a single ordinary server can host dozens or even hundreds of virtual switches, and the number of ports on each can be chosen freely; for example, one VMware ESX server can emulate up to 248 virtual switches, each with 56 virtual ports by default. Second, the cost is much lower: virtual switching often delivers performance that would otherwise require an expensive physical switch; on Microsoft's Hyper-V platform, for example, the link between a virtual machine and a virtual switch can easily reach 10 Gbps.
Official website: http://www.openvswitch.org/
Using Open vSwitch to Build a Cross-Host Docker Network
Goal: use Open vSwitch to connect the containers running on two hosts so that they can communicate with each other.
Environment Preparation
Hostname | OS | Host IP | docker0 IP | Container IP |
---|---|---|---|---|
ovs1 | Ubuntu 18.04 | 10.0.0.101/24 | 172.17.1.1/24 | 172.17.1.2/24 |
ovs2 | Ubuntu 18.04 | 10.0.0.102/24 | 172.17.2.1/24 | 172.17.2.2/24 |
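The data path built in the following steps looks roughly like this (a conceptual sketch only; the interface names match the commands below):
ovs1: container (172.17.1.2) -- docker0 (172.17.1.1) -- br0 (OVS) -- gre0
                                                                       ||  GRE tunnel over 10.0.0.101 <-> 10.0.0.102
ovs2: container (172.17.2.2) -- docker0 (172.17.2.1) -- br0 (OVS) -- gre0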
Configure docker0 on each host with a different subnet
#Configure the first host
[root@ovs1 ~]#vim /etc/docker/daemon.json
{
"bip": "172.17.1.1/24",
"registry-mirrors": ["https://si7y70hh.mirror.aliyuncs.com"]
}
[root@ovs1 ~]#systemctl restart docker
[root@ovs1 ~]#ip add show docker0
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:dc:29:03:6c brd ff:ff:ff:ff:ff:ff
inet 172.17.1.1/24 brd 172.17.1.255 scope global docker0
valid_lft forever preferred_lft forever
#Configure the second host
[root@ovs2 ~]#vim /etc/docker/daemon.json
{
"bip": "172.17.2.1/24",
"registry-mirrors": ["https://si7y70hh.mirror.aliyuncs.com"]
}
[root@ovs2 ~]#systemctl restart docker
[root@ovs2 ~]#ip add show docker0
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e2:38:84:83 brd ff:ff:ff:ff:ff:ff
inet 172.17.2.1/24 brd 172.17.2.255 scope global docker0
valid_lft forever preferred_lft forever
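If dockerd fails to restart after editing daemon.json, the usual cause is malformed JSON; a quick way to validate the file before restarting (assuming python3 is available, as it is by default on Ubuntu 18.04):
[root@ovs1 ~]#python3 -m json.tool /etc/docker/daemon.json
The file is printed back if it parses cleanly; otherwise a parse error points to the offending line.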
Install openvswitch-switch and bridge-utils and check the versions
#Install the packages on the first host
[root@ovs1 ~]#apt -y install openvswitch-switch bridge-utils
[root@ovs1 ~]#ps -e | grep ovs
6766 ? 00:00:00 ovsdb-server
6826 ? 00:00:00 ovs-vswitchd
#Check the OVS version and the OpenFlow protocol versions it supports
[root@ovs1 ~]#ovs-appctl --version
ovs-appctl (Open vSwitch) 2.9.5
[root@ovs1 ~]#ovs-ofctl --version
ovs-ofctl (Open vSwitch) 2.9.5
OpenFlow versions 0x1:0x5
#Check the Linux bridges
[root@ovs1 ~]#brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242dc29036c no
#Install the packages on the second host
[root@ovs2 ~]#apt -y install openvswitch-switch bridge-utils
[root@ovs2 ~]#ps -e | grep ovs
6618 ? 00:00:00 ovsdb-server
6680 ? 00:00:00 ovs-vswitchd
#Check the OVS version and the OpenFlow protocol versions it supports
[root@ovs2 ~]#ovs-appctl --version
ovs-appctl (Open vSwitch) 2.9.5
[root@ovs2 ~]#ovs-ofctl --version
ovs-ofctl (Open vSwitch) 2.9.5
OpenFlow versions 0x1:0x5
[root@ovs2 ~]#brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242e2388483 no
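Besides checking the processes with ps, the OVS service state can be confirmed through systemd (openvswitch-switch is the unit name installed by the Ubuntu package):
[root@ovs1 ~]#systemctl is-active openvswitch-switch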
Create the br0 bridge and bring it up
[root@ovs1 ~]#ovs-vsctl add-br br0
[root@ovs1 ~]#ip link set dev br0 up
[root@ovs1 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:6b:54:d3 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe6b:54d3/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:dc:29:03:6c brd ff:ff:ff:ff:ff:ff
inet 172.17.1.1/24 brd 172.17.1.255 scope global docker0
valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether ce:ff:6f:7f:4b:11 brd ff:ff:ff:ff:ff:ff
5: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether f2:2b:d7:d8:a1:4d brd ff:ff:ff:ff:ff:ff
inet6 fe80::f02b:d7ff:fed8:a14d/64 scope link
valid_lft forever preferred_lft forever
#Repeat the same steps on the second host
[root@ovs2 ~]# ovs-vsctl add-br br0
[root@ovs2 ~]#ip link set dev br0 up
[root@ovs2 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:01:f3:0c brd ff:ff:ff:ff:ff:ff
inet 10.0.0.102/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe01:f30c/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e2:38:84:83 brd ff:ff:ff:ff:ff:ff
inet 172.17.2.1/24 brd 172.17.2.255 scope global docker0
valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether d6:29:ca:3a:9d:99 brd ff:ff:ff:ff:ff:ff
5: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether 82:4f:05:e3:5d:42 brd ff:ff:ff:ff:ff:ff
inet6 fe80::804f:5ff:fee3:5d42/64 scope link
valid_lft forever preferred_lft forever
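To double-check that the bridge was registered in OVS itself (independent of the ip output above), list the OVS bridges; at this point each host should print just br0:
[root@ovs1 ~]#ovs-vsctl list-br
br0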
Create the GRE tunnel (remote_ip is the peer host's IP)
Note: if more than two Docker hosts need to join this network, create one GRE tunnel per peer on each host (see the example after this step).
[root@ovs1 ~]#ovs-vsctl add-port br0 gre0 -- set Interface gre0 type=gre options:remote_ip=10.0.0.102
[root@ovs1 ~]#ovs-vsctl list-ports br0
gre0
[root@ovs1 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:6b:54:d3 brd ff:ff:ff:ff:ff:ff
inet 10.0.0.101/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe6b:54d3/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:dc:29:03:6c brd ff:ff:ff:ff:ff:ff
inet 172.17.1.1/24 brd 172.17.1.255 scope global docker0
valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether ce:ff:6f:7f:4b:11 brd ff:ff:ff:ff:ff:ff
5: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether f2:2b:d7:d8:a1:4d brd ff:ff:ff:ff:ff:ff
inet6 fe80::f02b:d7ff:fed8:a14d/64 scope link
valid_lft forever preferred_lft forever
6: gre0@NONE: <NOARP> mtu 1476 qdisc noop state DOWN group default qlen 1000
link/gre 0.0.0.0 brd 0.0.0.0
7: gretap0@NONE: <BROADCAST,MULTICAST> mtu 1462 qdisc noop state DOWN group default qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
8: erspan0@NONE: <BROADCAST,MULTICAST> mtu 1450 qdisc noop state DOWN group default qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
9: gre_sys@NONE: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 65000 qdisc fq_codel master ovs-system state UNKNOWN group default qlen 1000
link/ether ce:d2:c1:4e:be:c6 brd ff:ff:ff:ff:ff:ff
inet6 fe80::ccd2:c1ff:fe4e:bec6/64 scope link
valid_lft forever preferred_lft forever
[root@ovs2 ~]#ovs-vsctl add-port br0 gre0 -- set Interface gre0 type=gre options:remote_ip=10.0.0.101
[root@ovs2 ~]#ovs-vsctl list-ports br0
gre0
[root@ovs2 ~]#ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:01:f3:0c brd ff:ff:ff:ff:ff:ff
inet 10.0.0.102/24 brd 10.0.0.255 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe01:f30c/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default
link/ether 02:42:e2:38:84:83 brd ff:ff:ff:ff:ff:ff
inet 172.17.2.1/24 brd 172.17.2.255 scope global docker0
valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN group default qlen 1000
link/ether d6:29:ca:3a:9d:99 brd ff:ff:ff:ff:ff:ff
5: br0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether 82:4f:05:e3:5d:42 brd ff:ff:ff:ff:ff:ff
inet6 fe80::804f:5ff:fee3:5d42/64 scope link
valid_lft forever preferred_lft forever
6: gre0@NONE: <NOARP> mtu 1476 qdisc noop state DOWN group default qlen 1000
link/gre 0.0.0.0 brd 0.0.0.0
7: gretap0@NONE: <BROADCAST,MULTICAST> mtu 1462 qdisc noop state DOWN group default qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
8: erspan0@NONE: <BROADCAST,MULTICAST> mtu 1450 qdisc noop state DOWN group default qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
10: gre_sys@NONE: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 65000 qdisc fq_codel master ovs-system state UNKNOWN group default qlen 1000
link/ether 0a:98:48:d9:5f:83 brd ff:ff:ff:ff:ff:ff
inet6 fe80::898:48ff:fed9:5f83/64 scope link
valid_lft forever preferred_lft forever
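As noted above, with more than two Docker hosts each host carries one GRE port per peer on br0, forming a full mesh. For example, if a third host ovs3 existed at 10.0.0.103 (a hypothetical address used only for illustration), ovs1 would add a second tunnel port alongside gre0:
[root@ovs1 ~]#ovs-vsctl add-port br0 gre1 -- set Interface gre1 type=gre options:remote_ip=10.0.0.103
ovs2 and ovs3 would do the same toward their respective peers, and each host would also need a route for every remote Docker subnet (added in the routing step below).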
Add br0 as an interface of the docker0 bridge
[root@ovs1 ~]#brctl addif docker0 br0
[root@ovs1 ~]#ovs-vsctl show
84cbdad7-4731-4c2e-b7d7-eecb4a56d27b
Bridge "br0"
Port "gre0"
Interface "gre0"
type: gre
options: {remote_ip="10.0.0.102"}
Port "br0"
Interface "br0"
type: internal
ovs_version: "2.9.5"
[root@ovs1 ~]#brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242dc29036c no br0
[root@ovs2 ~]#brctl addif docker0 br0
[root@ovs2 ~]#ovs-vsctl show
e6a3aab3-e224-4834-85fc-2516b33a67e2
Bridge "br0"
Port "gre0"
Interface "gre0"
type: gre
options: {remote_ip="10.0.0.101"}
Port "br0"
Interface "br0"
type: internal
ovs_version: "2.9.5"
[root@ovs2 ~]#brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.0242e2388483 no br0
Add static routes (the destination is the peer host's Docker subnet)
#On ovs1, add a route to the peer's Docker network
[root@ovs1 ~]#ip route add 172.17.2.0/24 dev docker0
[root@ovs1 ~]#route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.0.0.2 0.0.0.0 UG 0 0 0 eth0
10.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.17.1.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
172.17.2.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
#On ovs2, add a route to the peer's Docker network
[root@ovs2 ~]#ip route add 172.17.1.0/24 dev docker0
[root@ovs2 ~]#route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 10.0.0.2 0.0.0.0 UG 0 0 0 eth0
10.0.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
172.17.1.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
172.17.2.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
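Before testing from inside containers, a quick sanity check from the hosts themselves helps isolate problems: assuming the GRE tunnel and the routes above are in place, each host should be able to ping the peer's docker0 address (output omitted here):
[root@ovs1 ~]#ping -c 2 172.17.2.1
[root@ovs2 ~]#ping -c 2 172.17.1.1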
Test connectivity between containers on different hosts
#Test from ovs1 (any small image works, e.g. busybox:1.28.3 or the alpine image used below)
[root@ovs1 ~]#docker run -it alpine /bin/sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: gre0@NONE: <NOARP> mtu 1476 qdisc noop state DOWN qlen 1000
link/gre 0.0.0.0 brd 0.0.0.0
3: gretap0@NONE: <BROADCAST,MULTICAST> mtu 1462 qdisc noop state DOWN qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
4: erspan0@NONE: <BROADCAST,MULTICAST> mtu 1450 qdisc noop state DOWN qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:11:01:02 brd ff:ff:ff:ff:ff:ff
inet 172.17.1.2/24 brd 172.17.1.255 scope global eth0
valid_lft forever preferred_lft forever
/ # ping -c 3 172.17.2.2
PING 172.17.2.2 (172.17.2.2): 56 data bytes
64 bytes from 172.17.2.2: seq=0 ttl=63 time=4.459 ms
64 bytes from 172.17.2.2: seq=1 ttl=63 time=1.279 ms
64 bytes from 172.17.2.2: seq=2 ttl=63 time=0.517 ms
--- 172.17.2.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.517/2.085/4.459 ms
[root@ovs2 ~]#docker run -it alpine /bin/sh
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
2: gre0@NONE: <NOARP> mtu 1476 qdisc noop state DOWN qlen 1000
link/gre 0.0.0.0 brd 0.0.0.0
3: gretap0@NONE: <BROADCAST,MULTICAST> mtu 1462 qdisc noop state DOWN qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
4: erspan0@NONE: <BROADCAST,MULTICAST> mtu 1450 qdisc noop state DOWN qlen 1000
link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
11: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:11:02:02 brd ff:ff:ff:ff:ff:ff
inet 172.17.2.2/24 brd 172.17.2.255 scope global eth0
valid_lft forever preferred_lft forever
/ # ping -c 3 172.17.1.2
PING 172.17.1.2 (172.17.1.2): 56 data bytes
64 bytes from 172.17.1.2: seq=0 ttl=63 time=1.553 ms
64 bytes from 172.17.1.2: seq=1 ttl=63 time=1.136 ms
64 bytes from 172.17.1.2: seq=2 ttl=63 time=1.176 ms
--- 172.17.1.2 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 1.136/1.288/1.553 ms
/ #
Start an nginx container on the second host, access it from a container on the first host, and observe the client IP in the access log
[root@ovs2 ~]#docker pull nginx
[root@ovs2 ~]#docker run -d --name nginx nginx
d3c26005a7626628f7baf017481217b36e3d69dabfa6cc86fe125f9548e7333c
[root@ovs2 ~]#docker exec -it nginx hostname -I
172.17.2.2
[root@ovs2 ~]#docker logs -f nginx
172.17.1.2 - - [27/Feb/2020:09:57:18 +0000] "GET / HTTP/1.1" 200 612 "-" "Wget" "-"
#Send the request from a container on the first host; the access-log line above shows it arriving from 172.17.1.2
[root@ovs1 ~]#docker run -it alpine wget -qO - http://172.17.2.2/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
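To confirm that this traffic really crosses the GRE tunnel, capture GRE packets (IP protocol 47) on the physical interface of either host while repeating the ping or wget; this is also a reminder that any firewall between the hosts must permit protocol 47:
[root@ovs1 ~]#tcpdump -ni eth0 ip proto 47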
Save the configuration in a script so it can be reapplied after a reboot
#Configuration for ovs1
[root@ovs1 ~]#cat > net.sh <<EOF
#!/bin/bash
ip link set dev br0 up
brctl addif docker0 br0
ip route add 172.17.2.0/24 dev docker0
EOF
[root@ovs1 ~]#chmod +x net.sh
#Configuration for ovs2
[root@ovs2 ~]#cat > net.sh <<EOF
#!/bin/bash
ip link set dev br0 up
brctl addif docker0 br0
ip route add 172.17.1.0/24 dev docker0
EOF
[root@ovs2 ~]#chmod +x net.sh
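net.sh still has to be executed after every reboot (the OVS bridge and GRE port persist in the OVS database, but the brctl addif, the link state, and the route do not). One way to run it automatically is a small systemd unit, shown here for ovs1 as a minimal sketch; the unit name and the /root/net.sh path are assumptions based on where the script was created above:
[root@ovs1 ~]#cat > /etc/systemd/system/ovs-docker-net.service <<EOF
[Unit]
Description=Attach br0 to docker0 and add cross-host container routes
After=docker.service openvswitch-switch.service
Wants=docker.service openvswitch-switch.service

[Service]
Type=oneshot
ExecStart=/bin/bash /root/net.sh
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target
EOF
[root@ovs1 ~]#systemctl daemon-reload
[root@ovs1 ~]#systemctl enable ovs-docker-net.service
Repeat the same on ovs2 with its own net.sh.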