centos ovn 搭建测试(八:SFC)

SFC 功能验证

该功能目前社区并未支持,未来计划是否支持上还不清楚,但是有业界大佬添加了SFC的功能。

仓库是:https://github.com/doonhammer/ovs.git,使用sfc.v30分支,ovs版本是2.9.90,系统 kernel 版本不能太高。

注意:该分支不支持高于 4.15.x 的内核版本(version newer than 4.15.x is not supported)。

SFC详细配置可以参考:

https://gist.github.com/voyageur/a26943eced3324b302f1ffede45252bd

https://opnfv-ovn4nfv.readthedocs.io/en/latest/development/ovn-sfc-openstack.html

一.环境说明

centos:CentOS Linux release 7.8.2003 (Core) 

kernel:4.12.10

openvswitch:2.9.90

二.源码编译

# Download the source (doonhammer's SFC fork of OVS)
git clone https://github.com/doonhammer/ovs.git
cd ovs                 # fix: checkout must run inside the cloned repository
git checkout sfc.v30

# Build the RPM packages (userspace + kernel module)
./boot.sh
./configure
make rpm-fedora
make rpm-fedora-kmod

三.安装

# Install the freshly built RPMs (paths are relative to the ovs source tree)
cd rpm/rpmbuild/RPMS/x86_64/
# The python bindings package is arch-independent, hence the ../noarch path
yum install *.rpm ../noarch/python-openvswitch-2.9.90-1.el7.noarch.rpm

四.网络拓扑

 环境说明:

SFC1:ns-a ---> p2 ---> p4 ---> ns-b(ns-a ping ns-b)

SFC2:ns-a ---> p2 ---> p4 ---> p6 ---> p8 ---> ns-c(ns-a ping ns-c)

使用bridge作为模拟NFV转发,需要关闭环境IPv6模块,不然bridge会环路,环路后CPU很高,报文无法处理导致ping不通。

 五.逻辑交换机及逻辑port配置

# Create the logical switch n1
ovn-nbctl ls-add n1

# n1-p1..n1-p4 are the SFC service (VNF) ports
ovn-nbctl lsp-add n1 n1-p1 
ovn-nbctl lsp-set-addresses n1-p1 "00:00:00:00:00:11 1.1.1.10"

ovn-nbctl lsp-add n1 n1-p2
ovn-nbctl lsp-set-addresses n1-p2 "00:00:00:00:00:22 1.1.1.20"

ovn-nbctl lsp-add n1 n1-p3
ovn-nbctl lsp-set-addresses n1-p3 "00:00:00:00:00:33 1.1.1.30"

ovn-nbctl lsp-add n1 n1-p4
ovn-nbctl lsp-set-addresses n1-p4 "00:00:00:00:00:44 1.1.1.40"

# n1-ap / n1-bp / n1-cp are the endpoint ports for namespaces ns-a / ns-b / ns-c
ovn-nbctl lsp-add n1 n1-ap
ovn-nbctl lsp-set-addresses n1-ap "00:00:00:00:00:77 1.1.1.70"

ovn-nbctl lsp-add n1 n1-bp
ovn-nbctl lsp-set-addresses n1-bp "00:00:00:00:00:88 1.1.1.80"

ovn-nbctl lsp-add n1 n1-cp
ovn-nbctl lsp-set-addresses n1-cp "00:00:00:00:00:99 1.1.1.90"

六.逻辑服务链配置

# Create a port pair (ingress/egress legs of a VNF)
# lsp-pair-add SWITCH PORT-IN PORT-OUT [LSP-PAIR]
ovn-nbctl lsp-pair-add n1 n1-p1 n1-p2 VNF1-PP1

# Create the logical port chain
ovn-nbctl lsp-chain-add n1 PC1

# Add a port-pair group to the chain
ovn-nbctl lsp-pair-group-add PC1 PG1

# Add a VNF port pair to the group; multiple VNFs may be added for load balancing
ovn-nbctl lsp-pair-group-add-port-pair PG1 VNF1-PP1

# Configure the chain classifier
# The path of the flow from or to the logical port. The valid values are "entry-lport" or "exit-lport". If the path is "entry-lport" the rules are applied to traffic leaving the entry-lport, if the path is "exit-lport" the port-chain is applied to traffic going to the lport.
# The direction of the flows through the port chain, this can be either "uni-directional" or "bi-directional".
ovn-nbctl lsp-chain-classifier-add n1 PC1 n1-bp 'entry-lport' 'bi-directional' PCC1 ''

# Second chain PC2: traffic to n1-cp traverses two port-pair groups (VNF2 then VNF1)
ovn-nbctl lsp-pair-add n1 n1-p3 n1-p4 VNF2-PP1
ovn-nbctl lsp-chain-add n1 PC2
ovn-nbctl lsp-pair-group-add PC2 PG2
ovn-nbctl lsp-pair-group-add PC2 PG3
ovn-nbctl lsp-pair-group-add-port-pair PG2 VNF2-PP1
ovn-nbctl lsp-pair-group-add-port-pair PG3 VNF1-PP1
ovn-nbctl lsp-chain-classifier-add n1 PC2 n1-cp 'entry-lport' 'bi-directional' PCC2 ''

七.配置ns-a,ns-b,ns-c

# ns-a: veth1 stays on br-int (logical port n1-ap), veth2 moves into the namespace.
# Use `ip link set ... up` throughout instead of deprecated ifconfig, and bring the
# namespace leg up only after the move — moving a link into a netns downs it anyway.
ip netns add ns-a
ip link add veth1 type veth peer name veth2
ip link set veth1 up
ip link set veth2 netns ns-a
ip netns exec ns-a ip link set veth2 address 00:00:00:00:00:77
ip netns exec ns-a ip addr add 1.1.1.70/24 dev veth2
ip netns exec ns-a ip link set veth2 up
ovs-vsctl add-port br-int veth1
# Bind the OVS interface to its OVN logical port
ovs-vsctl set Interface veth1 external_ids:iface-id=n1-ap

# ns-b: veth3 on br-int (n1-bp), veth4 inside the namespace
ip netns add ns-b
ip link add veth3 type veth peer name veth4
ip link set veth3 up
ip link set veth4 netns ns-b
ip netns exec ns-b ip link set veth4 address 00:00:00:00:00:88
ip netns exec ns-b ip addr add 1.1.1.80/24 dev veth4
ip netns exec ns-b ip link set veth4 up
ovs-vsctl add-port br-int veth3
ovs-vsctl set Interface veth3 external_ids:iface-id=n1-bp

# ns-c: veth5 on br-int (n1-cp), veth6 inside the namespace
ip netns add ns-c
ip link add veth5 type veth peer name veth6
ip link set veth5 up
ip link set veth6 netns ns-c
ip netns exec ns-c ip link set veth6 address 00:00:00:00:00:99
ip netns exec ns-c ip addr add 1.1.1.90/24 dev veth6
ip netns exec ns-c ip link set veth6 up
ovs-vsctl add-port br-int veth5
ovs-vsctl set Interface veth5 external_ids:iface-id=n1-cp

八.配置VNF

# VNF plumbing: four veth pairs. The odd-numbered ends (p1/p3/p5/p7) attach to
# br-int as the logical SFC ports; the even-numbered peers (p2/p4/p6/p8) are
# bridged together to emulate two transparent (bump-in-the-wire) VNFs.
ip link add p1 type veth peer name p2
ip link add p3 type veth peer name p4
ip link add p5 type veth peer name p6
ip link add p7 type veth peer name p8

# Bring all legs up (ip link instead of deprecated ifconfig, consistent with
# the rest of this setup)
for dev in p1 p2 p3 p4 p5 p6 p7 p8; do
  ip link set "$dev" up
done

# Bind the br-int ends to their OVN logical ports
ovs-vsctl add-port br-int p1
ovs-vsctl set Interface p1 external_ids:iface-id=n1-p1
ovs-vsctl add-port br-int p3
ovs-vsctl set Interface p3 external_ids:iface-id=n1-p2
ovs-vsctl add-port br-int p5
ovs-vsctl set Interface p5 external_ids:iface-id=n1-p3
ovs-vsctl add-port br-int p7
ovs-vsctl set Interface p7 external_ids:iface-id=n1-p4

# Give the VNF-side peers the MACs of the logical ports they stand in for
ip link set p2 address 00:00:00:00:00:11
ip link set p4 address 00:00:00:00:00:22
ip link set p6 address 00:00:00:00:00:33
ip link set p8 address 00:00:00:00:00:44

# br1 emulates VNF1 (p2<->p4), br2 emulates VNF2 (p6<->p8).
# NOTE(review): brctl is legacy; kept here because the host image ships it.
brctl addbr br1
brctl addbr br2

ip link set br1 up
ip link set br2 up

brctl addif br1 p2
brctl addif br1 p4
brctl addif br2 p6
brctl addif br2 p8

九.OVN配置查看

[root@ovn-master ~]# ovn-nbctl show
switch bc2a834e-d162-44b8-aec6-376ffcdf62dc (n1)
    port n1-ap
        addresses: ["00:00:00:00:00:77 1.1.1.70"]
    port n1-p1
        addresses: ["00:00:00:00:00:11 1.1.1.10"]
    port n1-p3
        addresses: ["00:00:00:00:00:33 1.1.1.30"]
    port n1-p5
        addresses: ["00:00:00:00:00:55 1.1.1.50"]
    port n1-p4
        addresses: ["00:00:00:00:00:44 1.1.1.40"]
    port n1-bp
        addresses: ["00:00:00:00:00:88 1.1.1.80"]
    port n1-cp
        addresses: ["00:00:00:00:00:99 1.1.1.90"]
    port n1-p6
        addresses: ["00:00:00:00:00:66 1.1.1.60"]
    port n1-p2
        addresses: ["00:00:00:00:00:22 1.1.1.20"]

[root@ovn-master ~]# ovn-sbctl show
Chassis "52778a67-8232-4802-9d7c-57e08ce6890e"
    hostname: "ovn-node1"
    Encap geneve
        ip: "192.168.1.199"
        options: {csum="true"}
Chassis "32b40428-bde1-4ba7-b5a7-aab310cd2baf"
    hostname: ovn-master
    Encap geneve
        ip: "192.168.1.200"
        options: {csum="true"}
    Port_Binding "n1-p2"
    Port_Binding "n1-cp"
    Port_Binding "n1-p1"
    Port_Binding "n1-ap"
    Port_Binding "n1-p4"
    Port_Binding "n1-bp"
    Port_Binding "n1-p3"

十.功能验证

[root@ovn-master ~]# ip netns exec ns-a ping 1.1.1.80 -c 3
PING 1.1.1.80 (1.1.1.80) 56(84) bytes of data.
64 bytes from 1.1.1.80: icmp_seq=1 ttl=64 time=0.074 ms
64 bytes from 1.1.1.80: icmp_seq=2 ttl=64 time=0.070 ms
64 bytes from 1.1.1.80: icmp_seq=3 ttl=64 time=0.068 ms

--- 1.1.1.80 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2082ms
rtt min/avg/max/mdev = 0.068/0.070/0.074/0.010 ms
[root@ovn-master ~]# ip netns exec ns-a ping 1.1.1.90 -c 3
PING 1.1.1.90 (1.1.1.90) 56(84) bytes of data.
64 bytes from 1.1.1.90: icmp_seq=1 ttl=64 time=0.064 ms
64 bytes from 1.1.1.90: icmp_seq=2 ttl=64 time=0.070 ms
64 bytes from 1.1.1.90: icmp_seq=3 ttl=64 time=0.079 ms

--- 1.1.1.90 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2053ms
rtt min/avg/max/mdev = 0.064/0.071/0.079/0.006 ms

通过p1-p8、br1和br2上抓包,及br1和br2 up/down 操作可以验证icmp报文的流量路径符合预期。

十一. ovn-trace 验证路径

1)验证ns-a到ns-b路径

 ①n1-ap ---> n1-p2,如果网桥成功转发后,流量将会到达p1--->n1-p1

[root@ovn-master ~]# ovn-trace n1 'inport == "n1-ap" && eth.src == 00:00:00:00:00:77 && eth.dst == 00:00:00:00:00:88'
# reg14=0x7,vlan_tci=0x0000,dl_src=00:00:00:00:00:77,dl_dst=00:00:00:00:00:88,dl_type=0x0000

ingress(dp="n1", inport="n1-ap")
--------------------------------
 0. ls_in_port_sec_l2 (ovn-northd.c:4087): inport == "n1-ap", priority 50, uuid 5532c1f8
    next;
11. ls_in_chain (ovn-northd.c:3692): eth.dst == 00:00:00:00:00:88, priority 100, uuid b939c5c3
    outport = "n1-p2";
    output;

egress(dp="n1", inport="n1-ap", outport="n1-p2")
------------------------------------------------
10. ls_out_port_sec_l2 (ovn-northd.c:4549): outport == "n1-p2", priority 50, uuid f2f816cb
    output;
    /* output to "n1-p2", type "" */

②n1-p1 ---> n1-bp

[root@ovn-master ~]# ovn-trace n1 'inport == "n1-p1" && eth.src == 00:00:00:00:00:77 && eth.dst == 00:00:00:00:00:88'
# reg14=0x1,vlan_tci=0x0000,dl_src=00:00:00:00:00:77,dl_dst=00:00:00:00:00:88,dl_type=0x0000

ingress(dp="n1", inport="n1-p1")
--------------------------------
 0. ls_in_port_sec_l2 (ovn-northd.c:4087): inport == "n1-p1", priority 50, uuid 3f36e0bf
    next;
11. ls_in_chain (ovn-northd.c:3717): eth.dst == 00:00:00:00:00:88 && inport == "n1-p1", priority 150, uuid f51ba58e
    next;
17. ls_in_l2_lkup (ovn-northd.c:4422): eth.dst == 00:00:00:00:00:88, priority 50, uuid f16f7723
    outport = "n1-bp";
    output;

egress(dp="n1", inport="n1-p1", outport="n1-bp")
------------------------------------------------
10. ls_out_port_sec_l2 (ovn-northd.c:4549): outport == "n1-bp", priority 50, uuid af31f9d5
    output;
    /* output to "n1-bp", type "" */

2)验证ns-a到ns-c路径 

① n1-ap ---> n1-p2,如果网桥成功转发后,流量将会到达p1--->n1-p1

[root@ovn-master ~]# ovn-trace n1 'inport == "n1-ap" && eth.src == 00:00:00:00:00:77 && eth.dst == 00:00:00:00:00:99'
# reg14=0x7,vlan_tci=0x0000,dl_src=00:00:00:00:00:77,dl_dst=00:00:00:00:00:99,dl_type=0x0000

ingress(dp="n1", inport="n1-ap")
--------------------------------
 0. ls_in_port_sec_l2 (ovn-northd.c:4087): inport == "n1-ap", priority 50, uuid 5532c1f8
    next;
11. ls_in_chain (ovn-northd.c:3692): eth.dst == 00:00:00:00:00:99, priority 100, uuid 420b3640
    outport = "n1-p2";
    output;

egress(dp="n1", inport="n1-ap", outport="n1-p2")
------------------------------------------------
10. ls_out_port_sec_l2 (ovn-northd.c:4549): outport == "n1-p2", priority 50, uuid f2f816cb
    output;
    /* output to "n1-p2", type "" */

②n1-p1 ---> n1-p4,如果网桥成功转发后,流量将会到达p5--->n1-p3

[root@ovn-master ~]# ovn-trace n1 'inport == "n1-p1" && eth.src == 00:00:00:00:00:77 && eth.dst == 00:00:00:00:00:99'
# reg14=0x1,vlan_tci=0x0000,dl_src=00:00:00:00:00:77,dl_dst=00:00:00:00:00:99,dl_type=0x0000

ingress(dp="n1", inport="n1-p1")
--------------------------------
 0. ls_in_port_sec_l2 (ovn-northd.c:4087): inport == "n1-p1", priority 50, uuid 3f36e0bf
    next;
11. ls_in_chain (ovn-northd.c:3746): eth.dst == 00:00:00:00:00:99 && inport == "n1-p1", priority 150, uuid d94804de
    outport = "n1-p4";
    output;

egress(dp="n1", inport="n1-p1", outport="n1-p4")
------------------------------------------------
10. ls_out_port_sec_l2 (ovn-northd.c:4549): outport == "n1-p4", priority 50, uuid c5059dbc
    output;
    /* output to "n1-p4", type "" */

③n1-p3 ---> n1-cp

[root@ovn-master ~]# ovn-trace n1 'inport == "n1-p3" && eth.src == 00:00:00:00:00:77 && eth.dst == 00:00:00:00:00:99'
# reg14=0x3,vlan_tci=0x0000,dl_src=00:00:00:00:00:77,dl_dst=00:00:00:00:00:99,dl_type=0x0000

ingress(dp="n1", inport="n1-p3")
--------------------------------
 0. ls_in_port_sec_l2 (ovn-northd.c:4087): inport == "n1-p3", priority 50, uuid 61232acb
    next;
11. ls_in_chain (ovn-northd.c:3717): eth.dst == 00:00:00:00:00:99 && inport == "n1-p3", priority 150, uuid 63c1f326
    next;
17. ls_in_l2_lkup (ovn-northd.c:4422): eth.dst == 00:00:00:00:00:99, priority 50, uuid 59e78ff3
    outport = "n1-cp";
    output;

egress(dp="n1", inport="n1-p3", outport="n1-cp")
------------------------------------------------
10. ls_out_port_sec_l2 (ovn-northd.c:4549): outport == "n1-cp", priority 50, uuid 45d1f786
    output;
    /* output to "n1-cp", type "" */

两个SFC流量路径符合预期。

十二.思考

SFC功能在项目上完整实施是一个比较复杂的研发过程。如果没有OVN,可以通过插件灵活控制流量下发;不过目前有社区OVN-SFC的源码参考,所以通过定义逻辑服务链控制流表对于研发也不是难事。但是一个完整的SFC需要考虑的因素比较多,例如负载均衡、链路探测、故障BYPASS、负载故障如何切换等等,都有一定的开发量。同时两端的ns-a、ns-b、ns-c在实际项目中不是这么用的:两端额外挂虚拟机或者namespace并不是可取的方案,应该被替换掉;期望的是交换机的流量可以直接引到SFC的头结点,或者有一个统一的VIP调度节点将流量引导到不同的SFC链上。所以目前验证的这些功能只是基础,后续业务上线还有一定的开发工作量。

 
posted @ 2022-07-25 10:16  salami_china  阅读(475)  评论(0编辑  收藏  举报