[Docker Networking] Testing Cross-Host Docker Networking with OVS GRE

Author: nicktming | Published 2019-06-03 22:03

    1. Preparation

    1.1 Installing OVS

    See this article for installation steps: https://blog.csdn.net/wodeamd1/article/details/81282437
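
    For reference, a minimal installation on CentOS 7 might look like the sketch below; the package and service names are assumptions that vary by distribution and repository, so treat it as a starting point rather than this article's exact procedure.

    # Sketch: install and start Open vSwitch on CentOS 7
    # (assumes a yum repository that ships the openvswitch package)
    yum install -y openvswitch
    systemctl start openvswitch
    systemctl enable openvswitch
    # Verify the daemon responds; the hosts in this article run ovs 2.5.1
    ovs-vsctl show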

    1.2 A basic understanding of Docker network types

    See [mydocker]---docker的四种网络模型与原理实现(1) and [mydocker]---docker的四种网络模型与原理实现(2).

    1.3 Two machines

    vm1

    [root@vm1 ~]# ifconfig
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.19.0.12  netmask 255.255.240.0  broadcast 172.19.15.255
            inet6 fe80::5054:ff:fe4b:71f8  prefixlen 64  scopeid 0x20<link>
            ether 52:54:00:4b:71:f8  txqueuelen 1000  (Ethernet)
            RX packets 456800  bytes 531196208 (506.5 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 133968  bytes 149845102 (142.9 MiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
            inet 127.0.0.1  netmask 255.0.0.0
            inet6 ::1  prefixlen 128  scopeid 0x10<host>
            loop  txqueuelen 1000  (Local Loopback)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    [root@vm1 ~]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
    169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
    172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
    
    

    vm2

    [root@vm2 ~]# ifconfig
    eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.19.0.8  netmask 255.255.240.0  broadcast 172.19.15.255
            inet6 fe80::5054:ff:fe14:eae  prefixlen 64  scopeid 0x20<link>
            ether 52:54:00:14:0e:ae  txqueuelen 1000  (Ethernet)
            RX packets 278496  bytes 394401800 (376.1 MiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 36312  bytes 5027752 (4.7 MiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
            inet 127.0.0.1  netmask 255.0.0.0
            inet6 ::1  prefixlen 128  scopeid 0x10<host>
            loop  txqueuelen 1000  (Local Loopback)
            RX packets 0  bytes 0 (0.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 0  bytes 0 (0.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    [root@vm2 ~]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
    169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
    172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
    

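    Before building the tunnel, it is worth confirming that the two hosts can already reach each other over their 172.19.0.0/20 underlay addresses, since that is the path the GRE-encapsulated packets will take. A quick sanity check, not part of the original session:

    # On vm1: the remote GRE endpoint must be reachable
    ping -c 1 172.19.0.8
    # On vm2: and the reverse direction
    ping -c 1 172.19.0.12
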
    2. Configuration

    (Figure: gre.png, the GRE tunnel topology between vm1 and vm2)

    2.1 Configuration on vm1

    [root@vm1 ~]# ovs-vsctl show
    91e815a1-1021-4c97-a21c-893ab8c28e37
        ovs_version: "2.5.1"
    [root@vm1 ~]# 
    [root@vm1 ~]# 
    [root@vm1 ~]# ovs-vsctl add-br br0
    [root@vm1 ~]# ovs-vsctl add-port br0 tep0 -- set interface tep0 type=internal
    [root@vm1 ~]# ifconfig tep0 192.168.0.200 netmask 255.255.255.0
    [root@vm1 ~]# ovs-vsctl add-port br0 gre10 -- set interface gre10 type=gre options:remote_ip=172.19.0.8
    [root@vm1 ~]# ovs-vsctl show
    91e815a1-1021-4c97-a21c-893ab8c28e37
        Bridge "br0"
            Port "gre10"
                Interface "gre10"
                    type: gre
                    options: {remote_ip="172.19.0.8"}
            Port "tep0"
                Interface "tep0"
                    type: internal
            Port "br0"
                Interface "br0"
                    type: internal
        ovs_version: "2.5.1"
    [root@vm1 ~]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
    169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
    172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
    192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 tep0
    
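    A side note: ifconfig is deprecated on modern distributions, and an address assigned this way does not survive a reboot. The equivalent iproute2 commands for tep0 would be (a sketch, not from the original session):

    # iproute2 equivalent of "ifconfig tep0 192.168.0.200 netmask 255.255.255.0"
    ip addr add 192.168.0.200/24 dev tep0
    ip link set tep0 up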

    2.2 Configuration on vm2

    [root@vm2 ~]# ovs-vsctl add-br br0
    [root@vm2 ~]# ovs-vsctl add-port br0 tep0 -- set interface tep0 type=internal
    [root@vm2 ~]# ifconfig tep0 192.168.0.201 netmask 255.255.255.0
    [root@vm2 ~]# ovs-vsctl add-port br0 gre10 -- set interface gre10 type=gre options:remote_ip=172.19.0.12
    [root@vm2 ~]# ovs-vsctl show
    533800d4-246f-4099-a776-8254610db91f
        Bridge "br0"
            Port "gre10"
                Interface "gre10"
                    type: gre
                    options: {remote_ip="172.19.0.12"}
            Port "tep0"
                Interface "tep0"
                    type: internal
            Port "br0"
                Interface "br0"
                    type: internal
        ovs_version: "2.5.1"
    [root@vm2 ~]# route -n
    Kernel IP routing table
    Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
    0.0.0.0         172.19.0.1      0.0.0.0         UG    0      0        0 eth0
    169.254.0.0     0.0.0.0         255.255.0.0     U     1002   0        0 eth0
    172.19.0.0      0.0.0.0         255.255.240.0   U     0      0        0 eth0
    192.168.0.0     0.0.0.0         255.255.255.0   U     0      0        0 tep0
    

    2.3 Testing

    -------------------------------vm1---------------------------------------
    [root@vm1 ~]# ping -c 1 192.168.0.200
    PING 192.168.0.200 (192.168.0.200) 56(84) bytes of data.
    64 bytes from 192.168.0.200: icmp_seq=1 ttl=64 time=0.027 ms
    
    --- 192.168.0.200 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.027/0.027/0.027/0.000 ms
    [root@vm1 ~]# ping -c 1 192.168.0.201
    PING 192.168.0.201 (192.168.0.201) 56(84) bytes of data.
    64 bytes from 192.168.0.201: icmp_seq=1 ttl=64 time=4.20 ms
    
    --- 192.168.0.201 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 4.204/4.204/4.204/0.000 ms
    [root@vm1 ~]# 
    
    -------------------------------vm2---------------------------------------
    [root@vm2 ~]# ping -c 1 192.168.0.201
    PING 192.168.0.201 (192.168.0.201) 56(84) bytes of data.
    64 bytes from 192.168.0.201: icmp_seq=1 ttl=64 time=0.018 ms
    
    --- 192.168.0.201 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.018/0.018/0.018/0.000 ms
    [root@vm2 ~]# ping -c 1 192.168.0.200
    PING 192.168.0.200 (192.168.0.200) 56(84) bytes of data.
    64 bytes from 192.168.0.200: icmp_seq=1 ttl=64 time=0.684 ms
    
    --- 192.168.0.200 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.684/0.684/0.684/0.000 ms
    
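    To confirm that these pings really traverse the tunnel, you can watch for GRE traffic (IP protocol 47) on the physical interface while pinging. This verification step is a sketch, not part of the original session:

    # On vm2, dump GRE-encapsulated packets arriving on eth0;
    # each ICMP echo between the tep0 addresses should show up wrapped in GRE
    tcpdump -n -i eth0 ip proto 47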

    3. Simulating Docker containers for testing (containers in the same subnet)


    Install bridge-utils on both hosts first; it provides the brctl command used below:

    yum install bridge-utils

    3.1 Configuration on vm1

    [root@vm1 ~]# ip link add docker0 type bridge
    [root@vm1 ~]# ifconfig docker0 172.17.42.1/16
    [root@vm1 ~]# ip link set docker0 up
    [root@vm1 ~]# ip netns list
    [root@vm1 ~]# ip netns add ns1 
    [root@vm1 ~]# ip link add veth0 type veth peer name veth1 
    [root@vm1 ~]# brctl addif docker0 veth0
    [root@vm1 ~]# ip link set veth1 netns ns1
    [root@vm1 ~]# ip link set veth0 up
    [root@vm1 ~]# ip netns exec ns1 sh
    sh-4.2# ip link set veth1 up
    sh-4.2# ip link set lo up
    sh-4.2# ip addr add 172.17.1.2/16 dev veth1
    sh-4.2# ifconfig
    lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
            inet 127.0.0.1  netmask 255.0.0.0
            inet6 ::1  prefixlen 128  scopeid 0x10<host>
            loop  txqueuelen 1000  (Local Loopback)
            RX packets 2  bytes 168 (168.0 B)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 2  bytes 168 (168.0 B)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    
    veth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.17.1.2  netmask 255.255.0.0  broadcast 0.0.0.0
            inet6 fe80::78b0:1eff:fe83:58c7  prefixlen 64  scopeid 0x20<link>
            ether 7a:b0:1e:83:58:c7  txqueuelen 1000  (Ethernet)
            RX packets 31  bytes 2206 (2.1 KiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 21  bytes 1474 (1.4 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    sh-4.2# exit
    exit
    [root@vm1 ~]# 
    
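    At this point veth0 should appear as a port of docker0. A quick check, using the bridge-utils package installed above (an assumed verification step, not from the original session):

    # veth0 should be listed among docker0's interfaces
    brctl show docker0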

    3.2 Configuration on vm2

    [root@vm2 ~]# ip link add docker0 type bridge
    [root@vm2 ~]# ip addr add 172.17.43.1/16 dev docker0
    [root@vm2 ~]# ip link set docker0 up
    [root@vm2 ~]# ip netns list
    [root@vm2 ~]# ip netns add ns1 
    [root@vm2 ~]# ip link add veth0 type veth peer name veth1 
    [root@vm2 ~]# brctl addif docker0 veth0
    [root@vm2 ~]# ip link set veth1 netns ns1
    [root@vm2 ~]# ip link set veth0 up
    [root@vm2 ~]# ip netns exec ns1 sh
    sh-4.2# ip link set veth1 up
    sh-4.2# ip link set lo up
    sh-4.2# ip addr add 172.17.2.2/16 dev veth1
    sh-4.2# exit
    exit
    [root@vm2 ~]# 
    
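    One caveat: for the cross-host pings in 3.3 to succeed, the Linux bridge docker0 on each host must also be connected to the OVS bridge br0, so that 172.17.0.0/16 traffic can enter the GRE tunnel. The transcript above does not show this step; a common way to wire it up, run on both vm1 and vm2, is to add br0's internal interface to docker0 (an assumed missing step):

    # Attach the OVS bridge's internal port to the Linux bridge docker0
    brctl addif docker0 br0
    ip link set br0 up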

    3.3 Testing

    Access ns1 on vm2 from ns1 on vm1; this is the equivalent of container-to-container access across hosts.

    [root@vm1 ~]# ip netns exec ns1 sh
    sh-4.2# ifconfig veth1
    veth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
            inet 172.17.1.2  netmask 255.255.0.0  broadcast 0.0.0.0
            inet6 fe80::78b0:1eff:fe83:58c7  prefixlen 64  scopeid 0x20<link>
            ether 7a:b0:1e:83:58:c7  txqueuelen 1000  (Ethernet)
            RX packets 34  bytes 2388 (2.3 KiB)
            RX errors 0  dropped 0  overruns 0  frame 0
            TX packets 24  bytes 1656 (1.6 KiB)
            TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
    // Reach ns1 on vm2
    sh-4.2# ping -c 1 172.17.2.2
    PING 172.17.2.2 (172.17.2.2) 56(84) bytes of data.
    64 bytes from 172.17.2.2: icmp_seq=1 ttl=64 time=0.779 ms
    
    --- 172.17.2.2 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.779/0.779/0.779/0.000 ms
    // Reach the local docker0
    sh-4.2# ping -c 1 172.17.42.1
    PING 172.17.42.1 (172.17.42.1) 56(84) bytes of data.
    64 bytes from 172.17.42.1: icmp_seq=1 ttl=64 time=0.063 ms
    
    --- 172.17.42.1 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.063/0.063/0.063/0.000 ms
    // Reach docker0 on vm2
    sh-4.2# ping -c 1 172.17.43.1
    PING 172.17.43.1 (172.17.43.1) 56(84) bytes of data.
    64 bytes from 172.17.43.1: icmp_seq=1 ttl=64 time=1.17 ms
    
    --- 172.17.43.1 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 1.170/1.170/1.170/0.000 ms
    sh-4.2# exit
    exit
    [root@vm1 ~]# 
    

    Likewise, ns1 on vm2 can reach ns1 on vm1:

    [root@vm2 ~]# ip netns exec ns1 sh
    sh-4.2# ping -c 1  172.17.2.2
    PING 172.17.2.2 (172.17.2.2) 56(84) bytes of data.
    64 bytes from 172.17.2.2: icmp_seq=1 ttl=64 time=0.034 ms
    
    --- 172.17.2.2 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.034/0.034/0.034/0.000 ms
    sh-4.2# ping -c 1  172.17.1.2
    PING 172.17.1.2 (172.17.1.2) 56(84) bytes of data.
    64 bytes from 172.17.1.2: icmp_seq=1 ttl=64 time=0.769 ms
    
    --- 172.17.1.2 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.769/0.769/0.769/0.000 ms
    sh-4.2# ping -c 1  172.17.42.1
    PING 172.17.42.1 (172.17.42.1) 56(84) bytes of data.
    64 bytes from 172.17.42.1: icmp_seq=1 ttl=64 time=0.724 ms
    
    --- 172.17.42.1 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.724/0.724/0.724/0.000 ms
    sh-4.2# ping -c 1  172.17.43.1
    PING 172.17.43.1 (172.17.43.1) 56(84) bytes of data.
    64 bytes from 172.17.43.1: icmp_seq=1 ttl=64 time=0.034 ms
    
    --- 172.17.43.1 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.034/0.034/0.034/0.000 ms
    sh-4.2# exit
    exit
    [root@vm2 ~]# 
    

    The containers can now reach each other. Next, test the following cases; testing from ns1 on vm1 is sufficient.

    [root@vm1 ~]# ip netns exec ns1 sh
    // Pinging vm1's own internal IP fails, because ns1 has no default gateway yet
    sh-4.2# ping -c 1 172.19.0.12
    PING 172.19.0.12 (172.19.0.12) 56(84) bytes of data.
    // Pinging vm2's IP fails, because no iptables rule is in place and ip_forward is not enabled
    sh-4.2# ping -c 1 172.19.0.8
    PING 172.19.0.8 (172.19.0.8) 56(84) bytes of data.
    // Pinging tep0 on vm2 fails, because it is not on the same network
    sh-4.2# ping -c 1 192.168.1.200
    PING 192.168.1.200 (192.168.1.200) 56(84) bytes of data.
    // Pinging the Internet fails, because no iptables rule is in place and ip_forward is not enabled
    sh-4.2# ping -c 1 www.baidu.com
    

    Apply the following settings on vm1:

    [root@vm1 ~]# echo 1 >  /proc/sys/net/ipv4/ip_forward
    [root@vm1 ~]# iptables -t nat -A POSTROUTING -s 172.17.1.0/16 -o eth0 -j MASQUERADE
    [root@vm1 ~]# ip netns exec ns1 sh
    // Set docker0 as ns1's default gateway
    sh-4.2# route add default gw 172.17.42.1
    sh-4.2# ping -c 1 172.19.0.12
    PING 172.19.0.12 (172.19.0.12) 56(84) bytes of data.
    64 bytes from 172.19.0.12: icmp_seq=1 ttl=64 time=0.038 ms
    
    --- 172.19.0.12 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.038/0.038/0.038/0.000 ms
    sh-4.2# ping -c 1 172.19.0.8
    PING 172.19.0.8 (172.19.0.8) 56(84) bytes of data.
    64 bytes from 172.19.0.8: icmp_seq=1 ttl=63 time=0.327 ms
    
    --- 172.19.0.8 ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 0.327/0.327/0.327/0.000 ms
    sh-4.2# ping -c 1 www.baidu.com
    PING www.wshifen.com (119.63.197.151) 56(84) bytes of data.
    64 bytes from 119.63.197.151 (119.63.197.151): icmp_seq=1 ttl=49 time=51.4 ms
    
    --- www.wshifen.com ping statistics ---
    1 packets transmitted, 1 received, 0% packet loss, time 0ms
    rtt min/avg/max/mdev = 51.474/51.474/51.474/0.000 ms
    // Still cannot reach tep0 on vm2: in the GRE setup it is not on the same network
    sh-4.2# ping -c 1 192.168.1.200
    PING 192.168.1.200 (192.168.1.200) 56(84) bytes of data.
    
    --- 192.168.1.200 ping statistics ---
    1 packets transmitted, 0 received, 100% packet loss, time 0ms
    sh-4.2# exit
    exit
    [root@vm1 ~]# 
    
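    Two caveats about the settings above. The echo into /proc lasts only until reboot, and because iptables masks the source by the /16 prefix, "-s 172.17.1.0/16" actually matches all of 172.17.0.0/16. A sketch of a persistent, explicitly aligned version:

    # Persist IP forwarding across reboots
    sysctl -w net.ipv4.ip_forward=1
    echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
    # The same NAT rule with the network address written out explicitly
    iptables -t nat -A POSTROUTING -s 172.17.0.0/16 -o eth0 -j MASQUERADE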

    4. Cleanup

    vm1

    [root@vm1 ~]# ovs-vsctl show
    91e815a1-1021-4c97-a21c-893ab8c28e37
        Bridge "br0"
            Port "gre10"
                Interface "gre10"
                    type: gre
                    options: {remote_ip="172.19.0.8"}
            Port "tep0"
                Interface "tep0"
                    type: internal
            Port "br0"
                Interface "br0"
                    type: internal
        ovs_version: "2.5.1"
    [root@vm1 ~]# ovs-vsctl del-br br0
    [root@vm1 ~]# ip link delete docker0 type bridge
    [root@vm1 ~]# ip link delete veth0 type veth
    [root@vm1 ~]# ip netns delete ns1
    [root@vm1 ~]# iptables -t nat -F
    [root@vm1 ~]# ovs-vsctl show
    91e815a1-1021-4c97-a21c-893ab8c28e37
        ovs_version: "2.5.1"
    [root@vm1 ~]# 
    

    vm2

    [root@vm2 ~]# ovs-vsctl show
    533800d4-246f-4099-a776-8254610db91f
        Bridge "br0"
            Port "gre10"
                Interface "gre10"
                    type: gre
                    options: {remote_ip="172.19.0.12"}
            Port "tep0"
                Interface "tep0"
                    type: internal
            Port "br0"
                Interface "br0"
                    type: internal
        ovs_version: "2.5.1"
    [root@vm2 ~]# ovs-vsctl del-br br0
    [root@vm2 ~]# ip link delete docker0 type bridge
    [root@vm2 ~]# ip link delete veth0 type veth
    [root@vm2 ~]# ip netns delete ns1
    [root@vm2 ~]# iptables -t nat -F
    
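    Note that "iptables -t nat -F" flushes every rule in the nat table, including any added by the real Docker daemon. On a host actually running Docker, deleting just the rule added in section 3 is safer (a sketch):

    # Remove only the MASQUERADE rule added earlier
    iptables -t nat -D POSTROUTING -s 172.17.1.0/16 -o eth0 -j MASQUERADE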

    5. References

    1. https://blog.csdn.net/wodeamd1/article/details/81282437
    2. https://blog.csdn.net/song7999/article/details/80403527
    3. Docker 容器与容器云 (book)
    4. https://blog.csdn.net/qq_27366789/article/details/83348366
