1) Docker本地网络类型
1. 本地网络介绍
docker network ls # 查看网络
none : 无网络模式
bridge : 默认模式,相当于NAT
host : 公用宿主机Network Namespace
container:与其他容器公用Network Namespace
2. 创建bridge网络
docker network create -d bridge test_b # 创建名为test_b的bridge网络
docker network ls
docker network inspect test_b # 查看test_b的网络详细
2) Docker跨主机网络介绍
服务器OS 主机IP Docker版本 网卡名 主机名
CentOS7 192.168.137.50 18.09.9 ens33 docker01
CentOS7 192.168.137.51 18.09.9 ens33 docker02
1. Docker 跨主机访问-macvlan实现
(1) 在docker01 , docker02上创建 macvlan
docker network create --driver macvlan --subnet=10.0.0.0/24 --gateway=10.0.0.254 -o parent=ens33 macvlan_1
ip link set ens33 promisc on # 开启父网卡混杂模式 (ubuntu或其他版本需要)
docker network ls
docker network inspect macvlan_1
(2) 在docker01 , docker02上创建 nginx容器
docker container run --name nginx --network macvlan_1 --ip=10.0.0.11 -itd nginx:v11 # 在docker01上
docker container run --name nginx --network macvlan_1 --ip=10.0.0.12 -itd nginx:v11 # 在docker02上
docker container ls
(3) 测试
# 进入docker01上的nginx容器
docker container exec -it nginx bash
[root@aee2ceaec5ab /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.0.0.11 netmask 255.255.255.0 broadcast 10.0.0.255
ether 02:42:0a:00:00:0b txqueuelen 0 (Ethernet)
RX packets 51 bytes 4692 (4.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@aee2ceaec5ab /]# ping 10.0.0.12
PING 10.0.0.12 (10.0.0.12) 56(84) bytes of data.
64 bytes from 10.0.0.12: icmp_seq=1 ttl=64 time=0.586 ms
64 bytes from 10.0.0.12: icmp_seq=2 ttl=64 time=0.617 ms
# 进入docker02上的nginx容器
docker container exec -it nginx bash
[root@8d40de079c82 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.0.0.12 netmask 255.255.255.0 broadcast 10.0.0.255
ether 02:42:0a:00:00:0c txqueuelen 0 (Ethernet)
RX packets 41 bytes 3772 (3.6 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@8d40de079c82 /]# ping 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=1.28 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.783 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.756 ms
2. Docker 跨主机访问-overlay实现
### 使用consul实现docker的overlay
(1) 在docker01上启动 consul 服务,实现网络的统一配置管理
docker run -d -p 8500:8500 -h consul --restart=always --name consul progrium/consul -server -bootstrap
(2) docker01、02上
# 在docker01上
cat /etc/docker/daemon.json
...
"hosts":["tcp://0.0.0.0:2376","unix:///var/run/docker.sock"],
"cluster-store": "consul://192.168.137.50:8500",
"cluster-advertise": "192.168.137.50:2376"
...
# 在docker02上
cat /etc/docker/daemon.json
...
"hosts":["tcp://0.0.0.0:2376","unix:///var/run/docker.sock"],
"cluster-store": "consul://192.168.137.50:8500",
"cluster-advertise": "192.168.137.51:2376"
...
(3) system文件
# Write the systemd unit for dockerd.
# NOTE: the heredoc delimiter is quoted ('EOF') so the shell does NOT expand
# $MAINPID here — systemd must receive the literal string $MAINPID in
# ExecReload and expand it itself at reload time. With an unquoted EOF the
# shell would substitute it at write time (empty, or an error under set -u),
# leaving a broken "ExecReload=/bin/kill -s HUP" line in the unit file.
cat > /usr/lib/systemd/system/docker.service << 'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/local/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
(4) 重启docker
systemctl daemon-reload
systemctl restart docker
(5) 查看docker启动情况
# 在docker01上
ss -atunp | grep docker
tcp ESTAB 0 0 192.168.137.50:51020 192.168.137.50:8500 users:(("dockerd",pid=4989,fd=31))
tcp ESTAB 0 0 192.168.137.50:51014 192.168.137.50:8500 users:(("dockerd",pid=4989,fd=22))
tcp ESTAB 0 0 192.168.137.50:51016 192.168.137.50:8500 users:(("dockerd",pid=4989,fd=30))
tcp LISTEN 0 128 :::2376 :::* users:(("dockerd",pid=4989,fd=5))
tcp LISTEN 0 128 :::8500 :::* users:(("docker-proxy",pid=5183,fd=4))
# 在docker02上
ss -atunp | grep docker
udp UNCONN 0 0 192.168.137.51:7946 *:* users:(("dockerd",pid=4816,fd=19))
tcp LISTEN 0 128 192.168.137.51:7946 *:* users:(("dockerd",pid=4816,fd=18))
tcp ESTAB 0 0 192.168.137.51:47146 192.168.137.50:8500 users:(("dockerd",pid=4816,fd=17))
tcp ESTAB 0 0 192.168.137.51:47144 192.168.137.50:8500 users:(("dockerd",pid=4816,fd=16))
tcp LISTEN 0 128 :::2376 :::* users:(("dockerd",pid=4816,fd=5))
(6) 创建overlay网络(在docker01或docker02任意一台创建都可以)
docker network create -d overlay --subnet 172.11.0.0/24 --gateway 172.11.0.254 overlay_consul
(6.1) 在docker01和docker02上面查看都会出现一个overlay的网络
docker network ls
docker network inspect overlay_consul
(7) 两边启动容器测试
docker run -it --network overlay_consul busybox /bin/sh
每个容器有两块网卡,eth0实现容器间的通讯,eth1实现容器访问外网
###############################################################################
### 使用etcd实现docker的overlay
(1) 安装etcd(需要注意etcd的版本,有的版本不支持docker实现overlay)
apt-get install etcd
etcdctl --version
etcdctl version 2.2.5
(2) 修改etcd配置文件和/etc/docker/daemon.json
egrep -v "^$|^#" /etc/default/etcd
ETCD_LISTEN_CLIENT_URLS="http://192.168.137.50:2379,http://192.168.137.50:4001"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.137.50:2379,http://192.168.137.50:4001"
vim /etc/docker/daemon.json
"cluster-store": "etcd://192.168.137.50:2379"
(3) 启动etcd,重启docker
systemctl restart etcd.service
systemctl restart docker.service
(4) 创建overlay网络(在docker01或docker02任意一台创建都可以)
docker network create -d overlay overlay_etcd
(5) 两边启动容器测试
docker run -it --network overlay_etcd busybox /bin/sh
网友评论