Docker Network Access

 

I. Docker and Linux Network Namespaces

 

Linux network namespace basics

#ip netns list      (list network namespaces)

#ip netns delete test1      (delete the network namespace test1)

#ip netns add test1      (create the network namespace test1)

Run ip a or ip link inside the test1 namespace:
# ip netns exec test1 ip a
# ip netns exec test1 ip link

Bring the lo interface inside the test1 namespace up:
# ip netns exec test1 ip link set dev lo up

Example:
Connect the network interfaces of two namespaces
(create a veth pair and use it to link the two namespaces)

node1
Create the two interfaces: veth-test1@veth-test2 and veth-test2@veth-test1
#ip link add veth-test1 type veth peer name veth-test2
Move veth-test1 into netns test1
#ip link set veth-test1 netns test1
#ip netns exec test1 ip link      (check inside the namespace)
#ip link                          (check on the host)

Move veth-test2 into netns test2 (create test2 first if needed: #ip netns add test2)
#ip link set veth-test2 netns test2
#ip netns exec test2 ip link      (check inside the namespace)
#ip link                          (check on the host)

Assign IP addresses to the two interfaces
#ip netns exec test1 ip addr  add 192.168.1.1/24 dev veth-test1
#ip netns exec test2 ip addr add 192.168.1.2/24 dev veth-test2

Bring both interfaces up
#ip netns exec test1 ip link set dev veth-test1 up
#ip netns exec test2 ip link set dev veth-test2 up

Check the addresses of both interfaces
#ip netns exec test1  ip a
#ip netns exec test2  ip a
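To confirm the veth pair really connects the two namespaces, ping across them (a minimal check using the addresses assigned above):
#ip netns exec test1 ping -c 3 192.168.1.2
#ip netns exec test2 ping -c 3 192.168.1.1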

  

1. Docker bridge (docker0)

List Docker networks
#docker network ls
List running containers
#docker ps
Inspect a Docker network in detail
#docker network inspect [NETWORK ID]

How are docker0, the veth pair, and the network inside the container connected?
First install the package that provides the brctl command:
#yum install bridge-utils

Show the bridge status
#brctl show
docker0 --- vethXXXX      (the host side of each container's veth pair is attached to docker0)

Inspect the bridge network in detail
#docker network inspect bridge
Here you can see each container's IP address.
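A quick way to match a container's eth0 with its host-side veth on docker0 (a minimal sketch; assumes a running busybox container named test1, and the interface indexes will differ on your host):
#docker exec test1 ip link        (inside: e.g. "5: eth0@if6 ..." -> the peer's host index is 6)
#ip link                          (on the host: "6: vethXXXX@if5 ..." is that peer)
#brctl show docker0               (the vethXXXX interface is listed under docker0)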


2. Docker multi-container communication
Create test2 with --link test1 (this is roughly equivalent to adding a DNS entry for test1)
#docker run -d --name  test2  --link test1  busybox  /bin/sh  -c "while true; do sleep 3600; done"
#docker exec -it test2  /bin/sh
#ping test1      (ping by name succeeds)
#telnet test1 3306      (access port 3306 on test1 by name, assuming a service listens there)

How to create a new bridge network and connect containers to it
List the networks
#docker network ls
Create a bridge network named my-bridge
#docker network create -d bridge my-bridge
Create test3 on my-bridge (specify the network when creating the container)
#docker run -d --name test3 --network my-bridge busybox /bin/sh -c "while true; do sleep 3600; done"
Connect test2 to my-bridge as well, so it now has an IP on two networks
#docker network connect my-bridge test2
On a user-created bridge (such as my-bridge), containers resolve each other by name automatically, as if --link had been set up. From inside test3:
#ping test2      (ping succeeds)
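A minimal sketch contrasting the two bridges (assumes the test1, test2, and test3 containers created above):
#docker exec -it test3 ping -c 2 test2      (works: both containers are on the user-defined my-bridge, which provides name resolution)
#docker exec -it test1 ping -c 2 test2      (fails by name: the default bridge has no automatic name resolution and test1 has no --link to test2)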

3. Docker port mapping
#docker run --name web  -d nginx
#telnet 172.17.0.2  80
Re-create the container with an explicit port mapping (remove the old one first, e.g. #docker rm -f web); this maps container port 80 to host port 80
#docker run --name  web -d -p 80:80  nginx
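To verify the mapping (a minimal check, assuming the web container above is running):
#docker ps              (PORTS shows 0.0.0.0:80->80/tcp)
#docker port web        (prints the mapping for the container)
#curl 127.0.0.1:80      (returns the nginx welcome page)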

4. The none and host Docker networks
#docker network ls
NETWORK ID          NAME                DRIVER
                    bridge              bridge
                    host                host
                    none                null
Create a container on the none network
#docker run -d --name test1 --network none busybox /bin/sh -c "while true; do sleep 3600; done"
#docker ps
#docker network inspect none
The container has no network configuration at all (no IP, no interface besides lo).
Create a container on the host network (remove the previous test1 first, e.g. #docker rm -f test1)
#docker run -d --name test1 --network host busybox /bin/sh -c "while true; do sleep 3600; done"
#docker network inspect host
A container created on the host network shares the host's network stack, so its network configuration is identical to the host's.
Drawback: possible port and network conflicts with other services on the host.
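A quick way to see the difference (assuming the test1 container above):
#docker exec -it test1 ip a      (with --network none: only lo; with --network host: the same interfaces as running ip a on the host)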

5. Deploying an application across multiple containers
Dockerfile
FROM python:2.7
LABEL maintainer="xiaoming@163.com"
COPY .  /app
WORKDIR  /app
RUN  pip install  flask  redis
EXPOSE 5000
CMD [ "python", "app.py" ]

app.py
import os
import socket

from flask import Flask
from redis import Redis

app = Flask(__name__)
redis = Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'), port=6379)


@app.route('/')
def hello():
    redis.incr('hits')
    return 'Hello Container World! I have been seen %s times and my hostname is %s\n' % (redis.get('hits'), socket.gethostname())


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
Create the redis container
#docker run -d --name redis redis
Note: port 6379 is only used internally by the app, so there is no need to expose it to the outside with -p 6379:6379.
Build the flask-redis image
#docker build -t xiaoming/flask-redis .
#docker image ls
Create a new container linked to the redis container, and set the environment variable with -e REDIS_HOST=redis
#docker run -d --link redis --name flask-redis -e REDIS_HOST=redis xiaoming/flask-redis
Enter the container
#docker  exec -it flask-redis /bin/sh
env
REDIS_HOST=redis
#ping redis
#curl  127.0.0.1:5000
#exit
#docker stop flask-redis
#docker rm flask-redis
Expose port 5000 to the host (-e REDIS_HOST=redis sets the environment variable, which can be checked with env inside the container)
#docker run -d -p 5000:5000 --link redis  --name flask-redis  -e REDIS_HOST=redis xiaoming/flask-redis
#curl  127.0.0.1:5000
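A quick check that the counter is really backed by redis (assuming the containers above; the exact counts will differ):
#curl 127.0.0.1:5000      (... I have been seen 1 times ...)
#curl 127.0.0.1:5000      (... I have been seen 2 times ...)
#docker exec -it redis redis-cli get hits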

6. Docker overlay networking with etcd
Multi-host container communication requires a distributed key-value store; here etcd is used.
node1
#tar zxvf  etcd-v3.0.12-linux-amd64.tar.gz
#cd etcd-v3.0.12-linux-amd64
#nohup ./etcd --name docker-node1 --initial-advertise-peer-urls http://192.168.205.10:2380 \
--listen-peer-urls http://192.168.205.10:2380 \
--listen-client-urls http://192.168.205.10:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.205.10:2379 \
--initial-cluster-token etcd-cluster  \
--initial-cluster docker-node1=192.168.205.10:2380,docker-node2=192.168.205.11:2380 \
--initial-cluster-state new&
node2
#tar zxvf  etcd-v3.0.12-linux-amd64.tar.gz
#cd etcd-v3.0.12-linux-amd64
#nohup ./etcd --name docker-node2 --initial-advertise-peer-urls http://192.168.205.11:2380 \
--listen-peer-urls http://192.168.205.11:2380 \
--listen-client-urls http://192.168.205.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://192.168.205.11:2379 \
--initial-cluster-token etcd-cluster  \
--initial-cluster docker-node1=192.168.205.10:2380,docker-node2=192.168.205.11:2380 \
--initial-cluster-state new&
Verify the etcd cluster
#cd etcd-v3.0.12-linux-amd64  && ./etcdctl  cluster-health
Restart the Docker service on both node1 and node2:
#service docker stop
Start dockerd manually with the cluster-store options.
node1:
#/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://192.168.205.10:2379 --cluster-advertise=192.168.205.10:2375&
#docker version
node2:
#/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --cluster-store=etcd://192.168.205.11:2379 --cluster-advertise=192.168.205.11:2375&
#docker version
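A quick check that both daemons registered themselves with the cluster store (run on either node; both node addresses should be listed):
#cd etcd-v3.0.12-linux-amd64 && ./etcdctl ls /docker/nodes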
Create an overlay network
On docker-node1, create an overlay network named demo:
#docker network ls
#docker network create -d overlay demo
#docker network ls
The demo network also shows up on node2. How was it synchronized? Through etcd:
#cd etcd-v3.0.12-linux-amd64
#./etcdctl ls
#./etcdctl ls /docker
#./etcdctl ls /docker/network
#./etcdctl ls /docker/nodes
#./etcdctl ls /docker/network/v1.0
#./etcdctl ls /docker/network/v1.0/network
The NETWORK ID shown by docker network ls matches the key under /docker/network/v1.0/network, which shows that etcd synchronized the network between the nodes.
#docker network inspect demo
Example:
Create containers on both nodes and connect them to the overlay network
node1
#docker run -d --name test1 --net demo busybox sh -c "while true; do sleep 3600; done"
node2
#docker run -d --name test2 --net demo busybox sh -c "while true; do sleep 3600; done"
Verify
#docker network inspect demo
#docker exec -it test2 ip a
#docker exec -it test2 ping 10.0.0.3      or: #docker exec -it test1 ping test2

  

 

Docker network access:

#systemctl  start docker

#ifconfig

[root@zizhen02 ~]# ifconfig

docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500

        inet 172.17.0.1  netmask 255.255.0.0  broadcast 172.17.255.255

        ether 02:42:b2:e3:5b:db  txqueuelen 0  (Ethernet)

        RX packets 0  bytes 0 (0.0 B)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 0  bytes 0 (0.0 B)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

 

The bridge-utils package must be installed for brctl; run: #yum install bridge-utils

https://fp-moon.iteye.com/blog/1468650

[root@zizhen02 ~]# brctl show

bridge name    bridge id            STP enabled     interfaces

docker0             8000.0242b2e35bdb        no    

 

Docker supports two ways of mapping container ports: random mapping and explicit mapping.

Random mapping: docker run -P

#docker images

#docker run -d -P nginx      (run the container in the background)

[root@zizhen02 ~]# docker ps

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                   NAMES

2ce4f13ed654        nginx               "nginx -g 'daemon of…"   22 seconds ago      Up 21 seconds       0.0.0.0:32768->80/tcp   condescending_wilbur
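The randomly chosen host port (32768 in the output above) can be tested right away; a minimal check:
#curl 127.0.0.1:32768                 (returns the nginx welcome page)
#docker port condescending_wilbur     (80/tcp -> 0.0.0.0:32768)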

Check the NAT table: #iptables -t nat -vnL

[root@zizhen02 ~]# iptables -t nat -vnL

Chain PREROUTING (policy ACCEPT 10 packets, 1030 bytes)

 pkts bytes target     prot opt in     out     source               destination        

   34  1760 DOCKER     all  --  *      *       0.0.0.0/0            0.0.0.0/0            ADDRTYPE match dst-type LOCAL

 

Chain INPUT (policy ACCEPT 10 packets, 1030 bytes)

 pkts bytes target     prot opt in     out     source               destination        

 

Chain OUTPUT (policy ACCEPT 24 packets, 1448 bytes)

 pkts bytes target     prot opt in     out     source               destination        

    1    60 DOCKER     all  --  *      *       0.0.0.0/0           !127.0.0.0/8          ADDRTYPE match dst-type LOCAL

 

Chain POSTROUTING (policy ACCEPT 53 packets, 2964 bytes)

 pkts bytes target     prot opt in     out     source               destination        

    0     0 MASQUERADE  all  --  *      !docker0  172.17.0.0/16        0.0.0.0/0          

    0     0 MASQUERADE  tcp  --  *      *       172.17.0.2           172.17.0.2           tcp dpt:80

 

Chain DOCKER (2 references)

 pkts bytes target     prot opt in     out     source               destination        

    0     0 RETURN     all  --  docker0 *       0.0.0.0/0            0.0.0.0/0          

   29  1516 DNAT       tcp  --  !docker0 *       0.0.0.0/0            0.0.0.0/0            tcp dpt:32768 to:172.17.0.2:80

#ps  -aux |grep dhcp

#cat /var/lib/libvirt/dnsmasq/default.conf

[root@zizhen02 ~]# docker ps

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                   NAMES

2ce4f13ed654        nginx               "nginx -g 'daemon of…"   16 minutes ago      Up 16 minutes       0.0.0.0:32768->80/tcp   condescending_wilbur

[root@zizhen02 ~]# docker logs 2ce4f13ed654

 

Explicit mapping:

-p  hostPort:containerPort

-p  ip:hostPort:containerPort

-p  ip::containerPort

-p  hostPort:containerPort/udp

-p  443:443  -p 81:80

#

[root@zizhen02 ~]# docker run -d -p 192.168.1.107:8000:80 --name mynginx nginx

5cb5e2fb6d1151d4cf5a1bf5c5241ce6baa55375fc319d97723b5ae17ed98199

[root@zizhen02 ~]# docker ps

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                        NAMES

5cb5e2fb6d11        nginx               "nginx -g 'daemon of…"   16 seconds ago      Up 15 seconds       192.168.1.107:8000->80/tcp   mynginx

2ce4f13ed654        nginx               "nginx -g 'daemon of…"   23 minutes ago      Up 23 minutes       0.0.0.0:32768->80/tcp        condescending_wilbur

[root@zizhen02 ~]# docker ps -l

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                        NAMES

5cb5e2fb6d11        nginx               "nginx -g 'daemon of…"   23 seconds ago      Up 22 seconds       192.168.1.107:8000->80/tcp   mynginx

[root@zizhen02 ~]# curl 192.168.1.107:8000

<!DOCTYPE html>

<html>

<head>

<title>Welcome to nginx!</title>

<style>

    body {

        width: 35em;

        margin: 0 auto;

        font-family: Tahoma, Verdana, Arial, sans-serif;

    }

</style>

</head>

<body>

<h1>Welcome to nginx!</h1>

<p>If you see this page, the nginx web server is successfully installed and

working. Further configuration is required.</p>

 

<p>For online documentation and support please refer to

<a href="http://nginx.org/">nginx.org</a>.<br/>

Commercial support is available at

<a href="http://nginx.com/">nginx.com</a>.</p>

 

<p><em>Thank you for using nginx.</em></p>

</body>

</html>

#

[root@zizhen02 ~]# docker port mynginx

80/tcp -> 192.168.1.107:8000

[root@zizhen02 ~]# docker run -d -p 443:443 -p 8001:80 --name nginxv2 nginx

97b6ec6152522ef7d747d60e0d9b611fab741fd4a05e14baa645d1dd23c6d874

[root@zizhen02 ~]# docker ps -l

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                        NAMES

97b6ec615252        nginx               "nginx -g 'daemon of…"   10 seconds ago      Up 8 seconds        0.0.0.0:443->443/tcp, 0.0.0.0:8001->80/tcp   nginxv2

[root@zizhen02 ~]# docker ps

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                                        NAMES

97b6ec615252        nginx               "nginx -g 'daemon of…"   20 seconds ago      Up 18 seconds       0.0.0.0:443->443/tcp, 0.0.0.0:8001->80/tcp   nginxv2

5cb5e2fb6d11        nginx               "nginx -g 'daemon of…"   8 minutes ago       Up 8 minutes        192.168.1.107:8000->80/tcp                   mynginx

2ce4f13ed654        nginx               "nginx -g 'daemon of…"   32 minutes ago      Up 32 minutes       0.0.0.0:32768->80/tcp                        condescending_wilbur

[root@zizhen02 ~]# docker port ngixv2

Error: No such container: ngixv2

[root@zizhen02 ~]# docker port nginxv2

443/tcp -> 0.0.0.0:443

80/tcp -> 0.0.0.0:8001

[root@zizhen02 ~]#

#

II. Docker Data Persistence and Data Sharing

Option 1: volumes based on the local filesystem. When running docker create or docker run, the -v flag maps a host directory (or a named volume) into the container as a data volume. This is volume management backed by the local filesystem.
Option 2: plugin-based volumes that support third-party storage backends, such as NAS or AWS.
Option 1 in detail:
(1) Managed data volumes: created automatically by the Docker daemon, at a fixed location.
VOLUME ["/var/lib/mysql"]
docker run -v mysql:/var/lib/mysql
List volumes
#docker volume ls
Delete a volume
#docker volume rm VOLUME_NAME
Create mysql1
#docker run -d --name mysql1 -e MYSQL_ALLOW_EMPTY_PASSWORD=true mysql
#docker ps
#docker volume ls
#docker volume inspect VOLUME_NAME
"Mountpoint":"/var/lib/docker/1342dsfsf321/_data"
Giving the volume a name (an alias) makes it easier to manage:
#docker run -d -v mysql:/var/lib/mysql --name mysql1 -e MYSQL_ALLOW_EMPTY_PASSWORD=true mysql
#docker volume ls
local       mysql
Enter the mysql1 container, create a database, exit, then delete mysql1.
Create mysql2 reusing the same mysql volume; the database created earlier is still there (see the sketch below):
#docker run -d -v mysql:/var/lib/mysql --name mysql2 -e MYSQL_ALLOW_EMPTY_PASSWORD=true mysql
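A hedged step-by-step of that persistence check (the database name demo is just an example):
#docker exec -it mysql1 /bin/bash      (inside: mysql -uroot, then: create database demo; then exit)
#docker rm -f mysql1
#docker run -d -v mysql:/var/lib/mysql --name mysql2 -e MYSQL_ALLOW_EMPTY_PASSWORD=true mysql
#docker exec -it mysql2 /bin/bash      (inside: mysql -uroot, then: show databases;  ->  demo is still listed)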

(2) Bind-mounted volumes: the exact mount location is specified by the user.
bind mounting
Just specify the one-to-one mapping between a local directory and a container directory:
docker run -v /home/aaa:/root/aaa
#more Dockerfile
FROM nginx:latest
WORKDIR /usr/share/nginx/html
COPY index.html  index.html
#docker run -d -v $(pwd):/usr/share/nginx/html  -p 80:80 --name web my-nginx
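A minimal check that the bind mount works (assumes the my-nginx image built from the Dockerfile above and an index.html in the current directory):
#curl 127.0.0.1:80                         (serves the index.html from the current directory)
#echo "hello bind mount" > index.html      (edit the file on the host)
#curl 127.0.0.1:80                         (the change is visible immediately, without rebuilding the image)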

Docker + bind mounting

NFS example ...

III. Docker Compose

Deploying WordPress with Docker
Deploy MySQL first
#docker run -d --name mysql -v /home/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=root -e MYSQL_DATABASE=wordpress mysql
#docker ps
Create the WordPress container
#docker run -d -e WORDPRESS_DB_HOST=mysql:3306 --link mysql -p 8080:80 wordpress
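To verify that WordPress is up (a minimal check; it normally answers with a redirect to the install page):
#docker ps
#curl -I 127.0.0.1:8080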


Docker Compose is a tool.
It defines a multi-container Docker application in a single yml file,
and with a single command it can create and manage all the containers defined in that file.
Default file name: docker-compose.yml
Three core concepts: Services, Networks, Volumes
Service: a Service represents one container. The container can be created from a Docker Hub image, or from an image built locally from a Dockerfile.
Starting a Service is similar to docker run: just as we can specify a network and volumes there, a Service can reference networks and volumes.
Using an image pulled from Docker Hub:
services:
  db:
    image: postgres:9.4
    volumes:
      - "db-data:/var/lib/postgresql/data"
    networks:
      - back-tier
This is equivalent to:
#docker run -d --network back-tier -v db-data:/var/lib/postgresql/data postgres:9.4
Using an image built locally:
services:
  worker:
    build: ./worker
    links:
      - db
      - redis
    networks:
      - back-tier

Volumes
services:
  db:
    image: postgres:9.4
    volumes:
      - "db-data:/var/lib/postgresql/data"      (db-data is defined separately, e.g. #docker volume create db-data)
    networks:
      - back-tier

services:
  worker:
    build: ./worker
    links:
      - db
      - redis
    networks:
      - back-tier
Definition of the networks:
networks:
  front-tier:
    driver: bridge
  back-tier:
    driver: bridge

version: '3'

services:

  wordpress:
    image: wordpress
    ports:
      - 8080:80
    environment:
      WORDPRESS_DB_HOST: mysql
      WORDPRESS_DB_PASSWORD: root
    networks:
      - my-bridge

  mysql:
    image: mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: wordpress
    volumes:
      - mysql-data:/var/lib/mysql
    networks:
      - my-bridge

volumes:
  mysql-data:

networks:
  my-bridge:
    driver: bridge
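With the file above saved as docker-compose.yml, the whole stack can be managed with a few commands (a minimal sketch):
#docker-compose up -d         (creates the my-bridge network, the mysql-data volume, and both containers)
#docker-compose ps            (lists the containers of the two services)
#docker-compose logs mysql    (shows one service's logs; add -f to follow)
#docker-compose down          (stops and removes the containers and the network; the named volume is kept)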

 

 

  

 

 

 
