Docker study notes

vim /usr/lib/systemd/system/docker.service

ExecStart=/usr/bin/docker daemon --bip=172.18.42.1/16 --registry-mirror=http://f71f084d.m.daocloud.io
systemctl daemon-reload
systemctl restart docker.service
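
A quick sanity check after the restart (not in the original notes) to confirm docker0 picked up the new address:

ip addr show docker0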


Change the docker0 IP address to 172.18.42.1
eth0 192.168.0.182
systemctl daemon-reload

docker128
docker0 172.18.42.1/16
eth0 192.168.1.106


docker130
docker0 172.17.42.1/16
eth0 192.168.1.104

 

On docker128, run: route add -net 172.17.0.0/16 gw 192.168.1.104
On docker130, run: route add -net 172.18.0.0/16 gw 192.168.1.106
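
These static routes only take effect if each host actually forwards packets between eth0 and docker0. A minimal check, assuming stock CentOS 7 defaults:

# Enable kernel IP forwarding on both hosts (Docker usually enables this itself):
sysctl -w net.ipv4.ip_forward=1
# Confirm the new route is installed:
ip route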

Start an nginx container on docker128:
docker run -d --name nginx128 -h nginx128 nginx

Enter the container:
docker exec -it nginx128 /bin/bash

Check the IP address:
ip a

root@nginx128:/# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
6: eth0@if7: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
inet 172.17.0.2/16 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe11:2/64 scope link
valid_lft forever preferred_lft forever


Start an nginx container on docker130:
docker run -d --name nginx130 -h nginx130 nginx

Enter the container:
docker exec -it nginx130 /bin/bash

Check the IP address:
ip a

root@nginx130:/# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:12:00:05 brd ff:ff:ff:ff:ff:ff
inet 172.18.0.5/16 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe12:5/64 scope link
valid_lft forever preferred_lft forever


From the container on 128, ping the 130 container's IP to see whether it is reachable:
root@nginx128:/# ping 172.18.0.5

No reply; the ping fails.

Check the firewall rules; this is what appears:

[root@localhost ~]# iptables -nvL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
924 71017 ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED
0 0 ACCEPT icmp -- * * 0.0.0.0/0 0.0.0.0/0
0 0 ACCEPT all -- lo * 0.0.0.0/0 0.0.0.0/0
1 52 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:22
12 384 REJECT all -- * * 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
173 18663 DOCKER all -- * docker0 0.0.0.0/0 0.0.0.0/0
173 18663 ACCEPT all -- * docker0 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ESTABLISHED
175 13704 ACCEPT all -- docker0 !docker0 0.0.0.0/0 0.0.0.0/0
0 0 ACCEPT all -- docker0 docker0 0.0.0.0/0 0.0.0.0/0
0 0 REJECT all -- * * 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited

Chain OUTPUT (policy ACCEPT 739 packets, 83695 bytes)
pkts bytes target prot opt in out source destination

Chain DOCKER (1 references)
pkts bytes target prot opt in out source destination
0 0 ACCEPT tcp -- !docker0 docker0 0.0.0.0/0 172.16.0.10 tcp dpt:80

Workaround: flush the iptables rules
iptables -F
iptables -t nat -F
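
Flushing every rule is a blunt instrument. A narrower alternative (an assumption, not what these notes originally did) is to accept forwarded traffic between the two container subnets and leave the rest of the ruleset alone:

iptables -I FORWARD -s 172.17.0.0/16 -d 172.18.0.0/16 -j ACCEPT
iptables -I FORWARD -s 172.18.0.0/16 -d 172.17.0.0/16 -j ACCEPT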

root@nginx128:/# ping 172.18.0.1
PING 172.18.0.1 (172.18.0.1): 56 data bytes
64 bytes from 172.18.0.1: icmp_seq=65 ttl=62 time=1.007 ms
64 bytes from 172.18.0.1: icmp_seq=66 ttl=62 time=0.370 ms
64 bytes from 172.18.0.1: icmp_seq=67 ttl=62 time=0.720 ms
64 bytes from 172.18.0.1: icmp_seq=68 ttl=62 time=0.733 ms


Capture packets on docker128:
tshark -i docker0 -f icmp

[root@localhost ~]# tshark -i docker0 -f icmp
Running as user "root" and group "root". This could be dangerous.
Capturing on 'docker0'
1 0.000000000 172.17.0.2 -> 172.18.0.1 ICMP 98 Echo (ping) request id=0x0017, seq=333/19713, ttl=62
2 0.000042877 172.18.0.1 -> 172.17.0.2 ICMP 98 Echo (ping) reply id=0x0017, seq=333/19713, ttl=64 (request in 1)
3 1.002079472 172.17.0.2 -> 172.18.0.1 ICMP 98 Echo (ping) request id=0x0017, seq=334/19969, ttl=62
4 1.002124671 172.18.0.1 -> 172.17.0.2 ICMP 98 Echo (ping) reply id=0x0017, seq=334/19969, ttl=64 (request in 3)
5 2.033006967 172.17.0.2 -> 172.18.0.1 ICMP 98 Echo (ping) request id=0x0017, seq=335/20225, ttl=62
6 2.033050178 172.18.0.1 -> 172.17.0.2 ICMP 98 Echo (ping) reply id=0x0017, seq=335/20225, ttl=64 (request in 5)
7 3.006344969 172.17.0.2 -> 172.18.0.1 ICMP 98 Echo (ping) request id=0x0017, seq=336/20481, ttl=62
8 3.006407373 172.18.0.1 -> 172.17.0.2 ICMP 98 Echo (ping) reply id=0x0017, seq=336/20481, ttl=64 (request in 7)
9 4.050510908 172.17.0.2 -> 172.18.0.1 ICMP 98 Echo (ping) request id=0x0017, seq=337/20737, ttl=62
10 4.050583655 172.18.0.1 -> 172.17.0.2 ICMP 98 Echo (ping) reply id=0x0017, seq=337/20737, ttl=64 (request in 9)
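
Note the ttl=62 on the echo requests: they left the container with a TTL of 64 and lost one hop at each of the two hosts that routed them, which confirms the packets really are routed across both Docker hosts rather than being answered locally.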



Docker networking solutions: Libnetwork (GRE, VXLAN)

Linux network namespaces in detail

[root@localhost ~]# mkdir /var/run/netns
[root@localhost ~]# ln -s /proc/2716/ns/net /var/run/netns/2716
[root@localhost ~]# ip netns ls
2716 (id: 0)
[root@localhost ~]# ip netns exec 2716 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
4: eth0@if5: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:11:00:01 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.17.0.1/16 scope global eth0
valid_lft forever preferred_lft forever
inet6 fe80::42:acff:fe11:1/64 scope link
valid_lft forever preferred_lft forever

[root@localhost ~]# ip netns exec 2716 ethtool -S eth0
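
The 2716 used above is the container's init PID on the host. A minimal sketch of how to look it up (the container name nginx130 is an assumption for illustration):

# Resolve the container's PID, then expose its netns to `ip netns`:
PID=$(docker inspect -f '{{.State.Pid}}' nginx130)
mkdir -p /var/run/netns
ln -s /proc/$PID/ns/net /var/run/netns/$PID
ip netns exec $PID ip addr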

[root@localhost ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eno16777736: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:7f:31:31 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.104/24 brd 192.168.1.255 scope global dynamic eno16777736
valid_lft 6669sec preferred_lft 6669sec
inet6 fe80::20c:29ff:fe7f:3131/64 scope link
valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP
link/ether 02:42:24:58:2d:43 brd ff:ff:ff:ff:ff:ff
inet 172.17.42.1/16 scope global docker0
valid_lft forever preferred_lft forever
inet6 fe80::42:24ff:fe58:2d43/64 scope link
valid_lft forever preferred_lft forever
5: vethb901ad0@if4: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP
link/ether 26:f0:5f:64:12:84 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet6 fe80::24f0:5fff:fe64:1284/64 scope link
valid_lft forever preferred_lft forever
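
Note how the interface indices pair up: inside the namespace eth0 appears as 4: eth0@if5, and on the host the matching end is 5: vethb901ad0@if4. These are the two ends of a single veth pair, each naming the other's ifindex, with the host end attached to docker0.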




OVS + DOCKER
Virtualize a switch on each physical machine:

 docker   docker          docker   docker
   |         |               |        |
  tap0     tap1             tap0    tap1
     \     /                   \    /
     ovs br0                  ovs br0
     /     \                  /     \
   eth0   eth1              eth0   eth1



docker131 192.168.18.131                          docker128 192.168.18.128

docker0 172.17.42.1/24                            docker0 172.17.43.1/24
   |                                                  |
  br0 ---> gre0 -------- eth0 (GRE tunnel) -------- gre0 <--- br0
 (ovs)                                              (ovs)


Install brctl:
yum -y install bridge-utils

Install OVS. First disable SELinux, then reboot:
vi /etc/selinux/config
SELINUX=disabled

http://rpm.pbone.net/index.php3/stat/4/idpl/31446019/dir/centos_7/com/openvswitch-2.4.0-1.el7.x86_64.rpm.html

[root@docker131 ~]# rpm -ivh openvswitch-2.4.0-1.el7.x86_64.rpm

[root@docker131 ~]# systemctl start openvswitch.service
[root@docker131 ~]# systemctl status openvswitch.service

[root@docker131 ~]# ovs-vsctl del-br br0
[root@docker131 ~]# ovs-vsctl add-br br0
[root@docker131 ~]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.18.128
[root@docker131 ~]# ovs-vsctl show

Attach br0 to the local docker0 bridge so that container traffic goes through OVS and across the tunnel:
brctl addif docker0 br0

ip link set dev br0 up
ip link set dev docker0 up

iptables -F
iptables -t nat -F

ip route add 172.17.0.0/16 dev docker0
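
The notes above only show the docker131 side. The docker128 side would mirror them, with remote_ip pointing back at docker131; a sketch under that assumption:

# On docker128:
ovs-vsctl add-br br0
ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre option:remote_ip=192.168.18.131
brctl addif docker0 br0
ip link set dev br0 up
ip route add 172.17.0.0/16 dev docker0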


Docker management tools
1. Getting started with Shipyard
2. Getting started with cAdvisor

Swarm, a tool for managing a Docker cluster, can be deployed on a standalone node.

How Swarm works: the Docker client sends a request to Swarm; Swarm processes the request and forwards it to the appropriate Docker node; the Docker node performs the operation and returns a response.
1. Run one command to create a cluster.
2. Run another command to start Swarm.
3. On each host running Docker Engine, run a command that connects it to the cluster created above (a sketch of all three steps follows).
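
A minimal sketch of those three steps using the legacy standalone swarm image with Docker Hub token discovery (the ports and the <cluster_id>/<node_ip> placeholders are assumptions):

# 1. Create a cluster; this prints a <cluster_id> token:
docker run --rm swarm create
# 2. Start the Swarm manager:
docker run -d -p 4000:2375 swarm manage token://<cluster_id>
# 3. On each Docker Engine host, join the cluster:
docker run -d swarm join --advertise=<node_ip>:2375 token://<cluster_id>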

 

Swarm manages a group of Docker Engines as a single cluster and provides label, scheduling, and filter capabilities;
the scheduling part lets users plug in their own scheduling strategy.

1. docker run -e "constraint:operatingsystem==fedora"
2. docker run -e "constraint:storagedriver==aufs"
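
For these filters to match anything, each node's Docker Engine has to be started with the corresponding engine labels; a hedged sketch using the Docker 1.x daemon syntax:

docker daemon --label operatingsystem=fedora --label storagedriver=aufs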


Getting started with the Kubernetes API


clients
   |  get / post / patch / GET (watch) / HTTP redirect / HTTP proxy
   v
API Server
   |
Kubernetes

Summary of the Kubernetes REST API

NODES
GET /api/v1/nodes                 list Node objects
POST /api/v1/nodes                create a Node object
DELETE /api/v1/nodes/{name}       delete a Node object
GET /api/v1/nodes/{name}          get a Node object
PATCH /api/v1/nodes/{name}        partially update a Node object
PUT /api/v1/nodes/{name}          replace a Node object

NAMESPACES
GET /api/v1/namespaces                 list Namespace objects
POST /api/v1/namespaces                create a Namespace object
DELETE /api/v1/namespaces/{name}       delete a Namespace object
GET /api/v1/namespaces/{name}          get a Namespace object
PATCH /api/v1/namespaces/{name}        partially update a Namespace object
PUT /api/v1/namespaces/{name}          replace a Namespace object
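
As a hedged illustration of the endpoints above (the API server address and insecure port are assumptions), they can be exercised directly with curl:

# List namespaces:
curl http://192.168.1.107:8080/api/v1/namespaces
# Create a namespace named "test":
curl -X POST -H 'Content-Type: application/json' \
  -d '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"test"}}' \
  http://192.168.1.107:8080/api/v1/namespaces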


Browse the API documentation with Swagger-UI.

 


Install the etcd server on 192.168.1.107:
curl -L https://github.com/coreos/etcd/releases/download/v3.0.6/etcd-v3.0.6-linux-amd64.tar.gz -o etcd-v3.0.6-linux-amd64.tar.gz
tar xzvf etcd-v3.0.6-linux-amd64.tar.gz && cd etcd-v3.0.6-linux-amd64
./etcd --version

etcd Version: 3.0.6
Git SHA: 9efa00d
Go Version: go1.6.3
Go OS/Arch: linux/amd64

[root@etcd etc]# etcd --data-dir=/data/etcd/data --listen-client-urls 'http://0.0.0.0:2379' --listen-peer-urls 'http://0.0.0.0:2380' --advertise-client-urls 'http://0.0.0.0:2379' &
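
A quick smoke test with the bundled etcdctl (this assumes the v2 API, the etcdctl default in 3.0.x):

./etcdctl --endpoints http://192.168.1.107:2379 set /test hello
./etcdctl --endpoints http://192.168.1.107:2379 get /test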

Install Kubernetes:
https://github.com/kubernetes/kubernetes/releases/download/v1.4.0-alpha.2/kubernetes.tar.gz

 



Docker private registry

docker pull registry

docker run -d -p 5000:5000 --privileged=true registry

docker ps -a

Tag the image:

docker tag nginx localhost:5000/nginx_mysql_produment

Push it to the private registry:

docker push localhost:5000/nginx_mysql_produment
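
Pulling this image from another host requires that host's Docker daemon to trust the plain-HTTP registry; a sketch assuming the registry host's IP (not given in the original notes) is 192.168.1.106:

# In the client daemon's options (Docker 1.x syntax):
docker daemon --insecure-registry 192.168.1.106:5000
docker pull 192.168.1.106:5000/nginx_mysql_produment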
