Docker Study Notes

Docker

Installation

# Install the gcc toolchain
yum -y install gcc
yum -y install gcc-c++
## Remove old versions
yum remove docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine
# Install
yum install -y yum-utils
yum-config-manager \
    --add-repo \
    https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast

ce = Community Edition, ee = Enterprise Edition

yum install docker-ce[docker-ee] docker-ce-cli[docker-ee-cli] containerd.io docker-compose-plugin

Install a specific version

yum install docker-ce-<VERSION_STRING> docker-ce-cli-<VERSION_STRING> containerd.io docker-compose-plugin

Uninstall

sudo yum remove docker-ce docker-ce-cli containerd.io docker-compose-plugin
sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd

Start/stop the service

systemctl start/stop/status/enable/disable docker
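A common smoke test after starting the service, using Docker's official hello-world test image (not from the original notes, but standard practice):

systemctl start docker
docker run hello-world   # pulls the test image and prints a greeting if the install works
docker ps -a             # the exited hello-world container shows up here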

Commands

Check version / system info

docker version/info

Configure the Aliyun registry mirror

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://todnba9t.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker

Image commands

List images [-a all; -q IDs only]

docker images [-aq]

Search Docker Hub for images [filter: stars >= 1000]

docker search mysql [-f stars=1000]

Pull an image from Docker Hub [optionally pin a tag; the default is latest]

docker pull mysql[:5.7]

Remove a specific image [or several at once]

docker rmi -f <image ID> [<image ID> <image ID>]

Remove all images

docker rmi -f $(docker images -aq)

Container commands

Run an image as a container

docker run [options] image
	# --name="name"		# name the container
	# -d					# run in the background
	# -it					# run interactively and enter the container
	# -p					# publish a port
	# 	-p # ip:hostPort:containerPort  map a host port on a given IP to a container port
	# 	-p # hostPort:containerPort
	# 	-p # containerPort
	# -P					# publish all exposed ports to random host ports
	# eg: docker run -it centos /bin/bash 	

exit quits and stops the container; Ctrl+P+Q detaches without stopping it

exit

List containers [-a all, including stopped][-q IDs only][-n=2 the two most recently created]

docker ps [-a][-q][-n=2]

Remove a container. A running container cannot be removed; force it with rm -f

docker rm <container ID>

Remove all containers

docker rm -f $(docker ps -aq)

Start / stop / restart / kill

docker start/stop/restart/kill <container ID>

Other

View logs

docker logs -tf <container ID>
# num = how many log lines to show
docker logs --tail num <container ID>

Show container processes

docker top <container ID>

Show container metadata

docker inspect <container ID>

Enter a container

docker exec <container ID> # opens a new shell inside the container to work in, e.g. docker exec -it bd2a1db199b7 /bin/bash
docker attach <container ID> # attaches to the container's running terminal without starting a new process, e.g. docker attach bd2a1db199b7

Copy files from a container to the host

docker cp <container ID>:/path/in/container /host/path # docker cp bd2a1db199b7:/home/test.java /home

Show container resource usage (CPU, memory, etc.)

docker stats

Install Nginx with Docker

docker search nginx
docker pull nginx
docker run --name nginx01 -d -p 3344:80 nginx

Install Tomcat with Docker

docker pull tomcat:9.0
docker run -it --rm tomcat:9.0  ## the container is removed as soon as it exits; handy for testing
docker run --name tomcat01 -d -p 3344:8080 tomcat:9.0

Install Elasticsearch + Kibana with Docker

docker run -d --name es01 -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -e ES_JAVA_OPTS="-Xms64m -Xmx512m" elasticsearch:7.3.0
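Only the Elasticsearch half is shown above; a hedged sketch for the Kibana half (the container name kibana01 and the --link wiring are my assumptions, not from the original notes):

# Kibana must reach ES; --link gives it name resolution for es01 on the default bridge
docker run -d --name kibana01 --link es01 -p 5601:5601 \
    -e ELASTICSEARCH_HOSTS=http://es01:9200 kibana:7.3.0   # version matched to the ES image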

Install MySQL with Docker

docker pull mysql:5.7
docker run -d -p 3310:3306 -e MYSQL_ROOT_PASSWORD=123456 --name mysql01 -v /home/mysql/conf:/etc/mysql/conf.d -v /home/mysql/data:/var/lib/mysql mysql:5.7

Committing your own image

docker commit -a="wendy" -m="tomcat with root page" 027db740109a tomcat8-with-root-page:1.0
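For context, a minimal sketch of the whole commit workflow (027db740109a is the container ID from the command above; the ports are illustrative):

docker run -it -p 8080:8080 tomcat:8      # start a container, then modify it (e.g. add files under webapps/ROOT)
docker commit -a="wendy" -m="tomcat with root page" 027db740109a tomcat8-with-root-page:1.0
docker run -d -p 8081:8080 tomcat8-with-root-page:1.0   # containers from the new image carry the changes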

Volumes

Mounting volumes

## hostPath:containerPath; check the Mounts block of docker inspect <container ID> to confirm the mount worked
docker run -it -v /home/share:/home centos
	# -v containerPath 							# anonymous mount  -v /etc/nginx
	# -v volumeName:containerPath					  # named mount  -v juming:/etc/nginx
	# -v /hostPath:containerPath  			   # bind mount to a specific path  -v /home/nginx:/etc/nginx
	# -v path:path:ro/rw  	 # set permissions (ro -> read-only, rw -> read-write)  -v juming:/etc/nginx:ro

Inspecting mounts

# List anonymous/named volumes [inspect <name>: details incl. the host directory][prune: delete all unused][rm: delete one][create: new]
docker volume ls [inspect <name>][prune][rm][create]
[root@localhost /]# docker volume ls
DRIVER              VOLUME NAME
local               9c234d2dc4b5124fe85a11cc38fe066fd6e6b2d5be90587b0e48c31af91369d4
local               juming
[root@localhost /]# docker volume inspect 9c234d2dc4b5124fe85a11cc38fe066fd6e6b2d5be90587b0e48c31af91369d4

Sharing volumes between containers

# First start a container that declares mounts ["volume1","volume2"]
docker run -it --name docker01 wendy-centos:1.0 /bin/bash
## --volumes-from a container that has mounts gives data sharing (copy-style sharing; removing docker01 does not affect docker02)
docker run -it --name docker02 --volumes-from docker01 wendy-centos:1.0 /bin/bash
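A quick verification sketch, assuming the volume1/volume2 mounts that wendy-centos:1.0 declares:

# inside docker01: write a file into the shared volume
touch /volume1/shared.txt
# inside docker02: the same file is visible
ls /volume1          # -> shared.txt
# back on the host: removing docker01 leaves docker02's copy of the data intact
docker rm -f docker01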

Dockerfile

Dockerfile volume mounts

Contents of dockerfile1

FROM centos
VOLUME ["volume1","volume2"]  # anonymous mounts
CMD echo "-----end-----"
CMD /bin/bash

Build

docker build -f dockerfile1 -t wendy-centos:1.0 .


Common Dockerfile instructions

FROM 					# base image; everything builds from here
MAINTAINER			 	# image author  name<email>  (deprecated in favor of LABEL maintainer=...)
RUN						# command to run while building the image
ADD						# add content to the image (auto-extracts archives)
WORKDIR					# the image's working directory
VOLUME					# mount points
EXPOSE					# ports to expose
CMD						# command to run when the container starts; only the last one takes effect, and docker run arguments replace it
ENTRYPOINT				# command to run when the container starts; docker run arguments are appended to it
ONBUILD					# trigger instruction that runs when this image is used as the base of another build
COPY					# like ADD, copies files into the image
ENV						# set environment variables at build time

A first build

Build a CentOS image that includes vim and ifconfig

[root@localhost docker-test-v]# cat mydockerfile-centos

FROM centos:7
MAINTAINER wendy<zhiwen.ji@qq.com>

ENV MYPATH /usr/local
WORKDIR $MYPATH

RUN yum -y install vim
RUN yum -y install net-tools

EXPOSE 8888

CMD echo $MYPATH
CMD echo "-----end-----"

CMD /bin/bash

Build command

docker build -f mydockerfile-centos -t mydfcentos:0.1 .

Stock CentOS vs the built image

Stock:

[root@localhost ~]# docker run -it centos:7 /bin/bash
[root@8d051480d5af /]# ls
anaconda-post.log  bin  dev  etc  home  lib  lib64  media  mnt  opt  proc  root  run  sbin  srv  sys  tmp  usr  var
[root@8d051480d5af /]# vim a
bash: vim: command not found
[root@8d051480d5af /]# ifconfig
bash: ifconfig: command not found

Built:

[root@localhost docker-test-v]# docker run -it mydfcentos:0.1 /bin/bash
[root@c26533c126c0 local]# pwd
/usr/local
[root@c26533c126c0 local]# ls
bin  etc  games  include  lib  lib64  libexec  sbin  share  src
[root@c26533c126c0 local]# vim a
[root@c26533c126c0 local]# ls
a  bin  etc  games  include  lib  lib64  libexec  sbin  share  src
[root@c26533c126c0 local]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.17.0.3  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:ac:11:00:03  txqueuelen 0  (Ethernet)
        RX packets 8  bytes 648 (648.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

View build history

# docker history <image ID>
[root@localhost docker-test-v]# docker history 35c22c10d0f2
IMAGE               CREATED             CREATED BY                                      SIZE                COMMENT
35c22c10d0f2        13 minutes ago      /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "/bin…   0B                  
34421c0487cf        13 minutes ago      /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "echo…   0B                  
684cecdf472a        13 minutes ago      /bin/sh -c #(nop)  CMD ["/bin/sh" "-c" "echo…   0B                  
1901c612ec6a        13 minutes ago      /bin/sh -c #(nop)  EXPOSE 8888                  0B                  
bece07184685        13 minutes ago      /bin/sh -c yum -y install net-tools             177MB               
f4f90fc61e6d        13 minutes ago      /bin/sh -c yum -y install vim                   232MB               
8f0931e4ba0a        13 minutes ago      /bin/sh -c #(nop) WORKDIR /usr/local            0B                  
113d7c9b5a7b        13 minutes ago      /bin/sh -c #(nop)  ENV MYPATH=/usr/local        0B                  
da8cae163faf        13 minutes ago      /bin/sh -c #(nop)  MAINTAINER wendy<zhiwen.j…   0B                  
eeb6ee3f44bd        10 months ago       /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B                  
<missing>           10 months ago       /bin/sh -c #(nop)  LABEL org.label-schema.sc…   0B                  
<missing>           10 months ago       /bin/sh -c #(nop) ADD file:b3ebbe8bd304723d4…   204MB

CMD vs ENTRYPOINT

CMD

[root@localhost docker-test-v]# vim mydf-cmd-test
FROM centos
CMD ["ls","-a"]
[root@localhost docker-test-v]# docker build -f mydf-cmd-test -t centos-cmd-test .
Sending build context to Docker daemon  4.096kB
Step 1/2 : FROM centos
 ---> 5d0da3dc9764
Step 2/2 : CMD ["ls","-a"]
 ---> Running in 2b875583c5b5
Removing intermediate container 2b875583c5b5
 ---> fa2601e5666e
Successfully built fa2601e5666e
Successfully tagged centos-cmd-test:latest
[root@localhost docker-test-v]# docker run fa2601e5666e
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
[root@localhost docker-test-v]# docker run fa2601e5666e -l
docker: Error response from daemon: OCI runtime create failed: runc create failed: unable to start container process: exec: "-l": executable file not found in $PATH: unknown.
ERRO[0000] error waiting for container: context canceled 

ENTRYPOINT

[root@localhost docker-test-v]# vim mydf-entry-test
FROM centos
ENTRYPOINT ["ls","-a"]
[root@localhost docker-test-v]# docker build -f mydf-entry-test -t centos-entry-test .
Sending build context to Docker daemon   5.12kB
Step 1/2 : FROM centos
 ---> 5d0da3dc9764
Step 2/2 : ENTRYPOINT ["ls","-a"]
 ---> Running in 1adc0700047e
Removing intermediate container 1adc0700047e
 ---> 784d65e0819a
Successfully built 784d65e0819a
Successfully tagged centos-entry-test:latest
[root@localhost docker-test-v]# docker run 784d65e0819a
.
..
.dockerenv
bin
dev
etc
home
lib
lib64
lost+found
media
mnt
opt
proc
root
run
sbin
srv
sys
tmp
usr
var
[root@localhost docker-test-v]# docker run 784d65e0819a -l
total 0
drwxr-xr-x.   1 root root   6 Aug 11 06:27 .
drwxr-xr-x.   1 root root   6 Aug 11 06:27 ..
-rwxr-xr-x.   1 root root   0 Aug 11 06:27 .dockerenv
lrwxrwxrwx.   1 root root   7 Nov  3  2020 bin -> usr/bin
drwxr-xr-x.   5 root root 340 Aug 11 06:27 dev
drwxr-xr-x.   1 root root  66 Aug 11 06:27 etc
drwxr-xr-x.   2 root root   6 Nov  3  2020 home
lrwxrwxrwx.   1 root root   7 Nov  3  2020 lib -> usr/lib
lrwxrwxrwx.   1 root root   9 Nov  3  2020 lib64 -> usr/lib64
drwx------.   2 root root   6 Sep 15  2021 lost+found
drwxr-xr-x.   2 root root   6 Nov  3  2020 media
drwxr-xr-x.   2 root root   6 Nov  3  2020 mnt
drwxr-xr-x.   2 root root   6 Nov  3  2020 opt
dr-xr-xr-x. 115 root root   0 Aug 11 06:27 proc
dr-xr-x---.   2 root root 162 Sep 15  2021 root
drwxr-xr-x.  11 root root 163 Sep 15  2021 run
lrwxrwxrwx.   1 root root   8 Nov  3  2020 sbin -> usr/sbin
drwxr-xr-x.   2 root root   6 Nov  3  2020 srv
dr-xr-xr-x.  13 root root   0 Aug 11 02:52 sys
drwxrwxrwt.   7 root root 171 Sep 15  2021 tmp
drwxr-xr-x.  12 root root 144 Sep 15  2021 usr
drwxr-xr-x.  20 root root 262 Sep 15  2021 var
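
The two can also be combined: ENTRYPOINT fixes the executable, CMD supplies default arguments that docker run arguments replace. A minimal sketch (this file is my illustration, not from the original notes):

FROM centos
ENTRYPOINT ["ls"]
CMD ["-a"]          # default argument; anything passed to docker run replaces it
# docker run <image>      -> runs: ls -a
# docker run <image> -l   -> runs: ls -l  (CMD replaced, ENTRYPOINT kept)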

Building a Tomcat image

  1. Prepare the archives

    apache-tomcat-9.0.58.tar.gz jdk-8u211-linux-x64.tar.gz

  2. Prepare the Dockerfile

    readme.txt: build notes

    Dockerfile is the official filename; docker build looks for it automatically, so -f is not needed

    ADD extracts archives automatically

    FROM centos
    MAINTAINER wendy<zhiwen.ji@qq.com>
    
    COPY readme.txt /usr/local/readme.txt
    ADD apache-tomcat-9.0.58.tar.gz /usr/local/
    ADD jdk-8u211-linux-x64.tar.gz /usr/local/
    
    RUN yum -y install vim
    
    ENV MYPATH /usr/local/
    WORKDIR $MYPATH
    ENV JAVA_HOME /usr/local/jdk1.8.0_211/
    ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    ENV CATALINA_HOME /usr/local/apache-tomcat-9.0.58/
    ENV CATALINA_BASE /usr/local/apache-tomcat-9.0.58/
    ENV PATH $PATH:$JAVA_HOME/bin:$CATALINA_HOME/bin:$CATALINA_HOME/lib
    
    EXPOSE 8080
    CMD /usr/local/apache-tomcat-9.0.58/bin/startup.sh && tail -f /usr/local/apache-tomcat-9.0.58/logs/catalina.out
    
  3. Build

    docker build -t diy-tomcat .

  4. Run

    docker run --name diytomcat01 -d -p 9090:8080 -v /usr/local/docker-tomcat/test/:/usr/local/apache-tomcat-9.0.58/webapps/test/ -v /usr/local/docker-tomcat/logs/:/usr/local/apache-tomcat-9.0.58/logs/ diy-tomcat 
    
  5. Test the mounts

    On the host the logs are visible under logs/; create an app under test/ (HTML-only, for testing) and if it is reachable in the browser, the mounts work

    [root@localhost docker-tomcat]# ls
    apache-tomcat-9.0.58.tar.gz  Dockerfile  jdk-8u211-linux-x64.tar.gz  logs  readme.txt  test
    [root@localhost docker-tomcat]# ls logs/
    catalina.2022-08-11.log  host-manager.2022-08-11.log  localhost_access_log.2022-08-11.txt
    catalina.out             localhost.2022-08-11.log     manager.2022-08-11.log
    [root@localhost docker-tomcat]# ls test/
    index.html  WEB-INF
    

Publishing images

Publishing to Docker Hub

Register a Docker Hub account first, then log in

docker login -u zhiwenj
password: ****

Push: author/name:tag

docker push zhiwenj/diy-tomcat:1.0

Error: An image does not exist locally with the tag: wendy/diy-tomcat

The local image is tagged latest with no version number, so re-tag it with a version first

docker tag 330f41ec0790 zhiwenj/diy-tomcat:1.0

Then push again:
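
docker push zhiwenj/diy-tomcat:1.0   # same push as above, now matching the new tag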

Publishing to Aliyun

Register - log in - set a registry password - create a namespace - create a repository - read the repo's instructions - push

docker login --username=wenenenenen registry.cn-hangzhou.aliyuncs.com
docker tag [ImageId] registry.cn-hangzhou.aliyuncs.com/wendy-docker-test/test-01:[image tag]
docker push registry.cn-hangzhou.aliyuncs.com/wendy-docker-test/test-01:[image tag]

Command summary

docker-study

Docker networking

Understanding docker0

  1. the loopback address  2. the VM's NIC  3. docker0, generated automatically when the Docker service starts
[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:ed:0c:8f brd ff:ff:ff:ff:ff:ff
    inet 192.168.137.3/24 brd 192.168.137.255 scope global dynamic ens33
       valid_lft 1199sec preferred_lft 1199sec
    inet6 fe80::e24c:801:d03e:bf59/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:b1:96:f1:91 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever

Start a Tomcat container and run ip addr: it fails, because the slimmed-down image does not ship the ip command. Install it by hand inside the container with apt update && apt install -y iproute2, then rerun docker exec -it tomcat01 ip addr

The container now has a new interface, 12: eth0@if13, with IP 172.17.0.3, assigned by Docker when the container started

[root@localhost ~]# docker run -d --name tomcat01 tomcat
[root@localhost ~]# docker exec -it tomcat01 ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
12: eth0@if13: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default 
    link/ether 02:42:ac:11:00:03 brd ff:ff:ff:ff:ff:ff link-netnsid 0
    inet 172.17.0.3/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever

Ping the container's address 172.17.0.3 from the host: it succeeds

[root@localhost ~]# ping 172.17.0.3
PING 172.17.0.3 (172.17.0.3) 56(84) bytes of data.
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.062 ms
64 bytes from 172.17.0.3: icmp_seq=2 ttl=64 time=0.039 ms
^C
--- 172.17.0.3 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.039/0.050/0.062/0.013 ms
  • Linux can ping into the Docker container, since docker0 has IP 172.17.0.1 and the container 172.17.0.3.

  • How it works: every time a container starts, Docker assigns it an available IP. Installing Docker creates the docker0 (bridge) NIC, which works in bridged mode using veth-pair technology (a veth pair is a pair of virtual interfaces created together: one end plugs into the container's network stack, the other into the bridge, acting as a pipe between them).

  • Now exit the container, go back to the host, and look at the host's interfaces again:

[root@localhost ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:ed:0c:8f brd ff:ff:ff:ff:ff:ff
    inet 192.168.137.3/24 brd 192.168.137.255 scope global dynamic ens33
       valid_lft 1658sec preferred_lft 1658sec
    inet6 fe80::e24c:801:d03e:bf59/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP 
    link/ether 02:42:b1:96:f1:91 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:b1ff:fe96:f191/64 scope link 
       valid_lft forever preferred_lft forever
13: vethf58de89@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP 
    link/ether 22:8b:fc:74:f1:d6 brd ff:ff:ff:ff:ff:ff link-netnsid 1
    inet6 fe80::208b:fcff:fe74:f1d6/64 scope link 
       valid_lft forever preferred_lft forever
  • A new interface appeared: 13: vethf58de89@if12, matching the container's 12: eth0@if13 (see the sketch below)
  • Containers can ping each other: traffic flows container1 → docker0 → container2
  • All Docker network interfaces are virtual, so forwarding is efficient. Removing a container removes its veth pair as well
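
A small sketch for pairing up the two ends by their interface indices (names taken from the output above):

# container side: "12: eth0@if13" - interface 12, whose peer is host interface 13
docker exec -it tomcat01 ip addr show eth0
# host side: "13: vethf58de89@if12" - interface 13, whose peer is container interface 12
ip link | grep veth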

The --link option

Each container start gets a freshly assigned IP, so application config files keep having to change. To reach a container by a fixed name, such as the service or container name, --link can be used

Add the --link option to the run command so the container can be reached by container name

[root@localhost ~]# docker run -d --name tomcat02 --link tomcat01 tomcat:8
5c0c73dd4c0b8eba0713e4290c93daf8638335838f9d7902e4f6ff6a4e6adff1
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
OCI runtime exec failed: exec failed: unable to start container process: exec: "ping": executable file not found in $PATH: unknown
# Again the slim image lacks ping; enter the container and install it first: apt update && apt install -y inetutils-ping
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (172.17.0.3): 56 data bytes
64 bytes from 172.17.0.3: icmp_seq=0 ttl=64 time=0.073 ms
64 bytes from 172.17.0.3: icmp_seq=1 ttl=64 time=0.057 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.057/0.065/0.073/0.000 ms

The reverse direction does not work though, because --link just writes the linked container's name/ID into the starting container's /etc/hosts, i.e. it adds an IP-to-name mapping:

[root@localhost ~]# docker exec -it tomcat02 cat /etc/hosts
127.0.0.1	localhost
::1	localhost ip6-localhost ip6-loopback
fe00::0	ip6-localnet
ff00::0	ip6-mcastprefix
ff02::1	ip6-allnodes
ff02::2	ip6-allrouters
172.17.0.3	tomcat01 097453019a95
172.17.0.2	5c0c73dd4c0b

This approach is no longer recommended.

Custom networks

List networks

[root@localhost ~]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
a07b22356262        bridge              bridge              local
71deafd68afe        host                host                local
835e96fb07ef        none                null                local
[root@localhost ~]# docker network inspect a07b22356262
[
    {
        "Name": "bridge",
        "Id": "a07b22356262426a754dc5b5c7ee3bf10286dc21f8e983e557d3e3565864b21b",
        "Created": "2022-08-12T09:28:03.322619652+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": null,
            "Config": [
                {
                    "Subnet": "172.17.0.0/16",
                    "Gateway": "172.17.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "097453019a95b422d58cb0b7a5b5ebd02e5371aa0db18076e86727410662f86d": {
                "Name": "tomcat01",
                "EndpointID": "d111c8fcbc21eefdb1c3b91382c1afd0a148a2452ff2bcced7d6d25bcda77212",
                "MacAddress": "02:42:ac:11:00:03",
                "IPv4Address": "172.17.0.3/16",
                "IPv6Address": ""
            },
            "5c0c73dd4c0b8eba0713e4290c93daf8638335838f9d7902e4f6ff6a4e6adff1": {
                "Name": "tomcat02",
                "EndpointID": "70c3fde79f53812bd15a135a84424f73b8c83b76b28c132806e30dfa76e1d1c1",
                "MacAddress": "02:42:ac:11:00:02",
                "IPv4Address": "172.17.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {
            "com.docker.network.bridge.default_bridge": "true",
            "com.docker.network.bridge.enable_icc": "true",
            "com.docker.network.bridge.enable_ip_masquerade": "true",
            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
            "com.docker.network.bridge.name": "docker0",
            "com.docker.network.driver.mtu": "1500"
        },
        "Labels": {}
    }
]

Docker's network modes:

  • bridge: bridged (Docker's default)
  • none: no networking
  • host: share the host's network (rarely used)

docker run actually carries a default option, --net bridge, where bridge is the name of the docker0 network (the NAME column in docker network ls). To avoid docker0, create a new network

docker run -d --name tomcat01 tomcat:8 is equivalent to

docker run -d --name tomcat01 --net bridge tomcat:8

# --driver bridge: the mode; optional, bridge is the default
# --subnet 192.168.0.0/16: the subnet; a /16 allows ~65,534 container addresses, a /24 only 254
# --gateway 192.168.0.1: the gateway, i.e. the bridge's own address on this network
[root@localhost ~]# docker network create --driver bridge --subnet 192.168.0.0/16 --gateway 192.168.0.1 mynet
30a3d06a966fb5e612ee19b69779a6386fcfc4b0853318d7d368b5db10b35687
[root@localhost ~]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
a07b22356262        bridge              bridge              local
71deafd68afe        host                host                local
30a3d06a966f        mynet               bridge              local
835e96fb07ef        none                null                local

Start containers on the custom network

[root@localhost ~]# docker run -d --name tomcat01 --net mynet tomcat:8
2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553
[root@localhost ~]# docker run -d --name tomcat02 --net mynet tomcat:8
2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca
[root@localhost ~]# docker network inspect mynet
[
    {
        "Name": "mynet",
        "Id": "30a3d06a966fb5e612ee19b69779a6386fcfc4b0853318d7d368b5db10b35687",
        "Created": "2022-08-12T17:49:24.376465717+08:00",
        "Scope": "local",
        "Driver": "bridge",
        "EnableIPv6": false,
        "IPAM": {
            "Driver": "default",
            "Options": {},
            "Config": [
                {
                    "Subnet": "192.168.0.0/16",
                    "Gateway": "192.168.0.1"
                }
            ]
        },
        "Internal": false,
        "Attachable": false,
        "Ingress": false,
        "ConfigFrom": {
            "Network": ""
        },
        "ConfigOnly": false,
        "Containers": {
            "2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca": {
                "Name": "tomcat02",
                "EndpointID": "793864aa1c60c1b7774c39b019cea7da1d5f3b8330eb53a17c291d2d66c4f269",
                "MacAddress": "02:42:c0:a8:00:03",
                "IPv4Address": "192.168.0.3/16",
                "IPv6Address": ""
            },
            "2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553": {
                "Name": "tomcat01",
                "EndpointID": "4df95d14036118cc973684032d9418b25ea6f163dca774e42efb76b21d00b154",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/16",
                "IPv6Address": ""
            }
        },
        "Options": {},
        "Labels": {}
    }
]
## The two Tomcats on the custom network can ping each other, by service name, container name, or container ID
apt update && apt install -y inetutils-ping
# Inside tomcat01, installing ping failed with: Temporary failure resolving 'deb.debian.org'
# The custom network had no working DNS; fix: add a dns entry to /etc/docker/daemon.json
{
  "registry-mirrors": ["https://todnba9t.mirror.aliyuncs.com"],
  "dns": ["8.8.8.8","114.114.114.114"]
}
# Test pinging each other
[root@localhost ~]# docker exec -it tomcat01 ping tomcat02
PING tomcat02 (192.168.0.3): 56 data bytes
64 bytes from 192.168.0.3: icmp_seq=0 ttl=64 time=0.062 ms
64 bytes from 192.168.0.3: icmp_seq=1 ttl=64 time=0.061 ms
^C--- tomcat02 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.061/0.061/0.062/0.000 ms
[root@localhost ~]# docker exec -it tomcat02 ping tomcat01
PING tomcat01 (192.168.0.2): 56 data bytes
64 bytes from 192.168.0.2: icmp_seq=0 ttl=64 time=0.048 ms
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.059 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.048/0.053/0.059/0.000 ms

Connecting different networks/subnets

# Create tomcat-d01 and d02; by default they use the docker0 network
[root@localhost /]# docker run --name tomcat-d01 -d tomcat:8
30adef1930b3749c663a0a75e3c8cfe23dae5aad75f376e2e7026739004fd386
[root@localhost /]# docker run --name tomcat-d02 -d tomcat:8
c839f8e0a627cf4da0880ed82c5c62ffdedee17b3a05b8182ccacebb2c889a38

# Connect command: docker network connect <network> <container name/ID>
[root@localhost /]# docker exec -it tomcat-d01 ping tomcat01
ping: unknown host
[root@localhost /]# docker network connect mynet tomcat-d01
[root@localhost /]# docker exec -it tomcat-d01 ping tomcat01
PING tomcat01 (192.168.0.2): 56 data bytes
64 bytes from 192.168.0.2: icmp_seq=0 ttl=64 time=0.064 ms
64 bytes from 192.168.0.2: icmp_seq=1 ttl=64 time=0.060 ms
^C--- tomcat01 ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.060/0.062/0.064/0.000 ms
#   The reverse also pings; the connection is bidirectional
[root@localhost /]# docker exec -it tomcat01 ping tomcat-d01
PING tomcat-d01 (192.168.0.4): 56 data bytes
64 bytes from 192.168.0.4: icmp_seq=0 ttl=64 time=0.047 ms
64 bytes from 192.168.0.4: icmp_seq=1 ttl=64 time=0.061 ms
64 bytes from 192.168.0.4: icmp_seq=2 ttl=64 time=0.060 ms
^C--- tomcat-d01 ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max/stddev = 0.047/0.056/0.061/0.000 ms
# tomcat-d02 was never connected, so it still cannot ping
[root@localhost /]# docker exec -it tomcat-d02 ping tomcat01
ping: unknown host
[root@localhost /]# docker exec -it tomcat-d02 ping tomcat02
ping: unknown host

## After connecting, docker network inspect mynet shows that container tomcat-d01 has been placed into the mynet network
# This gives one container two IPs, similar to Aliyun's public/private addresses
......
"Containers": {
            "2052b146aa34f66f4de9044992cc0fe4538885dbf45d00e7a1273d2bbcae4bca": {
                "Name": "tomcat02",
                "EndpointID": "7b80dd866a6c9c36e189dff2ea1a535179983ac005d35e5f93d476840b507f9a",
                "MacAddress": "02:42:c0:a8:00:03",
                "IPv4Address": "192.168.0.3/16",
                "IPv6Address": ""
            },
            "2ae7d056f24ecd09a10cda5203e78fea2de793138582fab50a62c0eb55927553": {
                "Name": "tomcat01",
                "EndpointID": "d9d9ed925d3d550046aefb237570326e8b9fccf37883bc83e71570c8e1342f5e",
                "MacAddress": "02:42:c0:a8:00:02",
                "IPv4Address": "192.168.0.2/16",
                "IPv6Address": ""
            },
            "30adef1930b3749c663a0a75e3c8cfe23dae5aad75f376e2e7026739004fd386": {
                "Name": "tomcat-d01",
                "EndpointID": "cdeb36cf8ec0967b1d10208d4ff31ffcf05702592e86bb58444fa9aaf1d5d088",
                "MacAddress": "02:42:c0:a8:00:04",
                "IPv4Address": "192.168.0.4/16",
                "IPv6Address": ""
            }
        }
......

Building a Redis cluster

Three masters, three replicas, configured as a sharded cluster

# Create a custom network for the Redis cluster
docker network create --subnet 172.18.0.0/16 redis
# Script the config files for the 6 Redis nodes
for port in $(seq 1 6);
do \
mkdir -p /usr/local/docker-redis/node-${port}/conf
touch /usr/local/docker-redis/node-${port}/conf/redis.conf
cat << EOF >/usr/local/docker-redis/node-${port}/conf/redis.conf
port 6379
bind 0.0.0.0
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.18.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done

# Start the 6 Redis containers in a loop
for port in $(seq 1 6);
do \
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} \
-v /usr/local/docker-redis/node-${port}/data:/data \
-v /usr/local/docker-redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.18.0.1${port} redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf; \
done
# Or start one manually (substitute the node number)
docker run -p 6371:6379 -p 16371:16379 --name redis-1 \
-v /usr/local/docker-redis/node-1/data:/data \
-v /usr/local/docker-redis/node-1/conf/redis.conf:/etc/redis/redis.conf \
-d --net redis --ip 172.18.0.11 redis:5.0.9-alpine3.11 redis-server /etc/redis/redis.conf

# Create the cluster; exec into any Redis container (note: sh, not /bin/bash, on Alpine)
[root@localhost docker-redis]# docker exec -it redis-1 sh
/data # redis-cli --cluster create 172.18.0.11:6379 172.18.0.12:6379 172.18.0.13:6379 172.18.0.14:6379 172.18.0.15:6379 172.18.0.16:6379 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 172.18.0.15:6379 to 172.18.0.11:6379
Adding replica 172.18.0.16:6379 to 172.18.0.12:6379
Adding replica 172.18.0.14:6379 to 172.18.0.13:6379
M: 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379
   slots:[0-5460] (5461 slots) master
M: eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379
   slots:[5461-10922] (5462 slots) master
M: dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379
   slots:[10923-16383] (5461 slots) master
S: 43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379
   replicates dd35a300ad3ad466b977832a884fe5aa48a9c563
S: 6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379
   replicates 09b1eef2538fdd2c0335342b9de6438a0ba9f40b
S: 8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379
   replicates eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
....
>>> Performing Cluster Check (using node 172.18.0.11:6379)
M: 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
M: eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
M: dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
S: 8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379
   slots: (0 slots) slave
   replicates eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67
S: 43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379
   slots: (0 slots) slave
   replicates dd35a300ad3ad466b977832a884fe5aa48a9c563
S: 6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379
   slots: (0 slots) slave
   replicates 09b1eef2538fdd2c0335342b9de6438a0ba9f40b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Test the cluster; -c enables cluster mode
/data # redis-cli -c
# Cluster info
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:6
cluster_my_epoch:1
cluster_stats_messages_ping_sent:222
cluster_stats_messages_pong_sent:227
cluster_stats_messages_sent:449
cluster_stats_messages_ping_received:222
cluster_stats_messages_pong_received:222
cluster_stats_messages_meet_received:5
cluster_stats_messages_received:449

# Node info: three masters, three replicas
127.0.0.1:6379> cluster nodes
eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379@16379 master - 0 1660557471100 2 connected 5461-10922
dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379@16379 master - 0 1660557471505 3 connected 10923-16383
09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379@16379 myself,master - 0 1660557470000 1 connected 0-5460
8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379@16379 slave eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 0 1660557471000 6 connected
43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379@16379 slave dd35a300ad3ad466b977832a884fe5aa48a9c563 0 1660557470593 4 connected
6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379@16379 slave 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 0 1660557471606 5 connected

# Test replica takeover: set a value, stop the master that holds it, then get; redis-4 has replaced redis-3 as master
127.0.0.1:6379> set a b
-> Redirected to slot [15495] located at 172.18.0.13:6379
OK
172.18.0.13:6379> get a
"b"
172.18.0.13:6379> get a
Could not connect to Redis at 172.18.0.13:6379: Host is unreachable
(65.06s)
not connected> exit
/data # redis-cli -c
127.0.0.1:6379> get a
-> Redirected to slot [15495] located at 172.18.0.14:6379
"b"
172.18.0.14:6379> cluster nodes
8c9d8ed5ab1b267c606829f499abe2c55d962869 172.18.0.16:6379@16379 slave eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 0 1660557709000 6 connected
eb46e66ce57d199dfeea3550d7f34e6e8fa1ea67 172.18.0.12:6379@16379 master - 0 1660557709136 2 connected 5461-10922
6e318419c846ecb3b8895deb738b8a1a9bb8c672 172.18.0.15:6379@16379 slave 09b1eef2538fdd2c0335342b9de6438a0ba9f40b 0 1660557708222 5 connected
09b1eef2538fdd2c0335342b9de6438a0ba9f40b 172.18.0.11:6379@16379 master - 0 1660557709239 1 connected 0-5460
dd35a300ad3ad466b977832a884fe5aa48a9c563 172.18.0.13:6379@16379 master,fail - 1660557596311 1660557595808 3 connected
43cfdff052b5137c539a9b3fe185f86239d154d3 172.18.0.14:6379@16379 myself,master - 0 1660557707000 7 connected 10923-16383

Deploying a Spring Boot jar

  1. Write the Spring Boot app

  2. Package it with mvn package

    [root@localhost application]# ls
    Dockerfile  geotools-tutorial-1.0.jar
    
  3. Write the Dockerfile

    [root@localhost application]# cat Dockerfile

    FROM java:8
    
    COPY *.jar /geotools.jar
    
    EXPOSE 8081
    
    ENTRYPOINT ["java","-jar","geotools.jar"]
    
  4. Build the Docker image

    docker build -t geotools:1.0 .
    
  5. Run

    docker run --name geo01 -d -p 8081:8081 geotools:1.0
    

Docker Compose

Overview

Dockerfile, build, run: manual, single-container operations. In a microservice setup there are too many containers to handle one by one.

Docker Compose gives easy, efficient container management and can define and run multiple containers.

Concepts:

  • services: containers/applications (web, redis, mysql, ...)
  • project: a group of related containers

From the official docs:

Compose is a tool for defining and running multi-container Docker applications. With Compose, you use a YAML file to configure your application's services. Then, with a single command, you create and start all the services from your configuration. To learn more about all the features of Compose, see the feature list.

Compose works in all environments: production, staging, development, testing, as well as CI workflows. You can learn more about each case in common use cases.

Using Compose is basically a three-step process:

  1. Define your app's environment with a Dockerfile so it can be reproduced anywhere.
  2. Define the services that make up your app in docker-compose.yml so they can be run together in an isolated environment.
  3. Run docker compose up and the Docker compose command starts and runs your entire app. You can also run docker-compose up using Compose Standalone (the docker-compose binary).

Installation

  1. Download

    curl -SL https://github.com/docker/compose/releases/download/v2.7.0/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose
    
  2. Make it executable

    chmod 777 /usr/local/bin/docker-compose
    
  3. Verify

    docker-compose version
    
  4. Uninstall

    # Just delete the docker-compose binary
    rm $DOCKER_CONFIG/cli-plugins/docker-compose
    # Locate the CLI plugin path
    docker info --format '{{range .ClientInfo.Plugins}}{{if eq .Name "compose"}}{{.Path}}{{end}}{{end}}'
    

Getting-started example

  1. Create a directory

     mkdir composetest
     cd composetest
    
  2. Create the Python program app.py

    import time
    
    import redis
    from flask import Flask
    
    app = Flask(__name__)
    cache = redis.Redis(host='redis', port=6379)
    
    def get_hit_count():
        retries = 5
        while True:
            try:
                return cache.incr('hits')
            except redis.exceptions.ConnectionError as exc:
                if retries == 0:
                    raise exc
                retries -= 1
                time.sleep(0.5)
    
    @app.route('/')
    def hello():
        count = get_hit_count()
        return 'Hello World! I have been seen {} times.\n'.format(count)
    
    ## Not in the official tutorial; goes with the Dockerfile change below
    if __name__ == "__main__":
        app.run(host="0.0.0.0", debug=True)
    
  3. Create the dependency list requirements.txt

    flask
    redis
    
  4. Write the Dockerfile

    # syntax=docker/dockerfile:1
    FROM python:3.7-alpine
    WORKDIR /code
    ENV FLASK_APP=app.py
    ENV FLASK_RUN_HOST=0.0.0.0
    RUN apk add --no-cache gcc musl-dev linux-headers
    COPY requirements.txt requirements.txt
    RUN pip install -r requirements.txt
    EXPOSE 5000
    COPY . .
    CMD ["flask", "run"]
    
    # The build above errored out, so it was changed to the following
    FROM python:3.6-alpine
    ADD . /code
    WORKDIR /code
    RUN pip install -r requirements.txt
    CMD ["python", "app.py"]
    
  5. Write the docker-compose.yml config

    version: "3.9"
    services:
      web:
        build: .
        ports:
          - "8000:5000"
        ## Not in the official tutorial; goes with the Dockerfile change
        volumes:
          - .:/code
      redis:
        image: "redis:alpine"
    
  6. Build and run

    [root@localhost composetest]# docker-compose up
    [+] Running 2/0
     ⠿ Container composetest-web-1    Created             0.0s
     ⠿ Container composetest-redis-1  Created             0.0s
    Attaching to composetest-redis-1, composetest-web-1
    composetest-redis-1  | 1:C 16 Aug 2022 08:59:31.057 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
    composetest-redis-1  | 1:C 16 Aug 2022 08:59:31.057 # Redis version=6.2.6, bits=64, commit=00000000, modified=0, pid=1, just started
    composetest-redis-1  | 1:C 16 Aug 2022 08:59:31.057 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.057 * monotonic clock: POSIX clock_gettime
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * Running mode=standalone, port=6379.
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 # Server initialized
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * Loading RDB produced by version 6.2.6
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * RDB age 151 seconds
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * RDB memory usage when created 0.77 Mb
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 # Done loading RDB, keys loaded: 1, keys expired: 0.
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * DB loaded from disk: 0.000 seconds
    composetest-redis-1  | 1:M 16 Aug 2022 08:59:31.058 * Ready to accept connections
    composetest-web-1    |  * Serving Flask app 'app' (lazy loading)
    composetest-web-1    |  * Environment: production
    composetest-web-1    |    WARNING: This is a development server. Do not use it in a production deployment.
    composetest-web-1    |    Use a production WSGI server instead.
    composetest-web-1    |  * Debug mode: on
    composetest-web-1    |  * Running on all addresses.
    composetest-web-1    |    WARNING: This is a development server. Do not use it in a production deployment.
    composetest-web-1    |  * Running on http://172.24.0.2:5000/ (Press CTRL+C to quit)
    composetest-web-1    |  * Restarting with stat
    composetest-web-1    |  * Debugger is active!
    composetest-web-1    |  * Debugger PIN: 116-781-253
    
  7. Test

    [root@localhost ~]# curl localhost:8000
    Hello World! I have been seen 7 times.
    [root@localhost ~]# curl localhost:8000
    Hello World! I have been seen 8 times.
    [root@localhost ~]# docker ps
    CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                    NAMES
    8df26bac74ff        redis:alpine        "docker-entrypoint.s…"   5 minutes ago       Up 2 minutes        6379/tcp                 composetest-redis-1
    20b1f77343d8        composetest_web     "python app.py"          5 minutes ago       Up 2 minutes        0.0.0.0:8000->5000/tcp   composetest-web-1
    [root@localhost ~]# docker images
    REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
    composetest_web     latest              eaaccc9f548d        48 minutes ago      55.5MB
    redis               alpine              3900abf41552        8 months ago        32.4MB
    [root@localhost ~]# docker network ls
    NETWORK ID          NAME                  DRIVER              SCOPE
    eb29590a53a3        bridge                bridge              local
    3cfdd97d09c3        composetest_default   bridge              local
    f454031e2478        host                  host                local
    e98bf5a234b1        none                  null                local
    

    Some defaults:

    • Default service/container names: folder_service_num (num is the replica index when scaled)
    • Default network name: folder_default, created automatically; the whole project lives on one network, so services can reach each other by service name
  8. Stop

    ##  or Ctrl+C in the original terminal
    docker-compose stop  ## use stop after docker-compose up -d to stop without removing the containers
    # docker-compose down --volumes  also removes the mounted volumes, i.e. the data
    docker-compose down  ##  stop and remove the containers
    
   

YAML configuration rules

See the [official docs](https://docs.docker.com/compose/compose-file/#compose-file)

# Three levels
version: '' # compose file format version; tied to the Docker version
services: # services - the core level
  web: # service 1
    # service config
    image:
    build:
    networks:
    volumes:
    .....
  redis: # service 2
    .....
  xxx: # service 3
    .....
# other levels: global rules etc.
configs:
volumes:
......

WordPress example

  1. Create the project, i.e. a directory

    mkdir my_wordpress
    cd my_wordpress
    
  2. Write docker-compose.yml

    services:
      db:
        # We use a mariadb image which supports both amd64 & arm64 architecture
        image: mariadb:10.6.4-focal
        # If you really want to use MySQL, uncomment the following line
        #image: mysql:8.0.27
        command: '--default-authentication-plugin=mysql_native_password'
        volumes:
          - db_data:/var/lib/mysql
        restart: always
        environment:
          - MYSQL_ROOT_PASSWORD=somewordpress
          - MYSQL_DATABASE=wordpress
          - MYSQL_USER=wordpress
          - MYSQL_PASSWORD=wordpress
        expose:
          - 3306
          - 33060
      wordpress:
        image: wordpress:latest
        ports:
          - 80:80
        restart: always
        environment:
          - WORDPRESS_DB_HOST=db
          - WORDPRESS_DB_USER=wordpress
          - WORDPRESS_DB_PASSWORD=wordpress
          - WORDPRESS_DB_NAME=wordpress
    volumes:
      db_data:
    
  3. Start

    docker-compose up -d
    
  4. Visit the site and configure the blog

Counter example

A hit counter implemented with Spring Boot + Redis

The code

@RestController
public class HelloController {
  @Resource
  private RedisTemplate<String, String> redisTemplate;
  @GetMapping("/count")
  public String count() {
    Long count = redisTemplate.opsForValue().increment("count");
    return "public static void main ---> "+count;
  }
}

Dockerfile

FROM java:8

COPY *.jar /geotools.jar

EXPOSE 8081

ENTRYPOINT ["java","-jar","geotools.jar"]

docker-compose.yml

version: '3.6'
services:
  geotools:
    build: .
    image: geotools
    depends_on:
      - redis
    ports:
      - "8081:8081"
  redis:
    image: "redis:alpine"

Run

docker-compose up -d

Test

[root@bogon ~]# curl localhost:8081/count
public static void main ---> 17
[root@bogon ~]# curl localhost:8081/count
public static void main ---> 18

Docker Swarm

Setup

Prepare the cluster environment: four Aliyun servers or virtual machines; one core and 2 GB RAM each is enough. Install Docker on all of them and use Xshell to drive the four in sync.

From the official docs

Docker Engine 1.12 introduced swarm mode, which lets you create a cluster of one or more Docker Engines called a swarm. A swarm consists of one or more nodes: physical or virtual machines running Docker Engine 1.12 or later in swarm mode.

There are two types of nodes: managers and workers.

Swarm mode cluster

If you haven’t already, read through the swarm mode overview and key concepts.

View the swarm commands

[root@localhost ~]# docker swarm --help
Usage:	docker swarm COMMAND
Manage Swarm
Commands:
  ca          Display and rotate the root CA
  init        Initialize a swarm
  join        Join a swarm as a node and/or manager
  join-token  Manage join tokens
  leave       Leave the swarm
  unlock      Unlock swarm
  unlock-key  Manage the unlock key
  update      Update the swarm
Run 'docker swarm COMMAND --help' for more information on a command.

Flow: first init one machine to become a manager node and advertise (--advertise-addr) its own address, then let the other nodes join and choose to become a manager or a worker

# Initialize a manager; current machine docker-1, IP 192.168.137.4
[root@localhost ~]# docker swarm init --advertise-addr 192.168.137.4
Swarm initialized: current node (slclpnonzlpn8lse1of09e6zl) is now a manager.

To add a worker to this swarm, run the following command:

    docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377

To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.


#  Generate a worker join token
[root@localhost ~]# docker swarm join-token worker
To add a worker to this swarm, run the following command:

    docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
#  Generate a manager join token
[root@localhost ~]# docker swarm join-token manager
To add a manager to this swarm, run the following command:

    docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-bahthzqdkjntmde2ghl0mw0jk 192.168.137.4:2377


# Join the docker-1 swarm as a worker; current machine docker-2, IP 192.168.137.5
# Error: Error response from daemon: rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: Error while dialing dial tcp 192.168.137.4:2377: connect: no route to host"
# The firewall must be stopped or the port opened, otherwise docker-1's port 2377 is unreachable
# Stop the firewall:
# systemctl stop firewalld
# Or open the port:
# firewall-cmd --zone=public --add-port=2377/tcp --permanent
# firewall-cmd --reload
[root@localhost ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
This node joined a swarm as a worker.

# Join the docker-1 swarm as a worker; current machine docker-3, IP 192.168.137.6
[root@localhost ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-06l4erft7xek04ytewiega313 192.168.137.4:2377
This node joined a swarm as a worker.

# Join the docker-1 swarm as a manager; current machine docker-4, IP 192.168.137.7
# Error: Error response from daemon: manager stopped: can't initialize raft node: rpc error: code = Unknown desc = could not connect to prospective new cluster member using its advertised address: rpc error: code = Unavailable desc = all SubConns are in TransientF
# Becoming a manager also requires stopping the firewall or opening the port on this machine, not just on docker-1; same method as above
[root@bogon ~]# docker swarm join --token SWMTKN-1-4euindy8toduoh3va8vyqz7xdjn6rkgn3p77g4tiomif6f27bx-bahthzqdkjntmde2ghl0mw0jk 192.168.137.4:2377
This node joined a swarm as a manager.

#  Finally, list the nodes on docker-1
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow     localhost               Ready               Active              Reachable           19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Ready               Active                                  19.03.12
rtigefxbfuql7o3dh53px14yn *   localhost.localdomain   Ready               Active              Leader              19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost.localdomain   Ready               Active                                  19.03.12

The Raft protocol

Two managers, two workers: if one manager dies, the other manager becomes unusable too!!

The Raft consensus protocol: the cluster stays usable only while a majority of manager nodes are alive; that is the heart of high availability. With two managers, losing one leaves no majority, so there is no high availability to speak of; a cluster therefore needs at least three managers. Two managers: one failure kills the cluster. Three managers: one failure is survivable (the remaining two are a majority), two failures are not. High availability means tolerating at least one failure.

Test: two managers, two workers; kill one manager and the other becomes unusable

# docker-1 and docker-4 are managers; docker-2 and docker-3 are workers
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow     localhost               Ready               Active              Reachable           19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Ready               Active                                  19.03.12
rtigefxbfuql7o3dh53px14yn *   localhost.localdomain   Ready               Active              Leader              19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost.localdomain   Ready               Active                                  19.03.12
# Stop docker-1
[root@localhost ~]# systemctl stop docker
# List nodes from docker-4
[root@localhost ~]# docker node ls
Error response from daemon: rpc error: code = DeadlineExceeded desc = context deadline exceeded
# Start docker-1 again
[root@localhost ~]# systemctl start docker
# List nodes from docker-1 or docker-4; docker-4 has become the Leader
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow *   localhost               Ready               Active              Leader              19.03.12
rtigefxbfuql7o3dh53px14yn     localhost               Ready               Active              Reachable           19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost               Ready               Active                                  19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Ready               Active                                  19.03.12

Test: three managers, one worker; with one manager down the other two still work, with two down nothing works

# First have the worker docker-3 leave, then rejoin as a manager
# On docker-3
[root@localhost ~]# docker swarm leave
Node left the swarm.
[root@localhost ~]# docker swarm join --token SWMTKN-1-24ysfnawimd0who3788enz230baj1grsb0gubouwsvm8njun77-ep7bmlbflhv8yeyd2nnmxdl5s 192.168.137.4:2377
This node joined a swarm as a manager.
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow     localhost               Ready               Active              Leader              19.03.12
rtigefxbfuql7o3dh53px14yn     localhost               Ready               Active              Reachable           19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost               Ready               Active                                  19.03.12
64b53pz8t50l46jv5wt5cs7of     localhost.localdomain   Down                Active                                  19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 *   localhost.localdomain   Ready               Active              Reachable           19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Down                Active                                  19.03.12
# Stop docker-1
[root@localhost ~]# systemctl stop docker
# Check from docker-3
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow     localhost               Ready               Active              Leader              19.03.12
rtigefxbfuql7o3dh53px14yn     localhost               Down                Active              Unreachable         19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost               Ready               Active                                  19.03.12
64b53pz8t50l46jv5wt5cs7of     localhost.localdomain   Down                Active                                  19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35 *   localhost.localdomain   Ready               Active              Reachable           19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Down                Active                                  19.03.12
# Check from docker-4
[root@localhost ~]# docker node ls
ID                            HOSTNAME                STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
3cy2vkwbe2kuefw3goqa3mcow *   localhost               Ready               Active              Leader              19.03.12
rtigefxbfuql7o3dh53px14yn     localhost               Down                Active              Unreachable         19.03.12
xgwd0fwqmjdo27hte2yye3p6o     localhost               Ready               Active                                  19.03.12
64b53pz8t50l46jv5wt5cs7of     localhost.localdomain   Down                Active                                  19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35     localhost.localdomain   Ready               Active              Reachable           19.03.12
ijsh1hquejkzghwxz7al17jiu     localhost.localdomain   Down                Active                                  19.03.12

# Now stop docker-4 as well
[root@localhost ~]# systemctl stop docker
# Check from docker-3
[root@localhost ~]# docker node ls
Error response from daemon: rpc error: code = DeadlineExceeded desc = context deadline exceeded

Dynamic scaling

Using an Nginx deployment as the example

At enterprise scale, docker run is basically abandoned, and docker-compose up is still single-host deployment; in a swarm, the command is docker service

Conceptual shift: start a container -> start a service -> start replicas

A Redis cluster is one redis service; 10 replicas means 10 running containers, and dynamic scaling means adding or removing replicas on the fly. Related to the ideas behind gray releases and canary releases

# Currently three managers, one worker: docker-2 is the worker; docker-1, 3, 4 are managers
[root@docker-1 /]# docker node ls
ID                            HOSTNAME            STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
rtigefxbfuql7o3dh53px14yn *   docker-1            Ready               Active              Leader              19.03.12
xgwd0fwqmjdo27hte2yye3p6o     docker-2            Ready               Active                                  19.03.12
cmfh7kn7ojmoxlxbg5w5ptf35     docker-3            Ready               Active              Reachable           19.03.12
3cy2vkwbe2kuefw3goqa3mcow     docker-4            Ready               Active              Reachable           19.03.12
[root@docker-1 /]# docker service --help
Usage:	docker service COMMAND
Manage services
Commands:
  create      Create a new service
  inspect     Display detailed information on one or more services
  logs        Fetch the logs of a service or task
  ls          List services
  ps          List the tasks of one or more services
  rm          Remove one or more services
  rollback    Revert changes to a service's configuration
  scale       Scale one or multiple replicated services
  update      Update a service
Run 'docker service COMMAND --help' for more information on a command.
# Create a service; think of it as docker run, but scheduled onto the swarm cluster
[root@docker-1 /]# docker service create -p 8888:80 --name my-nginx nginx
3hh8ny611f3kms7hhutn1xzdd
overall progress: 1 out of 1 tasks 
1/1: running   
verify: Service converged 

#  List services; for details: docker service inspect my-nginx
[root@docker-1 /]# docker service ls
ID                  NAME                MODE                REPLICAS            IMAGE               PORTS
3hh8ny611f3k        my-nginx            replicated          1/1                 nginx:latest        *:8888->80/tcp
[root@docker-1 /]# docker service ps my-nginx
ID                  NAME                IMAGE               NODE                DESIRED STATE       CURRENT STATE           ERROR               PORTS
l702uvr4ogf7        my-nginx.1          nginx:latest        docker-4            Running             Running 3 minutes ago    
# Now run docker ps on docker-1/2/3/4 to see where the nginx replica, i.e. the actual container, landed; here it is on docker-4
[root@docker-4 ~]# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS               NAMES
69985d9e1116        nginx:latest        "/docker-entrypoint.…"   8 minutes ago       Up 8 minutes        80/tcp              my-nginx.1.l702uvr4ogf7ewdeynbz4xgng

docker run starts a container on a single host, with no scaling ability

docker service starts a service, which supports dynamic scaling and rolling updates

Access: although the container runs on docker-4, the service is reachable through any of the hosts, e.g. http://192.168.137.4:8888/

# So by default, create gives the service a single replica, i.e. one container. If traffic grows and one container cannot keep up, the cluster needs to grow: that is what scaling is for

# Scale the service to 3 replicas; docker ps now shows nginx on docker-1, 3 and 4
[root@docker-1 /]# docker service update --replicas 3 my-nginx
my-nginx
overall progress: 3 out of 3 tasks 
1/3: running   
2/3: running   
3/3: running   
verify: Service converged 
# The replica count is not limited by the number of servers, as long as the hardware can take it. With the four VMs docker-1..4, scaling the service to 10 replicas works too: each VM just runs several containers, which is exactly Docker's one-image-many-containers property
#  docker ps now shows 2 nginx containers on docker-1, 3 on docker-2, 2 on docker-3 and 3 on docker-4
[root@docker-1 /]# docker service update --replicas 10 my-nginx
my-nginx
overall progress: 10 out of 10 tasks 
1/10: running   
2/10: running   
3/10: running   
4/10: running   
5/10: running   
6/10: running   
7/10: running   
8/10: running   
9/10: running   
10/10: running   
verify: Service converged 
#  If traffic drops and that many replicas are no longer needed, the service can be scaled down just as dynamically
# docker ps now shows only 1 running container, on docker-4
[root@docker-1 /]# docker service update --replicas 1 my-nginx
my-nginx
overall progress: 1 out of 1 tasks 
1/1: running   
verify: Service converged 

Another scaling command: docker service scale <service>=<replicas>

# Equivalent to the update command
[root@docker-4 ~]# docker service scale my-nginx=3
my-nginx scaled to 3
overall progress: 3 out of 3 tasks 
1/3: running   
2/3: running   
3/3: running   
verify: Service converged 
[root@docker-4 ~]# docker service scale my-nginx=2
my-nginx scaled to 2
overall progress: 2 out of 2 tasks 
1/2: running   
2/2: running   
verify: Service converged 
# Remove the service
[root@docker-4 ~]# docker service rm my-nginx
my-nginx

To control how the service's replicas are placed across nodes, pass --mode at creation time

# --help says:
# --mode string  Service mode (replicated or global) (default "replicated")
# replicated: creates exactly the requested number of replicas; starts with one replica, i.e. one container
# global: a container on every node; here, one on each of the four VMs
docker service create --mode replicated --name mytom tomcat:9   # the default
docker service create --mode global --name mytom tomcat:9

Concept summary

swarm

Cluster management and orchestration. Docker can initialize a swarm cluster that other nodes join; there are manager nodes and worker nodes

node

A single Docker cluster node; multiple nodes form a clustered network

service

A definition of work that can run on manager or worker nodes; the core of swarm

task

The commands run inside containers: the fine-grained unit of work that creates and maintains containers

Extra: swarm's network mode

docker service inspect my-nginx shows the network publish mode: "PublishMode":"ingress"

ingress is a special overlay network with built-in load balancing; although Docker runs on 4 machines, they effectively share one network (see the sketch below)
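
A hedged sketch for pulling just the published-port spec out of inspect (assumes the my-nginx service from earlier; the printed JSON is illustrative):

docker service inspect --format '{{json .Spec.EndpointSpec.Ports}}' my-nginx
# -> [{"Protocol":"tcp","TargetPort":80,"PublishedPort":8888,"PublishMode":"ingress"}]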

The topics below: to be studied later when actually needed ↓

Approach: find an example, get it running first, then dig into the commands

Docker Stack

docker-compose deploys a project on a single host

docker stack deploys a project onto a cluster (a usage sketch follows the help output below)

[root@docker-1 /]# docker stack --help

Usage:	docker stack [OPTIONS] COMMAND

Manage Docker stacks

Options:
      --orchestrator string   Orchestrator to use
                              (swarm|kubernetes|all)

Commands:
  deploy      Deploy a new stack or update an existing stack
  ls          List stacks
  ps          List the tasks in the stack
  rm          Remove one or more stacks
  services    List the services in the stack

Run 'docker stack COMMAND --help' for more information on a command.
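
A minimal usage sketch, assuming a swarm-ready compose file in the current directory (the stack name mystack is made up):

docker stack deploy -c docker-compose.yml mystack   # deploy or update the stack on the swarm
docker stack services mystack                       # list the stack's services
docker stack rm mystack                             # tear the stack down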

Docker Secret

Security-related (a usage sketch follows the help output below)

[root@docker-1 /]# docker secret --help

Usage:	docker secret COMMAND

Manage Docker secrets

Commands:
  create      Create a secret from a file or STDIN as content
  inspect     Display detailed information on one or more secrets
  ls          List secrets
  rm          Remove one or more secrets

Run 'docker secret COMMAND --help' for more information on a command.
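
A minimal usage sketch (the secret and service names are made up; MYSQL_ROOT_PASSWORD_FILE is a feature of the official mysql image):

echo "my-db-password" | docker secret create db_pass -    # create a secret from STDIN
docker service create --name mydb --secret db_pass \
    -e MYSQL_ROOT_PASSWORD_FILE=/run/secrets/db_pass mysql:5.7
# inside the container the secret is mounted as the file /run/secrets/db_pass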

Docker Config

Configuration-related (a usage sketch follows the help output below)

[root@docker-1 /]# docker config --help

Usage:	docker config COMMAND

Manage Docker configs

Commands:
  create      Create a config from a file or STDIN
  inspect     Display detailed information on one or more configs
  ls          List configs
  rm          Remove one or more configs

Run 'docker config COMMAND --help' for more information on a command.
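
The same idea for configs (the config and service names are made up):

echo "server { listen 80; }" | docker config create nginx_conf -   # create a config from STDIN
docker service create --name web \
    --config src=nginx_conf,target=/etc/nginx/conf.d/default.conf nginx
# the config is mounted read-only at the target path inside the container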