安装
在线下载
curl -L "https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-`uname -s`-`uname -m`" -o /usr/local/bin/docker-compose
# 代理下载
curl -L "https://github.com/docker/compose/releases/download/v2.12.2/docker-compose-`uname -s`-`uname -m`" -x http://192.168.1.4:1080 -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
离线下载
- 访问:https://github.com/docker/compose/releases
- 下载:docker-compose-Linux-x86_64
- 重命名文件为:
docker-compose
- 拷贝至Linux:
/usr/local/bin/docker-compose
- 执行脚本:
chmod +x /usr/local/bin/docker-compose
常用文档
常用
查看版本
docker-compose --version
Run容器
#会自动寻找当前目录的docker-compose.yml
docker-compose up -d
#指定yml安装
docker-compose -f docker-compose.yml up -d
#添加变量启动
sudo env ASPNETCORE_ENVIRONMENT=${ASPNETCORE_ENVIRONMENT} docker-compose -f ./docker-compose.yml up -d
停止
docker-compose stop
查看服务运行状态
docker-compose ps
移除容器和网络
docker-compose down
Yml
案例
# Minimal Prometheus service (compose file format 3.7).
version: '3.7'
services:
  prometheus:
    image: prom/prometheus:v2.36.2
    container_name: prometheus
    # Mount the local ./prometheus directory as the Prometheus config dir.
    volumes:
      - ./prometheus/:/etc/prometheus/
    # Flags passed to the prometheus binary (override the image default).
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
    environment:
      # Forwarded from the invoking shell; empty if unset there.
      - ASPNETCORE_ENVIRONMENT=${ASPNETCORE_ENVIRONMENT}
      - TZ=Asia/Shanghai
    ports:
      # Quoted: unquoted host:container pairs can hit YAML 1.1 sexagesimal parsing.
      - "9090:9090"
语法
version: "3"                       # compose file format version
services:                          # service definitions start below
  server_name:                     # replace server_name with your own service name (e.g. mysql, php)
    container_name: container_name # explicit name for the created container
    image: xxx:latest              # image name and tag to run
    build:                         # use build instead of image when no prebuilt image exists
      context: /xxx/xxx            # build context DIRECTORY (not the Dockerfile itself)
      dockerfile: Dockerfile       # Dockerfile name inside the context (optional; defaults to "Dockerfile")
    ports:
      - "00:00"                    # host:container mapping; quote to avoid YAML sexagesimal parsing
      - "00:00"                    # multiple mappings allowed
    volumes:
      - test1:/xx/xx               # named (managed) volume : container path — easy to manage on the host
      - test2:/xx/xx               # left side is the volume name, right side the in-container directory
      - test3:/xx/xx               # each name must also be declared under the top-level `volumes:` key
    volumes_from:                  # mount all volumes from another container (legacy; removed in v3)
      - volume_container_name      # the volume container's name
    restart: always                # restart the container on any failure (key is `restart`, not `restarts`)
    depends_on:                    # startup-order dependencies: this service starts after those listed
      - server_name                # the other service's key name in this file
      - server_name1               # started in listed order
    links:                         # legacy counterpart to depends_on: controls container linking
      - mysql                      # lets this service reach the `mysql` service by that hostname
    networks:                      # attach to named networks (like attaching extra NICs)
      - my_net                     # a bridge-type network name
      - myapp_net                  # missing networks are created; better to create them first, then reference
    environment:                   # environment variables, like ENV in a Dockerfile
      - TZ=Asia/Shanghai           # sets the CONTAINER timezone to Asia/Shanghai (fixes container TZ issues)
      # Entries are NAME=value pairs injected into the container environment.
    command: [                     # `command` overrides the image's default startup command
      '--character-set-server=utf8mb4',        # database character set
      '--collation-server=utf8mb4_unicode_ci', # database collation
      '--default-time-zone=+8:00'              # MySQL server timezone (NOT the container's timezone)
    ]
  server_name2:                    # second service begins here
    stdin_open: true               # like docker run -i (keep STDIN open)
    tty: true                      # like docker run -t (allocate a pseudo-TTY)
volumes:                           # declare every named volume mounted above
  test1:
  test2:
  test3:
networks:                          # declare the networks referenced above
  my_net:
    driver: bridge                 # network driver type
  myapp_net:
    driver: bridge
Build 镜像
version: '3.7'
services:
  # One-shot init container: seeds built-in user passwords, then exits.
  setup:
    build:
      context: setup/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    init: true
    volumes:
      - setup:/state:Z
    environment:
      # ":-" defaults keep compose from warning when the vars are unset.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro,z
      - elasticsearch:/usr/share/elasticsearch/data:z
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: -Xms512m -Xmx512m
      # Bootstrap password.
      # Used to initialize the keystore during the initial startup of
      # Elasticsearch. Ignored on subsequent runs.
      ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk

  logstash:
    build:
      context: logstash/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro,Z
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro,Z
    ports:
      - "5044:5044"
      - "50000:50000/tcp"
      - "50000:50000/udp"
      - "9600:9600"
    environment:
      LS_JAVA_OPTS: -Xms256m -Xmx256m
      LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
      args:
        ELASTIC_VERSION: ${ELASTIC_VERSION}
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro,Z
    ports:
      - "5601:5601"
    environment:
      KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
      # Fleet plugin
      KIBANA_FLEET_SETUP: '1'
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  setup:
  elasticsearch:
Pull镜像
# ELK + APM stack pulled from the official Elastic registry (no local builds).
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    restart: always
    container_name: elasticsearch
    hostname: elasticsearch
    environment:
      # Single-node discovery: disables production bootstrap checks.
      - discovery.type=single-node
    ports:
      # Quoted: unquoted host:container pairs can hit YAML 1.1 sexagesimal parsing.
      - "9200:9200"
      - "9300:9300"

  kibana:
    image: docker.elastic.co/kibana/kibana:7.13.2
    restart: always
    container_name: kibana
    hostname: kibana
    environment:
      # Resolved via the compose network by service hostname.
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

  apm_server:
    image: docker.elastic.co/apm/apm-server:7.13.2
    restart: always
    container_name: apm_server
    hostname: apm_server
    # -e logs to stderr; --strict.perms=false skips config-file permission checks.
    command: --strict.perms=false -e
    environment:
      # Quoted so the bracketed list stays a literal string for apm-server.
      - 'output.elasticsearch.hosts=["elasticsearch:9200"]'
    ports:
      - "8200:8200"
    depends_on:
      - kibana
      - elasticsearch