Docker Manual -- Common Everyday Commands
Uninstall the JDK
rpm -qa | grep -i java
rpm -qa | grep -i java | xargs -n1 rpm -e --nodeps
Install the JDK
tar -zxvf jdk-8u351-linux-x64.tar.gz
vim /etc/profile
export JAVA_HOME=/home/jdk/jdk1.8.0_351
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH
source /etc/profile
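A quick sanity check after reloading the profile (the paths above must match the directory the tarball actually extracted to):
# Verify the JDK is picked up
java -version
echo $JAVA_HOME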
Common Commands (everything except docker run)
# Stop all containers
docker stop $(docker ps -aq)
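To wipe every container or image on the host (not just one Compose project), the plain Docker equivalents are, as a sketch:
# Remove all containers, running or stopped
docker rm -f $(docker ps -aq)
# Remove all images
docker rmi -f $(docker images -q)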
# Remove a Compose project's containers (stopping them first)
docker-compose rm --stop
# Tear down a Compose project and remove its images as well
docker-compose down --rmi all
# Remove a container
docker rm -f <container-name>
# Remove an image
docker rmi <image-id>
# Save an image to a tar archive
docker save <image-id> -o <name>.tar
# Pull an image
docker pull <image-name>:<tag>
# Load an image from a tar archive
docker load -i <name>.tar
# Re-tag an image (copy an existing tag to a new name:tag)
docker tag thatName:thatTag thisName:thisTag
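Re-tagging is also how an image is prepared for a different registry; a hypothetical example (registry.example.com is a placeholder):
# Tag a local image for a private registry, then push it
docker tag kafka-kafka:v1 registry.example.com/demo/kafka-kafka:v1
docker push registry.example.com/demo/kafka-kafka:v1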
# Build an image locally from a Dockerfile
docker build -t kafka-kafka:v1 -f Dockerfile ./
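After the build it is worth confirming the image exists locally, for example:
# List the new image and show how it was built
docker images kafka-kafka
docker history kafka-kafka:v1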
# Basic docker run pattern
docker run -d --name zookeeper -p 2181:2181 wurstmeister/zookeeper:tag
# View a container's logs (use the name given via docker run --name)
docker logs <container-name>
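For live troubleshooting, following the stream with a limited backlog is usually more practical:
# Follow logs with timestamps, showing only the last 100 lines
docker logs -f -t --tail 100 <container-name>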
# Stop and force-remove a container (by ID)
docker rm -f b7ee9987e6cb6881cad0e4bf09b9a7f9095003bbe5d67ff36fb2118b946478dd
# Create a Docker network for containers to communicate over
docker network create datahub_network
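Containers attached with --network datahub_network can reach each other by container name; a few commands to check or extend the network (the container name below is just an example):
# List networks and see which containers are attached
docker network ls
docker network inspect datahub_network
# Attach an already-running container to the network
docker network connect datahub_network broker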
# Start the services defined in a specific compose file, detached
docker-compose -f docker-compose.yml up -d
# Start only the mysql service
docker-compose up -d mysql
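A few companion docker-compose commands for the same project, for reference:
# Show service status, follow logs, and tear the stack down
docker-compose -f docker-compose.yml ps
docker-compose -f docker-compose.yml logs -f
docker-compose -f docker-compose.yml down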
Docker Run Examples
# Run ZooKeeper
docker run --name zookeeper \
-p 2181:2181 \
-e "ZOOKEEPER_CLIENT_PORT=2181" \
-e "ZOOKEEPER_TICK_TIME=2000" \
-d confluentinc/cp-zookeeper:latest
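To confirm ZooKeeper answers on the mapped port, the four-letter srvr command (also used as the health check in the compose file below) works from the host if nc is installed:
# Should print the server mode and stats
echo srvr | nc localhost 2181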
# Run Kafka (Confluent broker -- the original/reference setup)
docker run --name broker \
-p 9092:9092 \
--network my_network \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-v /path/to/kafka.properties:/etc/kafka/conf/server.properties \
-d confluentinc/cp-kafka:7.4.0
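Once the broker is up, a simple smoke test is to create and list a topic from inside the container (the Confluent images ship the kafka-topics CLI):
# Create a test topic and list topics on the broker
docker exec broker kafka-topics --bootstrap-server localhost:9092 --create --topic smoke-test --partitions 1 --replication-factor 1
docker exec broker kafka-topics --bootstrap-server localhost:9092 --list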
# Run Kafka (self-built image)
docker run --name=kafka \
-p 9092:9092 \
-e ALLOW_PLAINTEXT_LISTENER=yes \
-e KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 \
-e KAFKA_HEAP_OPTS="-Xmx180m -Xms180m" \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092 \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-e USE_CONFLUENT_SCHEMA_REGISTRY="true" \
-e KAFKA_BOOTSTRAP_SERVER=broker:9092 \
-d kafka-kafka:v1
# Run Elasticsearch (single node)
docker run --name elasticsearch \
-p 9200:9200 \
-p 9300:9300 \
-e "discovery.type=single-node" \
-e ES_JAVA_OPTS="-Xms256m -Xmx512m -Dlog4j2.formatMsgNoLookups=true" \
-e OPENSEARCH_JAVA_OPTS="-Xms512m -Xmx512m -Dlog4j2.formatMsgNoLookups=true" \
-d elasticsearch:7.10.1
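Once the node is up, the REST API on the mapped port is the simplest check:
# Basic node info and cluster health
curl http://localhost:9200
curl http://localhost:9200/_cluster/health?pretty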
Install Docker
On a CentOS 7 virtual machine:
# Back up the existing yum repo file first
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
# Then replace it with the Aliyun mirror
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Install yum-utils
yum -y install yum-utils
# Add the Aliyun Docker CE yum repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install Docker
yum install docker-ce docker-ce-cli containerd.io
# Configure the Aliyun registry mirror for Docker
# Run this on your own Linux host:
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://7iw3ne9w.mirror.aliyuncs.com"]
}
EOF
# Deep-clean unused containers, images, networks, and build cache
docker system prune -a --force
# Reload and restart Docker so the registry mirror takes effect
systemctl daemon-reload
systemctl restart docker
# Verify
docker info
# Start the Docker daemon
systemctl start docker
# Check the Docker daemon status
systemctl status docker
# List running containers
docker ps
# Restart the Docker daemon
systemctl restart docker
# Stop the Docker daemon
systemctl stop docker
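It is usually also worth enabling the service so Docker starts again after a reboot:
# Start Docker automatically at boot
systemctl enable docker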
# Smoke-test the installation
docker run hello-world
# Reload the systemd daemon
$ sudo systemctl daemon-reload
# Restart the Docker service
$ sudo systemctl restart docker
# Offline installation of Docker Compose -- download the binary:
https://github.com/docker/compose/releases/download/v2.16.0/docker-compose-linux-x86_64
# Upload the binary to the Linux host, cd into that directory, and rename it to docker-compose
mv docker-compose-linux-x86_64 docker-compose
# Copy it onto the PATH as /usr/local/bin/docker-compose and make it executable
cp docker-compose /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Verify the installation
docker-compose -v
# Alternative: install Docker Compose with yum
yum install docker-compose
Docker Compose (DataHub example)
# Docker compose file covering DataHub's default configuration, which is to run all containers on a single host.
# Please see the README.md for instructions as to how to use and customize.
# NOTE: This file does not build! No dockerfiles are set. See the README.md in this directory.
---
version: '3.9'
services:
datahub-frontend-react:
container_name: datahub-frontend-react
hostname: datahub-frontend-react
image: ${DATAHUB_FRONTEND_IMAGE:-linkedin/datahub-frontend-react}:${DATAHUB_VERSION:-head}
ports:
- ${DATAHUB_MAPPED_FRONTEND_PORT:-9002}:9002
build:
context: ../
dockerfile: docker/datahub-frontend/Dockerfile
env_file: datahub-frontend/env/docker.env
depends_on:
datahub-gms:
condition: service_healthy
volumes:
- ${HOME}/.datahub/plugins:/etc/datahub/plugins
datahub-actions:
container_name: datahub-actions
hostname: actions
image: ${DATAHUB_ACTIONS_IMAGE:-acryldata/datahub-actions}:${ACTIONS_VERSION:-head}
env_file: datahub-actions/env/docker.env
environment:
- ACTIONS_EXTRA_PACKAGES=${ACTIONS_EXTRA_PACKAGES:-}
- ACTIONS_CONFIG=${ACTIONS_CONFIG:-}
depends_on:
datahub-gms:
condition: service_healthy
datahub-gms:
container_name: datahub-gms
hostname: datahub-gms
image: ${DATAHUB_GMS_IMAGE:-linkedin/datahub-gms}:${DATAHUB_VERSION:-head}
ports:
- ${DATAHUB_MAPPED_GMS_PORT:-8080}:8080
build:
context: ../
dockerfile: docker/datahub-gms/Dockerfile
healthcheck:
test: curl -sS --fail http://datahub-gms:${DATAHUB_MAPPED_GMS_PORT:-8080}/health
start_period: 90s
interval: 1s
retries: 3
timeout: 5s
depends_on:
datahub-upgrade:
condition: service_completed_successfully
volumes:
- ${HOME}/.datahub/plugins:/etc/datahub/plugins
datahub-upgrade:
container_name: datahub-upgrade
hostname: datahub-upgrade
image: ${DATAHUB_UPGRADE_IMAGE:-acryldata/datahub-upgrade}:${DATAHUB_VERSION:-head}
command:
- -u
- SystemUpdate
build:
context: ../
dockerfile: docker/datahub-upgrade/Dockerfile
env_file: datahub-upgrade/env/docker-without-neo4j.env
labels:
datahub_setup_job: true
depends_on:
mysql-setup:
condition: service_completed_successfully
elasticsearch-setup:
condition: service_completed_successfully
kafka-setup:
condition: service_completed_successfully
neo4j:
condition: service_healthy
# This "container" is a workaround to pre-create search indices
elasticsearch-setup:
container_name: elasticsearch-setup
hostname: elasticsearch-setup
image: ${DATAHUB_ELASTIC_SETUP_IMAGE:-linkedin/datahub-elasticsearch-setup}:${DATAHUB_VERSION:-head}
build:
context: ../
dockerfile: docker/elasticsearch-setup/Dockerfile
env_file: elasticsearch-setup/env/docker.env
environment:
- ELASTICSEARCH_USE_SSL=${ELASTICSEARCH_USE_SSL:-false}
- USE_AWS_ELASTICSEARCH=${USE_AWS_ELASTICSEARCH:-false}
depends_on:
elasticsearch:
condition: service_healthy
labels:
datahub_setup_job: true
# This "container" is a workaround to pre-create topics.
# This is not required in most cases, kept here for backwards compatibility with older clients that
# explicitly wait for this container
kafka-setup:
container_name: kafka-setup
hostname: kafka-setup
image: ${DATAHUB_KAFKA_SETUP_IMAGE:-linkedin/datahub-kafka-setup}:${DATAHUB_VERSION:-head}
build:
dockerfile: ./docker/kafka-setup/Dockerfile
context: ../
env_file: kafka-setup/env/docker.env
depends_on:
broker:
condition: service_healthy
schema-registry:
condition: service_healthy
labels:
datahub_setup_job: true
elasticsearch:
container_name: elasticsearch
hostname: elasticsearch
image: ${DATAHUB_SEARCH_IMAGE:-elasticsearch}:${DATAHUB_SEARCH_TAG:-7.10.1}
ports:
- ${DATAHUB_MAPPED_ELASTIC_PORT:-9200}:9200
env_file: elasticsearch/env/docker.env
environment:
- discovery.type=single-node
- ${XPACK_SECURITY_ENABLED:-xpack.security.enabled=false}
deploy:
resources:
limits:
memory: 1G
healthcheck:
test: curl -sS --fail http://elasticsearch:$${DATAHUB_MAPPED_ELASTIC_PORT:-9200}/_cluster/health?wait_for_status=yellow&timeout=0s
start_period: 20s
interval: 1s
retries: 3
timeout: 5s
volumes:
- esdata:/usr/share/elasticsearch/data
neo4j:
container_name: neo4j
hostname: neo4j
image: neo4j:4.4.9-community
ports:
- ${DATAHUB_MAPPED_NEO4J_HTTP_PORT:-7474}:7474
- ${DATAHUB_MAPPED_NEO4J_BOLT_PORT:-7687}:7687
env_file: neo4j/env/docker.env
healthcheck:
test: wget http://neo4j:$${DATAHUB_MAPPED_NEO4J_HTTP_PORT:-7474}
start_period: 5s
interval: 1s
retries: 5
timeout: 5s
volumes:
- neo4jdata:/data
schema-registry:
container_name: schema-registry
hostname: schema-registry
image: confluentinc/cp-schema-registry:7.4.0
ports:
- ${DATAHUB_MAPPED_SCHEMA_REGISTRY_PORT:-8081}:8081
env_file: schema-registry/env/docker.env
healthcheck:
test: nc -z schema-registry ${DATAHUB_MAPPED_SCHEMA_REGISTRY_PORT:-8081}
start_period: 60s
interval: 1s
retries: 3
timeout: 5s
depends_on:
broker:
condition: service_healthy
broker:
container_name: broker
hostname: broker
image: confluentinc/cp-kafka:7.4.0
ports:
- ${DATAHUB_MAPPED_KAFKA_BROKER_PORT:-9092}:9092
env_file: broker/env/docker.env
healthcheck:
test: nc -z broker $${DATAHUB_MAPPED_KAFKA_BROKER_PORT:-9092}
start_period: 60s
interval: 1s
retries: 5
timeout: 5s
depends_on:
zookeeper:
condition: service_healthy
volumes:
- broker:/var/lib/kafka/data/
zookeeper:
container_name: zookeeper
hostname: zookeeper
image: confluentinc/cp-zookeeper:7.4.0
ports:
- ${DATAHUB_MAPPED_ZK_PORT:-2181}:2181
env_file: zookeeper/env/docker.env
healthcheck:
test: echo srvr | nc zookeeper $${DATAHUB_MAPPED_ZK_PORT:-2181}
start_period: 10s
interval: 5s
retries: 3
timeout: 5s
volumes:
# See https://stackoverflow.com/a/61008432 for why we need two volumes.
# See also: https://docs.confluent.io/platform/current/installation/docker/operations/external-volumes.html#data-volumes-for-kafka-and-zk
- zkdata:/var/lib/zookeeper/data
- zklogs:/var/lib/zookeeper/log
networks:
default:
name: datahub_network
volumes:
esdata:
neo4jdata:
broker:
zkdata:
zklogs:
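Bringing this file up follows the same pattern as the commands above; a sketch, assuming it is saved as docker-compose.yml with the referenced env files alongside it:
# Start the DataHub stack, then watch status and the GMS logs
docker-compose -f docker-compose.yml up -d
docker-compose -f docker-compose.yml ps
docker-compose -f docker-compose.yml logs -f datahub-gms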