Deploying Zookeeper + Kafka + Storm with Docker

Install Docker

Trust the Docker GPG public key:

curl -fsSL https://repo.huaweicloud.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -

Add the software repository:

sudo add-apt-repository "deb [arch=amd64] https://repo.huaweicloud.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"

Update the package index and install Docker:

sudo apt-get update
sudo apt-get install docker-ce
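
To confirm the installation succeeded, you can check the version and run the hello-world image; this is only a quick sanity check, and pulling hello-world assumes the host has internet access:

sudo docker --version
sudo systemctl status docker
sudo docker run --rm hello-world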

Create a dedicated network

docker network create --subnet 172.30.0.0/24 --gateway 172.30.0.1 zookeeper
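
You can verify that the subnet and gateway were applied as expected; docker network inspect accepts a Go-template --format flag:

docker network inspect zookeeper --format '{{json .IPAM.Config}}'
# the output should report the 172.30.0.0/24 subnet and the 172.30.0.1 gateway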

Create the Zookeeper Compose file

version: '3'
services:
  zookeeper1:
    image: zookeeper:3.4
    restart: always
    hostname: zookeeper1
    container_name: zookeeper1
    ports:
      - "2181:2181"
    volumes:
      - "/home/zk/workspace/volumes/zkcluster/zookeeper1/data:/data"
      - "/home/zk/workspace/volumes/zkcluster/zookeeper1/datalog:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zookeeper2:2888:3888 server.3=zookeeper3:2888:3888
    networks:
      zookeeper:
        ipv4_address: 172.30.0.11
  zookeeper2:
    image: zookeeper:3.4
    restart: always
    hostname: zookeeper2
    container_name: zookeeper2
    ports:
      - "2182:2181"
    volumes:
      - "/home/zk/workspace/volumes/zkcluster/zookeeper2/data:/data"
      - "/home/zk/workspace/volumes/zkcluster/zookeeper2/datalog:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zookeeper3:2888:3888
    networks:
      zookeeper:
        ipv4_address: 172.30.0.12
  zookeeper3:
    image: zookeeper:3.4
    restart: always
    hostname: zookeeper3
    container_name: zookeeper3
    ports:
      - "2183:2181"
    volumes:
      - "/home/zk/workspace/volumes/zkcluster/zookeeper3/data:/data"
      - "/home/zk/workspace/volumes/zkcluster/zookeeper3/datalog:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zookeeper1:2888:3888 server.2=zookeeper2:2888:3888 server.3=0.0.0.0:2888:3888
    networks:
      zookeeper:
        ipv4_address: 172.30.0.13
networks:
  zookeeper:
    external:
      name: zookeeper
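
After the ensemble is started (see the startup commands at the end of this post), each node's role can be checked. A minimal sanity check, assuming the containers are already running and the four-letter-word commands are enabled (the default in the 3.4 image):

docker exec -it zookeeper1 zkServer.sh status
# or query each node from the host via the mapped ports:
echo stat | nc localhost 2181 | grep Mode
echo stat | nc localhost 2182 | grep Mode
echo stat | nc localhost 2183 | grep Mode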

Create the Kafka Compose file

Note that if external clients need to reach the Kafka service, KAFKA_ADVERTISED_LISTENERS must be changed to the host machine's IP address and the corresponding port (the serverIP placeholder in the file below).

version: '3'
services:
  kafka1:
    image: wurstmeister/kafka:2.12-2.4.1
    restart: always
    hostname: kafka1
    container_name: kafka1
    privileged: true
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://serverIP:9092
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    volumes:
      - /home/zk/workspace/volumes/kafkaCluster/kafka1/logs:/kafka
      - /home/w2110276138/dataset/:/tmp
    networks:
      kafka:
        ipv4_address: 172.30.0.14
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
  kafka2:
    image: wurstmeister/kafka:2.12-2.4.1
    restart: always
    hostname: kafka2
    container_name: kafka2
    privileged: true
    ports:
      - 9093:9093
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9093
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://serverIP:9093
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    volumes:
      - /home/zk/workspace/volumes/kafkaCluster/kafka2/logs:/kafka
      - /home/w2110276138/dataset/:/tmp
    networks:
      kafka:
        ipv4_address: 172.30.0.15
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
  kafka3:
    image: wurstmeister/kafka:2.12-2.4.1
    restart: always
    hostname: kafka3
    container_name: kafka3
    privileged: true
    ports:
      - 9094:9094
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_LISTENERS: PLAINTEXT://kafka3:9094
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://serverIP:9094
      KAFKA_ADVERTISED_PORT: 9094
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    volumes:
      - /home/zk/workspace/volumes/kafkaCluster/kafka3/logs:/kafka
      - /home/w2110276138/dataset/:/tmp
    networks:
      kafka:
        ipv4_address: 172.30.0.16
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
networks:
  kafka:
    external:
      name: zookeeper
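
Once the brokers are up, a quick smoke test can be run with the CLI scripts bundled in the wurstmeister/kafka image (if they are not on the PATH, use the full path under /opt/kafka/bin). The topic name test-topic below is just an example:

docker exec -it kafka1 kafka-topics.sh --create --zookeeper zookeeper1:2181 --replication-factor 3 --partitions 3 --topic test-topic
docker exec -it kafka1 kafka-topics.sh --describe --zookeeper zookeeper1:2181 --topic test-topic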

Create the Storm Compose file

version: '3'
services:
  nimbus:
    image: storm:1.2.2
    command: storm nimbus
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    container_name: nimbus
    hostname: nimbus
    networks:
      storm:
        ipv4_address: 172.30.0.17
    ports:
      - 6627:6627
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
      - "/home/w2110276138/jar:/tmp"
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16
      ui: 172.30.0.18
  ui:
    image: storm:1.2.2
    command: storm ui
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    container_name: ui
    hostname: ui
    networks:
      storm:
        ipv4_address: 172.30.0.18
    ports:
      - 8080:8080
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
    depends_on:
      - nimbus
    links:
      - nimbus:nimbus
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16
      nimbus: 172.30.0.17
  supervisor:
    image: storm:1.2.2
    command: storm supervisor
    restart: always
    environment:
      - STORM_ZOOKEEPER_SERVERS=zookeeper1:2181,zookeeper2:2181,zookeeper3:2181
    networks:
      - storm
    depends_on:
      - nimbus
    links:
      - nimbus:nimbus
    volumes:
      - "./storm.yaml:/conf/storm.yaml"
    extra_hosts:
      zookeeper1: 172.30.0.11
      zookeeper2: 172.30.0.12
      zookeeper3: 172.30.0.13
      kafka1: 172.30.0.14
      kafka2: 172.30.0.15
      kafka3: 172.30.0.16
networks:
  storm:
    external:
      name: zookeeper
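
All three Storm services mount a local ./storm.yaml, but its contents are not shown above. A minimal sketch that matches the ZooKeeper hosts and the nimbus hostname used in this setup might look like the following; the log and local directories are placeholders to adjust for your environment:

storm.zookeeper.servers:
  - "zookeeper1"
  - "zookeeper2"
  - "zookeeper3"
nimbus.seeds: ["nimbus"]
storm.log.dir: "/logs"
storm.local.dir: "/data"
supervisor.slots.ports:
  - 6700
  - 6701
  - 6702
  - 6703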

Start Zookeeper + Kafka + Storm

If all of the Compose definitions were written in a single file, everything could be started with one command; with the separate files used here, start them one by one:

docker compose -f docker-compose-zookeeper.yml up -d
docker compose -f docker-compose-kafka.yml up -d
docker compose -f docker-compose-storm.yml up -d
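
After startup, confirm that all containers stay running and check the logs of any service that does not; the Storm UI, mapped to port 8080 in the Compose file above, can also be used to inspect the cluster:

docker ps --format 'table {{.Names}}\t{{.Status}}\t{{.Ports}}'
docker logs -f nimbus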

Scale out Supervisors

docker compose -f docker-compose-storm.yml up --scale supervisor=4 -d
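
With the cluster running, a topology jar placed in the host directory mounted at /tmp on nimbus can be submitted from inside the container; the jar name and main class below are hypothetical placeholders:

docker exec -it nimbus storm jar /tmp/my-topology.jar com.example.MyTopology my-topology
docker exec -it nimbus storm list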