【MQ】helm 安装 kafka
@
目录
写在前面
kafka 安装
helm 默认安装
- 请求
# Install bitnami/kafka with 3 replicas and external LoadBalancer access.
# NOTE: each --set line needs a space BEFORE the trailing backslash; without it
# the continuation glues the next line onto the value (e.g. "LoadBalancer--set ...").
helm install kafka \
  --set replicaCount=3 \
  --set externalAccess.enabled=true \
  --set externalAccess.service.broker.type=LoadBalancer \
  --set externalAccess.service.controller.type=LoadBalancer \
  --set externalAccess.service.broker.ports.external=9094 \
  --set externalAccess.service.controller.containerPorts.external=9094 \
  --set externalAccess.autoDiscovery.enabled=true \
  --set serviceAccount.create=true \
  --set rbac.create=true \
  bitnami/kafka
- 返回
# NAME: kafka
# LAST DEPLOYED: Sun Aug 6 23:39:56 2023
# NAMESPACE: default
# STATUS: deployed
# REVISION: 1
# TEST SUITE: None
# NOTES:
# CHART NAME: kafka
# CHART VERSION: 24.0.3
# APP VERSION: 3.5.1
# ** Please be patient while the chart is being deployed **
# Kafka can be accessed by consumers via port 19092 on the following DNS name from within your cluster:
kafka.default.svc.cluster.local
# Each Kafka broker can be accessed by producers via port 19092 on the following DNS name(s) from within your cluster:
kafka-controller-0.kafka-controller-headless.default.svc.cluster.local:19092
kafka-controller-1.kafka-controller-headless.default.svc.cluster.local:19092
kafka-controller-2.kafka-controller-headless.default.svc.cluster.local:19092
# The CLIENT listener for Kafka client connections from within your cluster have been configured with the following security settings:
- SASL authentication
# To connect a client to your Kafka, you need to create the 'client.properties' configuration files with the content below:
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-256
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="user1" \
password="$(kubectl get secret kafka-user-passwords --namespace default -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
# To create a pod that you can use as a Kafka client run the following commands:
kubectl run kafka-client --restart='Never' --image docker.io/bitnami/kafka:3.5.1-debian-11-r11 --namespace default --command -- sleep infinity
kubectl cp --namespace default /path/to/client.properties kafka-client:/tmp/client.properties
kubectl exec --tty -i kafka-client --namespace default -- bash
# PRODUCER:
kafka-console-producer.sh \
--producer.config /tmp/client.properties \
--broker-list kafka-controller-0.kafka-controller-headless.default.svc.cluster.local:19092,kafka-controller-1.kafka-controller-headless.default.svc.cluster.local:19092,kafka-controller-2.kafka-controller-headless.default.svc.cluster.local:19092 \
--topic test
# CONSUMER:
kafka-console-consumer.sh \
--consumer.config /tmp/client.properties \
--bootstrap-server kafka.default.svc.cluster.local:19092 \
--topic test \
--from-beginning
# To connect to your Kafka controller+broker nodes from outside the cluster, follow these instructions:
# NOTE: It may take a few minutes for the LoadBalancer IPs to be available.
Watch the status with: 'kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=kafka,app.kubernetes.io/component=kafka,pod" -w'
# Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below:
echo "$(kubectl get svc --namespace default -l "app.kubernetes.io/name=kafka,app.kubernetes.io/instance=kafka,app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')"
Kafka Brokers port: 9094
# The EXTERNAL listener for Kafka client connections from within your cluster have been configured with the following settings:
- SASL authentication
# To connect a client to your Kafka, you need to create the 'client.properties' configuration files with the content below:
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-256
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="user1" \
password="$(kubectl get secret kafka-user-passwords --namespace default -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)";
bitnami/kafka 安装
# Helm values for the bitnami/kafka chart (pass with: helm install kafka -f values.yaml bitnami/kafka).
# NOTE(review): the original snippet lost its indentation; the nesting below is
# reconstructed from the key names — verify against the chart's values schema.
replicas: 3
image:
  # Quoted so the tag is always a string, never a number-looking scalar.
  tag: "3.0.0"
persistence:
  enabled: true
  size: 1Gi
broker:
  persistence:
    size: 1Gi
log:
  message:
    timestamp:
      difference:
        max:
          # log.message.timestamp.difference.max.ms — Kafka's default is
          # Long.MAX_VALUE, i.e. effectively unlimited skew.
          ms: 9223372036854775807
  # Do not preallocate segment files on disk.
  preallocate: false
# Run in KRaft (zookeeper-less) mode.
type: kraft
config:
  transaction.state.log.replication.factor: 3
  transaction.state.log.min.isr: 2
  offsets.topic.replication.factor: 3
service:
  type: NodePort
  port: 9092
  nodePorts:
    kafka: 30092
docker-compose zookeeper 安装 kafka
# docker-compose: one ZooKeeper node plus a 3-broker Kafka cluster.
# The original snippet lost its indentation; structure restored below.
version: '3.8'

services:
  # Single ZooKeeper node; data persisted in the zookeeper-data volume.
  zookeeper:
    image: 'zookeeper:3.7.0'
    ports:
      - '2181:2181'
    environment:
      ZOO_MY_ID: "1"
      ZOO_SERVERS: server.1=zookeeper:2888:3888;2181
    volumes:
      - 'zookeeper-data:/data'

  # Broker 1 — advertised on kafka-1:9092 inside the compose network.
  kafka-1:
    image: 'bitnami/kafka:3.0.0'
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "1"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092
      KAFKA_NUM_PARTITIONS: "3"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "2"
    ports:
      - '9092:9092'
    volumes:
      - 'kafka-1-data:/bitnami/kafka/data'

  # Broker 2 — host port 9093 maps to container port 9092.
  kafka-2:
    image: 'bitnami/kafka:3.0.0'
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "2"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092
      KAFKA_NUM_PARTITIONS: "3"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "2"
    ports:
      - '9093:9092'
    volumes:
      - 'kafka-2-data:/bitnami/kafka/data'

  # Broker 3 — host port 9094 maps to container port 9092.
  kafka-3:
    image: 'bitnami/kafka:3.0.0'
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "3"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092
      KAFKA_NUM_PARTITIONS: "3"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "3"
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "2"
    ports:
      - '9094:9092'
    volumes:
      - 'kafka-3-data:/bitnami/kafka/data'

volumes:
  zookeeper-data:
  kafka-1-data:
  kafka-2-data:
  kafka-3-data:

# The zookeeper service uses the official ZooKeeper image, starting one node with
# its data stored in the zookeeper-data volume.
# kafka-1/2/3 use the Bitnami Kafka image, starting three brokers persisted in the
# kafka-1-data, kafka-2-data, and kafka-3-data volumes respectively.
# depends_on makes each broker wait for the zookeeper service.
# environment configures each broker: broker ID, ZooKeeper connection string,
# listeners, advertised listeners, partition count, offsets-topic and
# transaction-log replication factors.
# ports exposes each broker on a distinct host port.
# volumes gives each broker its own persistent data volume.
docker compose
# docker-compose: ZooKeeper + 3 Kafka brokers, external access proxied by nginx
# (see the nginx.conf below). The original snippet lost its indentation;
# structure restored below.
version: "2"

services:
  zookeeper:
    container_name: zookeeper
    hostname: zookeeper
    image: docker.io/bitnami/zookeeper:3.8
    # ports:
    #   - "2181"
    environment:
      - ALLOW_ANONYMOUS_LOGIN=yes
    volumes:
      - zookeeper_data:/bitnami/zookeeper

  kafka-0:
    container_name: kafka-0
    hostname: kafka-0
    image: docker.io/bitnami/kafka:3.2
    # ports:
    #   - "9092"
    environment:
      - KAFKA_ENABLE_KRAFT=no
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_CFG_BROKER_ID=0
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_LISTENERS=INTERNAL://:9092,EXTERNAL://0.0.0.0:9093
      # If connecting to Kafka from another host, replace localhost with the
      # Docker host's IP.
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://kafka-0:9092,EXTERNAL://localhost:9093
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
    volumes:
      - kafka_0_data:/bitnami/kafka
    depends_on:
      - zookeeper

  kafka-1:
    container_name: kafka-1
    hostname: kafka-1
    image: docker.io/bitnami/kafka:3.2
    # ports:
    #   - "9092"
    environment:
      - KAFKA_ENABLE_KRAFT=no
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_CFG_BROKER_ID=1
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_LISTENERS=INTERNAL://:9092,EXTERNAL://0.0.0.0:9094
      # If connecting to Kafka from another host, replace localhost with the
      # Docker host's IP.
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://kafka-1:9092,EXTERNAL://localhost:9094
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
    volumes:
      - kafka_1_data:/bitnami/kafka
    depends_on:
      - zookeeper

  kafka-2:
    container_name: kafka-2
    hostname: kafka-2
    image: docker.io/bitnami/kafka:3.2
    # ports:
    #   - "9092"
    environment:
      - KAFKA_ENABLE_KRAFT=no
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
      - KAFKA_CFG_BROKER_ID=2
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT
      - KAFKA_CFG_LISTENERS=INTERNAL://:9092,EXTERNAL://0.0.0.0:9095
      # If connecting to Kafka from another host, replace localhost with the
      # Docker host's IP.
      - KAFKA_CFG_ADVERTISED_LISTENERS=INTERNAL://kafka-2:9092,EXTERNAL://localhost:9095
      - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=INTERNAL
    volumes:
      - kafka_2_data:/bitnami/kafka
    depends_on:
      - zookeeper

  # TCP proxy exposing each broker's EXTERNAL listener on ports 9093-9095.
  nginx:
    container_name: nginx
    hostname: nginx
    image: nginx:1.22.0-alpine
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
    ports:
      - "9093-9095:9093-9095"
    depends_on:
      - kafka-0
      - kafka-1
      - kafka-2

volumes:
  zookeeper_data:
    driver: local
  kafka_0_data:
    driver: local
  kafka_1_data:
    driver: local
  kafka_2_data:
    driver: local
- nginx
user  nginx;
worker_processes  auto;

error_log  /var/log/nginx/error.log notice;
pid        /var/run/nginx.pid;

events {
    worker_connections  1024;
}

# TCP (layer-4) passthrough: one listen port per Kafka broker, forwarded to the
# matching EXTERNAL listener inside the compose network.
stream {
    upstream kafka-0 {
        server kafka-0:9093;
    }
    upstream kafka-1 {
        server kafka-1:9094;
    }
    upstream kafka-2 {
        server kafka-2:9095;
    }

    server {
        listen 9093;
        proxy_pass kafka-0;
    }
    server {
        listen 9094;
        proxy_pass kafka-1;
    }
    server {
        listen 9095;
        proxy_pass kafka-2;
    }
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
}
无 zookeeper kafka
# KRaft (zookeeper-less) Kafka cluster: three combined controller+broker nodes
# plus one broker-only node. The original snippet lost its indentation;
# structure restored below.

# Common KRaft environment shared by every node (merged via <<: below).
x-kraft: &common-config
  # Quoted: a bare `yes` parses as boolean true in YAML, but the bitnami
  # image expects the string "yes".
  ALLOW_PLAINTEXT_LISTENER: "yes"
  KAFKA_ENABLE_KRAFT: "yes"
  KAFKA_KRAFT_CLUSTER_ID: MTIzNDU2Nzg5MGFiY2RlZg
  KAFKA_CFG_PROCESS_ROLES: broker,controller
  KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
  KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: BROKER:PLAINTEXT,CONTROLLER:PLAINTEXT
  KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 1@kafka-1:9091,2@kafka-2:9091,3@kafka-3:9091
  KAFKA_CFG_INTER_BROKER_LISTENER_NAME: BROKER

# Common image/network settings for every kafka service.
x-kafka: &kafka
  image: 'bitnami/kafka:3.3.1'
  networks:
    net:

# Custom network.
networks:
  net:
    # Project name.
    name: kraft

services:
  # Combined controller + broker nodes.
  kafka-1:
    <<: *kafka
    container_name: kafka-1
    ports:
      - '9092:9092'
    environment:
      <<: *common-config
      KAFKA_CFG_BROKER_ID: "1"
      KAFKA_CFG_LISTENERS: CONTROLLER://:9091,BROKER://:9092
      KAFKA_CFG_ADVERTISED_LISTENERS: BROKER://10.150.36.72:9092  # Docker host IP
  kafka-2:
    <<: *kafka
    container_name: kafka-2
    ports:
      - '9093:9093'
    environment:
      <<: *common-config
      KAFKA_CFG_BROKER_ID: "2"
      KAFKA_CFG_LISTENERS: CONTROLLER://:9091,BROKER://:9093
      KAFKA_CFG_ADVERTISED_LISTENERS: BROKER://10.150.36.72:9093  # Docker host IP
  kafka-3:
    <<: *kafka
    container_name: kafka-3
    ports:
      - '9094:9094'
    environment:
      <<: *common-config
      KAFKA_CFG_BROKER_ID: "3"
      KAFKA_CFG_LISTENERS: CONTROLLER://:9091,BROKER://:9094
      KAFKA_CFG_ADVERTISED_LISTENERS: BROKER://10.150.36.72:9094  # Docker host IP

  # Broker-only node (overrides the merged process roles; not in the
  # controller quorum).
  kafka-4:
    <<: *kafka
    container_name: kafka-4
    ports:
      - '9095:9095'
    environment:
      <<: *common-config
      KAFKA_CFG_BROKER_ID: "4"
      KAFKA_CFG_PROCESS_ROLES: broker
      KAFKA_CFG_LISTENERS: BROKER://:9095
      KAFKA_CFG_ADVERTISED_LISTENERS: BROKER://10.150.36.72:9095
参考资料
免责声明:
本站提供的资源,都来自网络,版权争议与本站无关,所有内容及软件的文章仅限用于学习和研究目的。不得将上述内容用于商业或者非法用途,否则,一切后果请用户自负,我们不保证内容的长久可用性,通过使用本站内容随之而来的风险与本站无关,您必须在下载后的24个小时之内,从您的电脑/手机中彻底删除上述内容。如果您喜欢该程序,请支持正版软件,购买注册,得到更好的正版服务。侵删请致信