docker-compose Kafka cluster deployment, with username/password authentication for the kafka-manager web UI
Building on the earlier docker-compose Kafka deployment article, this post extends the setup to a cluster deployment. The full docker-compose.yml:
version: "3" services: zookeeper_sasl: image: wurstmeister/zookeeper hostname: zookeeper_sasl container_name: zookeeper_sasl restart: always ports: - 52181:2181 environment: ZOOKEEPER_CLIENT_PORT: 2181 SERVER_JVMFLAGS: -Djava.security.auth.login.config=/opt/zookeeper-3.4.13/secrets/server_jaas.conf volumes: - ./conf:/opt/zookeeper-3.4.13/conf - ./conf/:/opt/zookeeper-3.4.13/secrets/ - ./runtime/zookeeper/data:/data kafka1: image: wurstmeister/kafka:2.11-0.11.0.3 restart: always hostname: broker1 container_name: kafka_sasl1 ports: - 59091:9091 environment: KAFKA_BROKER_ID: 1 KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://10.18.25.106:59091 KAFKA_ADVERTISED_PORT: 59091 KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9091 KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT KAFKA_PORT: 59091 KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer KAFKA_SUPER_USERS: User:admin KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" #设置为true,ACL机制为黑名单机制,只有黑名单中的用户无法访问,默认为false,ACL机制为白名单机制,只有白名单中的用户可以访问 KAFKA_ZOOKEEPER_CONNECT: zookeeper_sasl:2181 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" KAFKA_OPTS: -Djava.security.auth.login.config=/opt/kafka/secrets/server_jaas.conf volumes: - ./conf/:/opt/kafka/secrets/ - ./runtime/kafka/logs:/kafka kafka2: image: wurstmeister/kafka:2.11-0.11.0.3 restart: always hostname: broker2 container_name: kafka_sasl2 ports: - 59092:9092 environment: KAFKA_BROKER_ID: 2 KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://10.18.25.106:59092 KAFKA_ADVERTISED_PORT: 59092 KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9092 KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT KAFKA_PORT: 59092 KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer KAFKA_SUPER_USERS: User:admin KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" #设置为true,ACL机制为黑名单机制,只有黑名单中的用户无法访问,默认为false,ACL机制为白名单机制,只有白名单中的用户可以访问 KAFKA_ZOOKEEPER_CONNECT: zookeeper_sasl:2181 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" KAFKA_OPTS: -Djava.security.auth.login.config=/opt/kafka/secrets/server_jaas.conf volumes: - ./conf/:/opt/kafka/secrets/ - ./runtime/kafka/logs:/kafka kafka3: image: wurstmeister/kafka:2.11-0.11.0.3 restart: always hostname: broker3 container_name: kafka_sasl3 ports: - 59093:9093 environment: KAFKA_BROKER_ID: 3 KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://10.18.25.106:59093 KAFKA_ADVERTISED_PORT: 59093 KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9093 KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT KAFKA_PORT: 59093 KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN KAFKA_SASL_ENABLED_MECHANISMS: PLAIN KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer KAFKA_SUPER_USERS: User:admin KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" #设置为true,ACL机制为黑名单机制,只有黑名单中的用户无法访问,默认为false,ACL机制为白名单机制,只有白名单中的用户可以访问 KAFKA_ZOOKEEPER_CONNECT: zookeeper_sasl:2181 KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_AUTO_CREATE_TOPICS_ENABLE: "false" KAFKA_OPTS: -Djava.security.auth.login.config=/opt/kafka/secrets/server_jaas.conf volumes: - ./conf/:/opt/kafka/secrets/ - ./runtime/kafka/logs:/kafka kafka-manager: image: sheepkiller/kafka-manager:1.3.1.8 restart: always container_name: kafka-manager environment: ZK_HOSTS: zookeeper_sasl:2181 
KAFKA_MANAGER_AUTH_ENABLED: "true" KAFKA_MANAGER_USERNAME: admin KAFKA_MANAGER_PASSWORD: meiyoumima ports: - 59000:9000 volumes: - ./conf/application.conf:/kafka-manager-1.3.1.8/conf/application.conf command: -Dpidfile.path=/dev/null
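Both ZooKeeper and the brokers load a JAAS file from the mounted ./conf/ directory (via SERVER_JVMFLAGS and KAFKA_OPTS above); that file comes from the earlier single-node article. For reference, a minimal sketch of what ./conf/server_jaas.conf might contain is shown below; the account names and passwords here are assumptions and must match whatever credentials your brokers and clients actually use:

// Sketch of ./conf/server_jaas.conf (credentials are placeholders, not from this article)
// ZooKeeper server side: accepts DIGEST-MD5 logins, e.g. from the brokers.
Server {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    user_super="adminsecret"
    user_kafka="kafkasecret";
};
// Kafka broker side: SASL/PLAIN accounts; each user_<name>="<password>" entry defines one allowed user.
KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="admin-secret"
    user_admin="admin-secret";
};
// Used by the broker when it connects to ZooKeeper as a client.
Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="kafka"
    password="kafkasecret";
};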
On the host, also create the file conf/application.conf in the same directory (it is mounted into the kafka-manager container):
# Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
# See accompanying LICENSE file.

# This is the main configuration file for the application.
# ~~~~~

# Secret key
# ~~~~~
# The secret key is used to secure cryptographics functions.
# If you deploy your application to several instances be sure to use the same key!
play.crypto.secret="^<csmm5Fx4d=r2HEX8pelM3iBkFVv?k[mc;IZE<_Qoq8EkX_/7@Zt6dP05Pzea3U"
play.crypto.secret=${?APPLICATION_SECRET}

# The application languages
# ~~~~~
play.i18n.langs=["en"]

play.http.requestHandler = "play.http.DefaultHttpRequestHandler"
play.http.context = "/"
play.application.loader=loader.KafkaManagerLoader

kafka-manager.zkhosts="kafka-manager-zookeeper:2181"
kafka-manager.zkhosts=${?ZK_HOSTS}
pinned-dispatcher.type="PinnedDispatcher"
pinned-dispatcher.executor="thread-pool-executor"
application.features=["KMClusterManagerFeature","KMTopicManagerFeature","KMPreferredReplicaElectionFeature","KMReassignPartitionsFeature"]

akka {
  loggers = ["akka.event.slf4j.Slf4jLogger"]
  loglevel = "INFO"
}

# Enable login authentication for the kafka-manager UI
basicAuthentication.enabled=true
basicAuthentication.enabled=${?KAFKA_MANAGER_AUTH_ENABLED}
basicAuthentication.username="admin"
basicAuthentication.username=${?KAFKA_MANAGER_USERNAME}
basicAuthentication.password="meiyoumima"
basicAuthentication.password=${?KAFKA_MANAGER_PASSWORD}
basicAuthentication.realm="Kafka-Manager"

kafka-manager.consumer.properties.file=${?CONSUMER_PROPERTIES_FILE}
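With docker-compose.yml, conf/application.conf, and the server_jaas.conf sketched above in place, the stack can be started from that directory; the kafka-manager UI then answers on port 59000 and prompts for the basic-auth credentials set in the compose file (admin / meiyoumima):

# start ZooKeeper, the three brokers and kafka-manager in the background
docker-compose up -d
# confirm that all five containers are up
docker-compose ps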
Note: when creating a topic, the replication factor cannot exceed the number of brokers (the partition count can); otherwise creation fails.
There are two ways to create a topic:
1. Exec into any of the Kafka containers and run the topic-creation command (see the sketch after this list).
2. Open the kafka-manager web UI, add the cluster first, and then create the topic within that cluster.
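For option 1, a rough sketch of the commands, assuming the wurstmeister/kafka image layout (Kafka under /opt/kafka inside the container, consistent with the secrets mount above); the topic name "test" and the partition count are just examples:

# enter one of the brokers
docker exec -it kafka_sasl1 bash
# create a topic: replication-factor must not exceed 3 (the number of brokers), partitions may be higher
/opt/kafka/bin/kafka-topics.sh --create \
  --zookeeper zookeeper_sasl:2181 \
  --replication-factor 3 \
  --partitions 6 \
  --topic test
# verify the topic and its partition assignment
/opt/kafka/bin/kafka-topics.sh --describe --zookeeper zookeeper_sasl:2181 --topic test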