第二章 Spring Boot 整合 Kafka消息队列 生产者
1.引入库
系列文章目录
第一章 Kafka 配置部署及SASL_PLAINTEXT安全认证
第二章 Spring Boot 整合 Kafka消息队列 生产者
第三章 Spring Boot 整合 Kafka消息队列 消费者(待续)
前言
Kafka 是一个消息队列产品,基于Topic partitions的设计,能达到非常高的消息发送处理性能。本文主要是基于Spring Boot封装了Apache 的Kafka-client,用于在Spring Boot 项目里快速集成kafka。
一、Kafka 是什么?
Apache Kafka是分布式发布-订阅消息系统。
它最初由LinkedIn公司开发,之后成为Apache项目的一部分。
Kafka是一种快速、可扩展的、设计内在就是分布式的,分区的和可复制的提交日志服务。
二、生产者
1.引入库
引入需要依赖的jar包,引入POM文件
<dependencies> <dependency> <groupId>org.springframework.kafka</groupId> <artifactId>spring-kafka</artifactId> </dependency> </dependencies>
2.配置文件
spring:
custom:
kafka:
username: admin
password: admin-secret
partitions: 1
enable-auto-commit: false
batch-listener: false
bootstrap-servers:
- 192.168.1.95:9092
3.生产者端启动类
启动类名 EnableAutoKafka
package com.cdkjframework.kafka.producer.annotation;

import com.cdkjframework.kafka.producer.config.KafkaMarkerConfiguration;
import org.springframework.context.annotation.Import;

import java.lang.annotation.*;

/**
 * Switches on the Kafka producer auto-configuration for the annotated
 * Spring Boot application.
 *
 * <p>Importing {@link KafkaMarkerConfiguration} registers its marker bean;
 * the auto-configuration elsewhere in this module is conditional on that
 * bean, so nothing Kafka-related is wired up unless this annotation is
 * present on a {@code @Configuration} class.</p>
 *
 * @author xiaLin
 * @version 1.0
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Import({KafkaMarkerConfiguration.class})
public @interface EnableAutoKafka {
}
4.spring.factories配置文件
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
com.cdkjframework.kafka.producer.config.KafkaAutoConfiguration
5.配置类
5.1 kafka 配置 KafkaConfig
package com.cdkjframework.kafka.producer.config;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;

import java.util.List;

/**
 * Kafka settings bound from the {@code spring.custom.kafka} configuration
 * prefix. Holds both producer and consumer tuning knobs; each consumer of
 * this class reads only the fields relevant to it.
 *
 * @author xiaLin
 * @version 1.0
 */
@Data
@Configuration
@ConfigurationProperties(prefix = "spring.custom.kafka")
public class KafkaConfig {

  /**
   * Broker address list, e.g. {@code ["192.168.1.95:9092"]}.
   */
  private List<String> bootstrapServers;

  /**
   * Topic names.
   */
  private List<String> topics;

  /**
   * SASL username; SASL/PLAIN is only activated when both username and
   * password are non-empty (see ProducerConfiguration).
   */
  private String username;

  /**
   * SASL password.
   */
  private String password;

  /**
   * linger.ms — delay in milliseconds before a batch is sent.
   */
  private Integer linger = 1;

  /**
   * batch.size — batch size in bytes.
   */
  private Integer batchSize = 16384;

  /**
   * Retry count; 0 disables the retry mechanism.
   */
  private Integer retries = 0;

  /**
   * max.block.ms — how long send() may block when the buffer is full.
   * NOTE(review): original comment was garbled; value 6000 is 6 seconds,
   * while Kafka's own default is 60 s — confirm the intended default.
   */
  private Integer maxBlock = 6000;

  /**
   * acks — "0", "1" or "all".
   */
  private String acks = "1";

  /**
   * security.providers.
   */
  private String securityProviders;

  /**
   * Consumer: enable auto commit of offsets.
   */
  private boolean enableAutoCommit = true;

  /**
   * Consumer session timeout, in milliseconds.
   */
  private String sessionTimeout = "5000";

  /**
   * max.poll.interval.ms — maximum delay between polls before the consumer
   * is considered failed.
   */
  private Integer maxPollInterval = 10000;

  /**
   * Consumer group id.
   */
  private String groupId = "defaultGroup";

  /**
   * max.poll.records — maximum records returned per poll.
   */
  private Integer maxPollRecords = 1;

  /**
   * Listener container concurrency (number of consumer threads).
   */
  private Integer concurrency = 3;

  /**
   * Poll timeout, in milliseconds.
   */
  private Integer pollTimeout = 60000;

  /**
   * Whether the listener receives records in batches.
   */
  private boolean batchListener = false;

  /**
   * Replica count per topic. NOTE(review): the field name "sort" is
   * misleading for a replica count, but is kept — renaming would break
   * existing property bindings.
   */
  private String sort = "1";

  /**
   * Partition count per topic.
   */
  private Integer partitions = 3;

  /**
   * Producer compression type: none, lz4, gzip or snappy. Consumers
   * decompress automatically, so this is producer-side only.
   */
  private String compressionType = "none";

  /**
   * auto.offset.reset policy (earliest/latest/none).
   */
  private String autoOffsetReset = "earliest";

  /**
   * auto.commit.interval.ms — frequency of automatic offset commits.
   */
  private Integer autoCommitInterval = 100;

  /**
   * buffer.memory — total bytes the producer may use to buffer records
   * awaiting transmission to the server.
   */
  private Integer bufferMemory = 33554432;

  /**
   * max.request.size — upper bound for a single message, in bytes
   * (default 1048576 = 1 MB).
   */
  private Integer maxRequestSize = 1048576;
}
5.2 kafka 自动配置 KafkaAutoConfiguration
package com.cdkjframework.kafka.producer.config; import com.cdkjframework.kafka.producer.ProducerConfiguration; import com.cdkjframework.kafka.producer.util.ProducerUtils; import lombok.RequiredArgsConstructor; import org.springframework.boot.autoconfigure.AutoConfigureAfter; import org.springframework.boot.autoconfigure.ImportAutoConfiguration; import org.springframework.boot.autoconfigure.condition.ConditionalOnBean; import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean; import org.springframework.boot.autoconfigure.web.reactive.function.client.WebClientAutoConfiguration; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Lazy; /** * @ProjectName: cdkj-framework * @Package: com.cdkjframework.kafka.producer.config * @ClassName: KafkaAutoConfiguration * @Description: kafka 自动配置 * @Author: xiaLin * @Date: 2023/7/18 9:21 * @Version: 1.0 */ @Lazy(false) @RequiredArgsConstructor @Configuration(proxyBeanMethods = false) @EnableConfigurationProperties(KafkaConfig.class) @AutoConfigureAfter({WebClientAutoConfiguration.class}) @ImportAutoConfiguration(ProducerConfiguration.class) @ConditionalOnBean(KafkaMarkerConfiguration.Marker.class) public class KafkaAutoConfiguration { /** * 读取配置文件 */ private final KafkaConfig kafkaConfig; /** * kafka topic 启动触发器 * * @return 返回结果 */ @Bean(initMethod = "kafkaAdmin") @ConditionalOnMissingBean public TopicConfig kafkaTopic() { TopicConfig trigger = new TopicConfig(kafkaConfig); return trigger; } /** * kafka 配置 启动触发器 * * @return 返回结果 */ @Bean(initMethod = "start") @ConditionalOnMissingBean public ProducerUtils Producer() { return new ProducerUtils(); } }
5.3 kafka 标记配置 KafkaMarkerConfiguration
package com.cdkjframework.kafka.producer.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;

/**
 * Marker configuration imported by {@code @EnableAutoKafka}. Its sole job
 * is to register the nested {@link Marker} bean, whose presence is the
 * condition that allows the Kafka auto-configuration to activate.
 *
 * @author xiaLin
 * @version 1.0
 */
@EnableKafka
@Configuration(proxyBeanMethods = false)
public class KafkaMarkerConfiguration {

  /**
   * Publishes the marker bean that switches on the auto-configuration.
   *
   * @return a new marker instance
   */
  @Bean
  public Marker kafkaMarker() {
    return new Marker();
  }

  /**
   * Empty marker type; carries no state or behavior.
   */
  public static class Marker {
  }
}
5.4 topic配置 TopicConfig
package com.cdkjframework.kafka.producer.config;

import com.cdkjframework.constant.IntegerConsts;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.kafka.core.KafkaAdmin;

import java.util.HashMap;
import java.util.Map;

/**
 * Topic bootstrap configuration: exposes a {@link KafkaAdmin} so the
 * cluster can be checked for missing topics (which KafkaAdmin creates
 * from NewTopic beans when present).
 *
 * @author xiaLin
 * @version 1.0
 */
public class TopicConfig {

  /**
   * Bound Kafka settings.
   */
  private final KafkaConfig kafkaConfig;

  /**
   * Constructor.
   *
   * @param kafkaConfig bound Kafka settings
   */
  public TopicConfig(KafkaConfig kafkaConfig) {
    this.kafkaConfig = kafkaConfig;
  }

  /**
   * Builds the KafkaAdmin used to detect/create topics on the cluster.
   *
   * <p>Fix: the original put the bootstrap-servers entry into the map
   * twice; the second call was a harmless but confusing duplicate.</p>
   *
   * @return a KafkaAdmin pointed at the configured brokers
   */
  public KafkaAdmin kafkaAdmin() {
    Map<String, Object> configs = new HashMap<>(IntegerConsts.ONE);
    // Multiple brokers may be listed, e.g. 192.168.2.11:9092,192.168.2.12:9092,192.168.2.13:9092
    configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBootstrapServers());
    return new KafkaAdmin(configs);
  }
}
6.生产者配置
生产者配置类 ProducerConfiguration
package com.cdkjframework.kafka.producer;

import com.cdkjframework.kafka.producer.config.KafkaConfig;
import com.cdkjframework.kafka.producer.util.ProducerUtils;
import com.cdkjframework.util.tool.StringUtils;
import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import javax.annotation.Resource;
import java.util.HashMap;
import java.util.Map;

/**
 * Producer configuration: declares the KafkaTemplate and its backing
 * ProducerFactory, assembling raw producer properties from KafkaConfig.
 *
 * @author xiaLin
 * @version 1.0
 */
@Configuration
@RequiredArgsConstructor
public class ProducerConfiguration {

  /**
   * SASL/PLAIN JAAS template; username and password are substituted in at
   * runtime. Fix: JAAS values must be double-quoted — the original
   * unquoted form fails to parse for values containing characters such as
   * '-' (e.g. the sample password "admin-secret").
   */
  private static final String JAAS_CONFIG =
      "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";";

  /**
   * SASL mechanism used when credentials are configured.
   */
  private static final String SASL_MECHANISM = "PLAIN";

  /**
   * Bound Kafka settings.
   */
  private final KafkaConfig kafkaConfig;

  /**
   * Producer template.
   *
   * @return the KafkaTemplate used for sending String messages
   */
  @Bean(name = "kafkaTemplate")
  public KafkaTemplate<String, String> kafkaTemplate() {
    return new KafkaTemplate<>(producerFactory());
  }

  /**
   * Producer factory.
   *
   * @return the factory creating producers from {@link #producerConfigs()}
   */
  @Bean(name = "producerFactory")
  public ProducerFactory<String, String> producerFactory() {
    return new DefaultKafkaProducerFactory<>(producerConfigs());
  }

  /**
   * Assembles the raw producer properties.
   *
   * @return map of Kafka producer configuration entries
   */
  public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    // Broker address list (one or many).
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBootstrapServers());
    // Retry count; 0 disables the retry mechanism.
    props.put(ProducerConfig.RETRIES_CONFIG, kafkaConfig.getRetries());
    // acks=0: success once handed to the client;
    // acks=1: success once the leader has written the record;
    // acks=all: success once all in-sync replicas have the record.
    props.put(ProducerConfig.ACKS_CONFIG, kafkaConfig.getAcks());
    // How long send() may block when producer memory is exhausted (Kafka default 60 s).
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, kafkaConfig.getMaxBlock());
    // security.providers — only set when configured; putting an explicit
    // null is equivalent to the default but clutters the config.
    if (kafkaConfig.getSecurityProviders() != null) {
      props.put(ProducerConfig.SECURITY_PROVIDERS_CONFIG, kafkaConfig.getSecurityProviders());
    }
    // Batch size in bytes.
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaConfig.getBatchSize());
    // Linger delay (ms): batching reduces request count and raises throughput.
    props.put(ProducerConfig.LINGER_MS_CONFIG, kafkaConfig.getLinger());
    // Total bytes the producer may use to buffer records awaiting send.
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaConfig.getBufferMemory());
    // Upper bound for a single message; default 1048576 (1 MB).
    props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, kafkaConfig.getMaxRequestSize());
    // Key/value serializers.
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    // Compression: none, lz4, gzip or snappy (consumers decompress automatically).
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, kafkaConfig.getCompressionType());
    // NOTE(review): max.in.flight.requests.per.connection is fed from the
    // *partition count* property — these are unrelated concepts; confirm
    // this is intentional before changing, as callers may rely on it.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, kafkaConfig.getPartitions());
    // Enable SASL/PLAIN only when both credentials are present.
    if (StringUtils.isNotNullAndEmpty(kafkaConfig.getUsername())
        && StringUtils.isNotNullAndEmpty(kafkaConfig.getPassword())) {
      props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
      props.put(SaslConfigs.SASL_MECHANISM, SASL_MECHANISM);
      props.put(SaslConfigs.SASL_JAAS_CONFIG,
          String.format(JAAS_CONFIG, kafkaConfig.getUsername(), kafkaConfig.getPassword()));
    }
    return props;
  }
}
7. 生产者工具
生产者端 ProducerUtils
package com.cdkjframework.kafka.producer.util;

import com.cdkjframework.constant.IntegerConsts;
import com.cdkjframework.util.log.LogUtils;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFutureCallback;

import javax.annotation.Resource;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Static facade over a Spring-managed KafkaTemplate for sending messages.
 *
 * <p>Lifecycle: Spring constructs this bean, injects {@code template}, then
 * calls {@code start()} (via the bean's configured init method), which copies
 * the injected template into the static {@code kafkaTemplate} field that the
 * static send methods use. Calling any send method before the Spring context
 * has initialized this bean will fail with a NullPointerException.</p>
 *
 * @author xiaLin
 * @version 1.0
 */
public class ProducerUtils {

  /**
   * Logger.
   */
  private static LogUtils logUtils = LogUtils.getLogger(ProducerUtils.class);

  /**
   * Template shared by the static send methods; populated once by start().
   * NOTE(review): not volatile — visibility to other threads presumably
   * relies on context-initialization happens-before; confirm.
   */
  private static KafkaTemplate kafkaTemplate;

  /**
   * Spring-injected template instance, copied into the static field.
   */
  @Resource(name = "kafkaTemplate")
  private KafkaTemplate template;

  /**
   * Init method (invoked reflectively by Spring): exposes the injected
   * template through the static field.
   */
  private void start() {
    kafkaTemplate = template;
  }

  /**
   * Sends a message synchronously, blocking up to 10 seconds for the
   * broker acknowledgement.
   *
   * @param topic   topic name
   * @param message payload to send
   * @throws InterruptedException if the calling thread is interrupted while waiting
   * @throws ExecutionException   if the send itself fails
   * @throws TimeoutException     if no acknowledgement arrives within 10 seconds
   */
  public static void sendMessageSync(String topic, String message)
      throws InterruptedException, ExecutionException, TimeoutException {
    kafkaTemplate.send(topic, message).get(IntegerConsts.TEN, TimeUnit.SECONDS);
  }

  /**
   * Sends a message asynchronously; the outcome is only logged.
   *
   * @param topic   topic name
   * @param message payload to send
   */
  public static void sendMessageAsync(String topic, String message) {
    kafkaTemplate.send(topic, message).addCallback(new ListenableFutureCallback() {
      @Override
      public void onFailure(Throwable throwable) {
        // Log the failed payload, then the cause.
        logUtils.error("topic:" + topic + ",message:" + message);
        logUtils.error(throwable, throwable.getMessage());
      }

      @Override
      public void onSuccess(Object o) {
        logUtils.info("topic:" + topic + ",发送成功");
      }
    });
  }

  /**
   * Sends a keyed message asynchronously; the key determines the target
   * partition. The outcome is only logged.
   *
   * @param topic   topic name
   * @param key     partitioning key
   * @param message payload to send
   */
  public static void sendMessageAsync(String topic, String key, String message) {
    kafkaTemplate.send(topic, key, message).addCallback(new ListenableFutureCallback() {
      @Override
      public void onFailure(Throwable throwable) {
        // Log the failed payload, then the cause.
        logUtils.error("topic:" + topic + ",message:" + message);
        logUtils.error(throwable, throwable.getMessage());
      }

      @Override
      public void onSuccess(Object o) {
        logUtils.info("topic:" + topic + ",发送成功");
      }
    });
  }
}
总结
以上就是今天要讲的内容,本文仅仅简单介绍了 Spring Boot 集成消息生产者的封装,消费者待续。
相对应的开源项目欢迎访问:维基框架