Spring Boot + Kafka in Practice
Preface: the Spring Boot version used in this article is 2.5.0.
1. Simple producer and consumer
(1) Add the Kafka dependencies. Spring Boot ships auto-configuration for spring-kafka, so the relevant Kafka beans are registered automatically once the dependency is on the classpath.
<!-- web starter, only used to expose test endpoints -->
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
</dependency>
<!-- Kafka dependency -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
(2) Add the Kafka configuration to application.properties (or application.yml).
########### Kafka cluster ###########
spring.kafka.bootstrap-servers=192.168.184.128:9092
# Enable batch consumption
#spring.kafka.listener.type=batch
# Maximum number of records returned per poll when consuming in batches
spring.kafka.consumer.max-poll-records=50

########### Producer configuration ###########
# Number of retries
spring.kafka.producer.retries=0
# Acknowledgment level: how many partition replicas must confirm the write before the broker acks the producer (0, 1, or all/-1)
spring.kafka.producer.acks=all
# Batch size
spring.kafka.producer.batch-size=5
# Linger time
spring.kafka.producer.properties.linger.ms=1000
# The producer sends a batch to Kafka once either batch-size is reached or linger.ms has elapsed.
# With linger.ms=0 every record is sent as soon as it is received, so batch-size effectively has no effect.
# Producer buffer size
spring.kafka.producer.buffer-memory=33554432
# Serializers provided by Kafka
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# Custom partitioner (a sketch of such a class is shown after this configuration block)
#spring.kafka.producer.properties.partitioner.class=com.felix.kafka.producer.CustomizePartitioner
# Enable transactions
#spring.kafka.producer.transaction-id-prefix=tx

########### Consumer configuration ###########
# Default consumer group ID
spring.kafka.consumer.properties.group.id=defaultConsumerGroup
# Whether to auto-commit offsets
spring.kafka.consumer.enable-auto-commit=true
# Auto-commit interval (how long after receiving a message the offset is committed)
spring.kafka.consumer.auto-commit-interval=1000
# What to do when Kafka has no initial offset, or the current offset is out of range:
# earliest: reset to the earliest offset in the partition
# latest: reset to the latest offset (only consume records produced after the reset)
# none: throw an exception if any partition has no committed offset
spring.kafka.consumer.auto-offset-reset=latest
# Session timeout (if the consumer sends no heartbeat within this time, a rebalance is triggered)
spring.kafka.consumer.properties.session.timeout.ms=120000
# Request timeout
spring.kafka.consumer.properties.request.timeout.ms=180000
# Deserializers provided by Kafka
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
# Do not fail application startup when a listened-to topic does not exist
spring.kafka.listener.missing-topics-fatal=false
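The commented-out partitioner.class property above refers to a custom partitioner class that is not shown in the original. As a rough sketch (the class name CustomizePartitioner and the routing rule are only illustrative), such a class implements Kafka's Partitioner interface:

import java.util.Map;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

/**
 * Illustrative custom partitioner; the routing rule (everything to partition 0)
 * is only a placeholder for real business logic.
 */
public class CustomizePartitioner implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        // Decide the target partition based on the key/value; here: always partition 0
        return 0;
    }

    @Override
    public void close() {
        // No resources to release
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // No extra configuration needed
    }
}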
(3) Define the Kafka-related configuration beans.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;

import java.util.HashMap;
import java.util.Map;

/**
 * @Classname KafkaConfig
 * @Description Kafka producer/consumer configuration
 * @Author 冷小为
 **/
@Configuration
public class KafkaConfig {

    @Autowired
    ConsumerFactory consumerFactory;

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;
    @Value("${spring.kafka.consumer.properties.group.id}")
    private String groupId;
    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.producer.retries}")
    private Integer retries;
    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;
    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    /**
     * Producer configuration
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.ACKS_CONFIG, "0");
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    /**
     * Producer factory
     */
    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Producer template
     */
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Consumer configuration with auto-commit
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /**
     * Simple (single-record) listener container factory
     */
    @Bean("simpleFactory")
    public KafkaListenerContainerFactory<?> simpleFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Disable batch consumption (disabled by default)
        factory.setBatchListener(false);
        return factory;
    }
}
(4) Producer code
@RestController
public class KafkaProducer {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    // Send a plain message
    @GetMapping("/kafka/normal/{message}")
    public void sendMessage(@PathVariable("message") String normalMessage) {
        System.out.println("Sending a simple message: " + normalMessage);
        kafkaTemplate.send("testTopic", normalMessage);
    }
}
(5) Consumer code
@Component
public class KafkaConsumer {

    // Simple consumption listener
    @KafkaListener(topics = {"testTopic"}, containerFactory = "simpleFactory")
    public void onMessage1(ConsumerRecord<?, ?> record) {
        // Print which topic/partition the record came from, plus its value
        System.out.println("Simple consumption: " + record.topic() + "-" + record.partition() + "-" + record.value());
    }
}
Start the application and send a message; it will be consumed as expected.
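For a quick check, assuming the application runs on the default port 8080, requesting http://localhost:8080/kafka/normal/hello in a browser or with curl publishes "hello" to testTopic, and the console then prints the consumed record.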
2. Batch consumption
(1) Simply set batchListener to true on the KafkaListenerContainerFactory. The maximum number of records consumed per batch is controlled by the consumer property ConsumerConfig.MAX_POLL_RECORDS_CONFIG.
/**
 * Batch listener container factory
 */
@Bean("batchFactory")
public KafkaListenerContainerFactory<?> batchFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
    // Enable batch consumption; the batch size is governed by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
    factory.setBatchListener(true);
    return factory;
}
(2) Configure the listener
// Batch consumption listener
@KafkaListener(topics = {"testBatchTopic"}, containerFactory = "batchFactory")
public void onMessage2(List<ConsumerRecord<?, ?>> records) {
    System.out.println(">>> Consumed one batch, records.size()=" + records.size());
    for (ConsumerRecord<?, ?> record : records) {
        System.out.println(record.value());
    }
}
3. Manual offset commit
Spring Boot commits offsets automatically by default. To control the acknowledgment ourselves, set the container's AckMode to MANUAL and acknowledge each message in the listener.
(1) Set the consumer property ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG to false
/**
 * Consumer configuration for manual acknowledgment
*/
@Bean
public Map<String, Object> manualConsumerConfigs() {
Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); // disable auto-commit here
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
return props;
}
(2) Set the AckMode to MANUAL on the listener container factory
/**
 * Listener container factory for manual acknowledgment
 */
@Bean("manualFactory")
public KafkaListenerContainerFactory<?> manualFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(manualConsumerConfigs()));
    factory.setBatchListener(false);
    // Acknowledgment modes:
    // RECORD            commit after each record is processed
    // BATCH (default)   commit once per poll; frequency depends on how often poll is called
    // TIME              commit every ackTime interval
    // COUNT             commit after ackCount acknowledgments have accumulated
    // COUNT_TIME        commit when either ackTime or ackCount is reached first
    // MANUAL            the listener acknowledges, but commits are still batched
    // MANUAL_IMMEDIATE  the listener acknowledges, and each acknowledgment commits immediately
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
    return factory;
}
(3) Configure the listener
// Manual-acknowledgment listener
@KafkaListener(topics = {"testManualTopic"}, containerFactory = "manualFactory")
public void onMessage4(ConsumerRecord<?, ?> record, Acknowledgment ack) {
    // Print which topic/partition the record came from, plus its value
    System.out.println("Manually acknowledged consumption: " + record.topic() + "-" + record.partition() + "-" + record.value());
    ack.acknowledge();
}
4. Transactional messages
(1) Add the transaction setting to the producer configuration
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "tx");
(2) Use either kafkaTemplate.executeInTransaction or the @Transactional annotation; a sketch of the @Transactional variant follows the example below.
// Send a transactional message
@GetMapping("/kafka/transaction/{message}")
public void sendTransactionMessage(@PathVariable("message") String transactionMessage) {
// Inside the transaction: if an exception is thrown later, the message will not be sent
kafkaTemplate.executeInTransaction(operations -> {
operations.send("testTopic",transactionMessage);
operations.flush();
//throw new RuntimeException("fail");
return "commit";
});
}
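As a rough sketch of the @Transactional alternative (the endpoint path and method name below are illustrative; it assumes the transaction id has been configured as above so that Spring Boot sets up a KafkaTransactionManager, and requires import org.springframework.transaction.annotation.Transactional):

// Sketch only: send inside a Spring-managed Kafka transaction via @Transactional
@GetMapping("/kafka/transaction2/{message}")
@Transactional(rollbackFor = RuntimeException.class)
public void sendTransactionMessage2(@PathVariable("message") String transactionMessage) {
    kafkaTemplate.send("testTopic", transactionMessage);
    // If an exception is thrown before the method returns, the transaction rolls back
    // and the message is not committed to the topic
    // throw new RuntimeException("fail");
}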
5. Forwarding messages
A common requirement in real projects: application A consumes a message from TopicA, processes it, and forwards the result to TopicB, where application B picks it up. In other words, one application forwards a message to another after processing it.
Forwarding is straightforward with Spring Boot and Kafka: annotate the listener method with @SendTo, and its return value becomes the forwarded message.
(1) Set a ReplyTemplate on the listener container factory; otherwise using the @SendTo annotation will throw an error
/**
 * Simple listener container factory (with reply template for @SendTo)
 */
@Bean("simpleFactory")
public KafkaListenerContainerFactory<?> simpleFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
    // To forward with @SendTo, a replyTemplate must be configured
    factory.setReplyTemplate(kafkaTemplate());
    factory.setBatchListener(false);
    return factory;
}
(2) Add @SendTo on the listener; the return value is forwarded to the topic named in @SendTo
// Forwarding listener
@KafkaListener(topics = "testTransferTopic", containerFactory = "simpleFactory")
@SendTo("testTopic")
public String onMessage3(ConsumerRecord<?, ?> record) {
    // Print which topic/partition the record came from, plus its value
    System.out.println("Transfer consumption: " + record.topic() + "-" + record.partition() + "-" + record.value());
    return "transfer message :" + record.value();
}
6. Sending messages with a callback
(1) Add a callback when sending the message
// Send a message with a callback
@GetMapping("/kafka/callback/{message}")
public void sendCallbackMessage(@PathVariable("message") String callbackMessage) {
    kafkaTemplate.send("testTopic", callbackMessage).addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
        @Override
        public void onFailure(Throwable throwable) {
            System.out.println("Failed to send the message");
        }

        @Override
        public void onSuccess(SendResult<String, Object> stringObjectSendResult) {
            System.out.println("Message sent successfully: " + stringObjectSendResult.toString());
        }
    });
}
7. Custom consumer error handler
(1) Define a ConsumerAwareListenerErrorHandler in the configuration class
// Create an error handler and register it as a bean
@Bean
public ConsumerAwareListenerErrorHandler consumerAwareErrorHandler() {
    return (message, exception, consumer) -> {
        System.out.println("Consumption error: " + message.getPayload());
        return "exception";
    };
}
(2) Reference the handler via the errorHandler attribute on the listener
@KafkaListener(topics = {"testTopic"}, containerFactory = "simpleFactory",errorHandler = "consumerAwareErrorHandler")
8. Message filter
(1) A message filter intercepts messages before they reach the consumer. Based on our business logic, we keep only the messages we need and hand them to the @KafkaListener; the rest are filtered out.
Define a ConcurrentKafkaListenerContainerFactory with a filter strategy in the configuration class
@Bean
public ConcurrentKafkaListenerContainerFactory filterContainerFactory() {
    ConcurrentKafkaListenerContainerFactory factory = new ConcurrentKafkaListenerContainerFactory();
    factory.setConsumerFactory(consumerFactory);
    // Filtered messages are discarded
    factory.setAckDiscarded(true);
    // Filter strategy: keep records whose value is an even number
    factory.setRecordFilterStrategy(consumerRecord -> {
        if (Integer.parseInt(consumerRecord.value().toString()) % 2 == 0) {
            // returning false keeps the record
            return false;
        }
        // returning true filters the record out
        return true;
    });
    return factory;
}
(2) Configure the filtering listener
// Filtering listener
@KafkaListener(topics = {"testFilterTopic"}, containerFactory = "filterContainerFactory")
public void onMessage6(ConsumerRecord<?, ?> record) {
    System.out.println(record.value());
}
