代码运行版本
| springboot.version=2.7.7 |
| spring-kafka.version=2.8.11 |
1 POM
| <dependency> |
| <groupId>org.springframework.kafka</groupId> |
| <artifactId>spring-kafka</artifactId> |
| |
| <version>2.8.11</version> |
| </dependency> |
2 JavaConfig
KafkaConfiguration.java 生产者消费者配置类
TopicProperties 主题属性类
2.1 KafkaConfiguration.java
| @Configuration |
| public class KafkaConfiguration { |
| |
| @Value("${spring.kafka.bootstrap-servers}") |
| private String bootstrapServers; |
| |
| @Value("${spring.kafka.producer.retries}") |
| private Integer retries; |
| |
| @Value("${spring.kafka.producer.batch-size}") |
| private Integer batchSize; |
| |
| @Value("${spring.kafka.producer.buffer-memory}") |
| private Integer bufferMemory; |
| |
| @Value("${spring.kafka.consumer.group-id}") |
| private String groupId; |
| |
| @Value("${spring.kafka.consumer.auto-offset-reset}") |
| private String autoOffsetReset; |
| |
| @Value("${spring.kafka.consumer.max-poll-records}") |
| private Integer maxPollRecords; |
| |
| @Value("${spring.kafka.consumer.batch.concurrency}") |
| private Integer batchConcurrency; |
| |
| @Value("${spring.kafka.consumer.enable-auto-commit}") |
| private Boolean autoCommit; |
| |
| @Value("${spring.kafka.consumer.auto-commit-interval}") |
| private Integer autoCommitInterval; |
| |
| |
| @Value("${spring.kafka.properties.sasl.mechanism}") |
| private String mechanism; |
| |
| @Value("${spring.kafka.properties.sasl.jaas.config}") |
| private String jaasConfig; |
| |
| @Value("${spring.kafka.properties.security.protocol}") |
| private String protocol; |
| |
| |
| |
| |
| |
| @Bean |
| public Map<String, Object> producerConfigs() { |
| |
| Map<String, Object> props = new HashMap<>(); |
| |
| |
| props.put(ProducerConfig.ACKS_CONFIG, "1"); |
| |
| props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); |
| |
| props.put(ProducerConfig.RETRIES_CONFIG, retries); |
| |
| props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize); |
| |
| props.put(ProducerConfig.LINGER_MS_CONFIG, 1); |
| |
| props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory); |
| |
| props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class); |
| |
| props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class); |
| |
| |
| |
| props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol); |
| |
| props.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); |
| |
| props.put(SaslConfigs.SASL_MECHANISM, mechanism); |
| return props; |
| } |
| |
| |
| |
| |
| @Bean |
| public ProducerFactory<String, String> producerFactory() { |
| return new DefaultKafkaProducerFactory<>(producerConfigs()); |
| } |
| |
| |
| |
| |
| @Bean |
| public KafkaTemplate<String, String> kafkaTemplate() { |
| return new KafkaTemplate<>(producerFactory()); |
| } |
| |
| |
| |
| |
| |
| @Bean |
| public Map<String, Object> consumerConfigs() { |
| Map<String, Object> props = new HashMap<>(); |
| |
| props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); |
| |
| props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset); |
| |
| props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); |
| |
| props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); |
| |
| props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit); |
| |
| props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 30000); |
| |
| props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000); |
| |
| props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); |
| |
| props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); |
| |
| |
| props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, protocol); |
| |
| props.put(SaslConfigs.SASL_JAAS_CONFIG, jaasConfig); |
| |
| props.put(SaslConfigs.SASL_MECHANISM, mechanism); |
| return props; |
| } |
| |
| |
| |
| |
| @Bean |
| public ConcurrentKafkaListenerContainerFactory<String, String> batchFactory() { |
| ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>(); |
| factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs())); |
| |
| factory.setConcurrency(batchConcurrency); |
| factory.getContainerProperties().setPollTimeout(3000); |
| factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE); |
| |
| factory.setBatchListener(true); |
| return factory; |
| } |
| |
| } |
2.2 TopicProperties.java
| @RefreshScope |
| @ConfigurationProperties(prefix = "spring.kafka.topic") |
| @Component |
| @Data |
| public class TopicProperties { |
| |
| |
| |
| |
| private String sourceTopic; |
| |
| } |
3.Producer & Consumer
3.1 发送消息
生产者发送消息,或者直接使用 KafkaUtils 中的方法
| @Autowired |
| private KafkaTemplate<String, String> kafkaTemplate; |
| |
| ... |
| |
| /** Serializes {@code source} to JSON and publishes it to the configured source topic. |
|  * NOTE(review): KafkaTemplate.send() is asynchronous — the returned {@code true} only means |
|  * the send was handed to the producer, not that delivery succeeded; failures surface only |
|  * in CommonListenableFutureCallback.onFailure. */ |
| private boolean sendKafkaMsg(Source source) { |
| kafkaTemplate.send(topicProperties.getSourceTopic(), JSONObject.toJSONString(source)).addCallback(new CommonListenableFutureCallback()); |
| return true; |
| } |
3.2 消费消息
| @Slf4j |
| @Component |
| public class SourceTopicListener { |
| |
| @Autowired |
| private SourceService sourceService; |
| |
| @Autowired |
| private RobotEventBuilder robotEventBuilder; |
| |
| |
| @KafkaListener(topics = {"${spring.kafka.topic.source-topic}"}, containerFactory = "batchFactory") |
| public void consumer(List<ConsumerRecord<String, String>> consumerRecords, Acknowledgment ack) { |
| List<Source> sources = new ArrayList<>(); |
| for (ConsumerRecord<String, String> record : consumerRecords) { |
| Source source = JSONObject.parseObject(record.value(), Source.class); |
| sources.add(source); |
| } |
| |
| |
| ack.acknowledge(); |
| } |
| } |
3.3 消息发送回调
| @Slf4j |
| public class CommonListenableFutureCallback implements ListenableFutureCallback<SendResult<String, String>> { |
| |
| @Override |
| public void onFailure(Throwable ex) { |
| log.error("Kafka 消息发送失败", ex); |
| } |
| |
| @Override |
| public void onSuccess(SendResult<String, String> result) { |
| log.info("kafka 消息发送成功"); |
| } |
| } |
4 application.yml
spring:
  kafka:
    # Kafka broker address(es)
    bootstrap-servers: 192.168.1.10:9092

    properties:
      sasl:
        mechanism: PLAIN
        jaas:
          # FIX: the PLAIN mechanism must use PlainLoginModule; ScramLoginModule
          # (as originally written) pairs with the SCRAM-SHA-* mechanisms.
          config: org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="1qaz@WSX";
      security:
        protocol: SASL_PLAINTEXT
    producer:
      # send retries on transient failures
      retries: 3
      # producer batch size in bytes
      batch-size: 1000
      # total producer buffer memory in bytes
      buffer-memory: 33554432

    consumer:
      group-id: analyze-service-dev
      # where to start when no committed offset exists
      auto-offset-reset: earliest
      # max records per poll()
      max-poll-records: 10
      # must stay false: batchFactory uses MANUAL_IMMEDIATE ack
      enable-auto-commit: false
      # only effective when enable-auto-commit=true
      auto-commit-interval: 1000
      # listener container concurrency (<= partition count)
      batch.concurrency: 3
    topic:
      # bound by TopicProperties
      source-topic: evt_source_topic_dev

      event-topic: evt_event_topic_dev
5 Utils
| @Component |
| public class KafkaUtils { |
| |
| @Value("${spring.kafka.bootstrap-servers}") |
| private String springKafkaBootstrapServers; |
| |
| private AdminClient adminClient; |
| |
| @Autowired |
| private KafkaTemplate<String, String> kafkaTemplate; |
| |
| |
| |
| |
| |
| |
| |
| @PostConstruct |
| private void initAdminClient() { |
| Map<String, Object> props = new HashMap<>(1); |
| props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, springKafkaBootstrapServers); |
| adminClient = KafkaAdminClient.create(props); |
| } |
| |
| |
| |
| |
| public void createTopic(Collection<NewTopic> newTopics) { |
| adminClient.createTopics(newTopics); |
| } |
| |
| |
| |
| |
| public void deleteTopic(Collection<String> topics) { |
| adminClient.deleteTopics(topics); |
| } |
| |
| |
| |
| |
| public String getTopicInfo(Collection<String> topics) { |
| AtomicReference<String> info = new AtomicReference<>(""); |
| try { |
| adminClient.describeTopics(topics).all().get().forEach((topic, description) -> { |
| for (TopicPartitionInfo partition : description.partitions()) { |
| info.set(info + partition.toString() + "\n"); |
| } |
| }); |
| } catch (InterruptedException | ExecutionException e) { |
| e.printStackTrace(); |
| } |
| return info.get(); |
| } |
| |
| |
| |
| |
| public List<String> getAllTopic() { |
| try { |
| return adminClient.listTopics().listings().get().stream().map(TopicListing::name).collect(Collectors.toList()); |
| } catch (InterruptedException | ExecutionException e) { |
| e.printStackTrace(); |
| } |
| return Lists.newArrayList(); |
| } |
| |
| |
| |
| |
| public void sendMessage(String topic, String message) { |
| kafkaTemplate.send(topic, message); |
| } |
| |
| public void sendMessage(String topic, String key, String message) { |
| kafkaTemplate.send(topic, key, message); |
| } |
| } |
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 全程不用写代码,我用AI程序员写了一个飞机大战
· DeepSeek 开源周回顾「GitHub 热点速览」
· 记一次.NET内存居高不下排查解决与启示
· MongoDB 8.0这个新功能碉堡了,比商业数据库还牛
· .NET10 - 预览版1新功能体验(一)