Kafka consumer configuration
I. Plain client approach:
1. Add the required dependency
<!-- Add this to your pom; pick the version that matches your setup -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.3.0</version>
</dependency>
2. Code implementation
import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class KafkaConfig {

    private static final String GROUP_ID = "groupId";

    private static Properties getProperties() {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        // consumer group
        properties.setProperty("group.id", GROUP_ID);
        // key deserializer
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // value deserializer
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // security protocol; set only if your cluster requires it
        properties.setProperty("security.protocol", "SASL_PLAINTEXT");
        // SASL mechanism; set only if your cluster requires it
        properties.setProperty("sasl.mechanism", "SCRAM-SHA-256");
        // JAAS configuration
        properties.setProperty("sasl.jaas.config",
                "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"username\" password=\"password\";");
        return properties;
    }

    public static void main(String[] args) {
        Properties properties = getProperties();
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties);
        // subscribe to the topic
        kafkaConsumer.subscribe(Arrays.asList("topicName"));
        while (true) {
            // poll() fetches the next batch of records
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
            System.out.println("=======================================================");
            for (ConsumerRecord<String, String> data : records) {
                System.out.println("==========" + data.value() + "=================");
                // process the record here
            }
            System.out.println("=======================================================");
        }
    }
}
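The loop above relies on the client's default auto-commit behavior. If you disable auto-commit (as the Spring configuration in section II does), you must commit offsets yourself after processing. A minimal sketch of that variant, replacing the poll loop in main() above; enable.auto.commit and commitSync() are standard kafka-clients API, the loop body is illustrative:

// Set this before creating the consumer
properties.setProperty("enable.auto.commit", "false");

while (true) {
    ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> data : records) {
        // process the record here
    }
    if (!records.isEmpty()) {
        // synchronously commit the offsets returned by this poll()
        kafkaConsumer.commitSync();
    }
}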
II. Listening with the @KafkaListener annotation
1. Add the dependency
<!-- pick the version that matches your setup -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.3.7.RELEASE</version>
</dependency>
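The yml in the next step assumes a Spring Boot application (spring-kafka 2.3.x is the line used by Spring Boot 2.2.x). A minimal entry point in case the project does not have one yet; the class name is illustrative and spring-boot-starter must also be on the classpath:

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Minimal Spring Boot entry point; Boot's auto-configuration turns the
// spring.kafka.* settings below into consumer and listener beans.
@SpringBootApplication
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}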
2. YML configuration
spring:
  # Flyway settings carried over from the same application (unrelated to Kafka)
  flyway:
    enabled: true
    baseline-on-migrate: true
    validate-on-migrate: false
    out-of-order: false
    clean-disabled: true
  kafka:
    bootstrap-servers: 127.0.0.1:9092
    # The security properties below are optional; configure them only if your cluster requires SASL
    properties:
      security:
        protocol: SASL_PLAINTEXT
      sasl:
        mechanism: SCRAM-SHA-256
        jaas:
          config: org.apache.kafka.common.security.scram.ScramLoginModule required username="username" password="password";
    consumer:
      # Default consumer group id; within one group, consumers never read the same message, so group.id names the group
      group-id: groupId
      # key/value deserializers
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Valid values are earliest and latest (the old names smallest/largest no longer apply):
      # earliest rereads from the beginning of the partition when no committed offset exists,
      # latest reads from the end of the log. earliest is the usual choice.
      #auto-offset-reset: earliest
      # disable auto commit
      enable-auto-commit: false
    listener:
      # RECORD            commit after each record is handled by the listener (ListenerConsumer)
      # BATCH             commit after each batch returned by poll() is handled
      # TIME              commit after a batch is handled and more than TIME has passed since the last commit
      # COUNT             commit after a batch is handled and more than COUNT records have been processed
      # COUNT_TIME        commit when either the TIME or the COUNT condition is met
      # MANUAL            commit after a batch is handled and Acknowledgment.acknowledge() has been called
      # MANUAL_IMMEDIATE  commit immediately when Acknowledgment.acknowledge() is called
      ack-mode: manual_immediate
      # by default an error is raised when a listened-to topic does not exist
      missing-topics-fatal: false

# Custom settings read by the listener in step 3
kafka:
  # whether the listener starts automatically
  autostartup: false
  groupId: groupId
  topicCharge: topicName
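If the project does not rely on Spring Boot auto-configuration, the same consumer and listener settings can be declared in Java config instead. A sketch of that equivalent, not part of the original setup; class and bean names are illustrative and the SASL properties are omitted for brevity:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

// Assumption: illustrative Java-config equivalent of the yml above, for
// projects that do not use Spring Boot auto-configuration.
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "groupId");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // matches ack-mode: manual_immediate in the yml
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }
}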
3. Code implementation
@KafkaListener(topics = "${kafka.topicCharge}", groupId = "${kafka.groupId}", autoStartup = "${kafka.autostartup}")
public void listenGroup(ConsumerRecord<String, String> record, Acknowledgment ack) {
    System.out.println(record.value());
    // process the record here
    // manually acknowledge so the offset is committed (ack-mode: manual_immediate)
    ack.acknowledge();
}
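Since kafka.autostartup is false, this listener will not consume anything until it is started explicitly. One way to start it at runtime is through KafkaListenerEndpointRegistry; a sketch, assuming the @KafkaListener above is additionally given id = "chargeListener" (the id is an illustrative addition, not in the original):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.stereotype.Component;

// Assumption: illustrative helper; requires the @KafkaListener above to
// declare id = "chargeListener".
@Component
public class ListenerControl {

    @Autowired
    private KafkaListenerEndpointRegistry registry;

    public void startChargeListener() {
        // start the container that was registered with autoStartup = false
        registry.getListenerContainer("chargeListener").start();
    }

    public void stopChargeListener() {
        registry.getListenerContainer("chargeListener").stop();
    }
}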