//Problem: while consuming from Kafka, a later-registered bootstrap-server address
//overwrote an earlier one. The root cause was bean registration: the consumer
//factories live in different classes but share one configuration, so any listener
//without an explicit container factory fell back to the default bean — i.e. whichever
//bootstrap.servers value was injected last.
//Fix: give each listener its own container factory. Factory 2 below is left as the
//default; to bind a listener to a specific factory, set on the listener:
//containerFactory = "<name of the factory bean to use>"
//Consumer factory 1 (Integer keys) — backed by consumerConfig1(), which adds SASL security.
public ConsumerFactory<Integer, String> consumer1Factory() {
    Map<String, Object> props = consumerConfig1();
    return new DefaultKafkaConsumerFactory<>(props);
}
//Config 1: shared base properties plus SASL security settings.
/**
 * Builds the Kafka consumer properties for factory 1.
 * Reuses {@link #consumerConfigs()} for the common settings (bootstrap servers,
 * group id, offsets, deserializers, batching) so the two config methods cannot
 * drift apart, then layers the SASL security properties on top.
 * (Comment out the three security lines if authentication is not enabled.)
 */
public Map<String, Object> consumerConfig1() {
    // Start from the shared base configuration instead of duplicating every entry.
    Map<String, Object> propsMap = consumerConfigs();
    // Kafka SASL security configuration.
    propsMap.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, securityProtocol);
    propsMap.put(SaslConfigs.SASL_MECHANISM, saslMechanism);
    // Substitute credentials into the JAAS template, e.g. "...username=@[username] password=@[password];"
    propsMap.put(SaslConfigs.SASL_JAAS_CONFIG, saslJaasConfig.replace("@[username]", userName).replace("@[password]", password));
    return propsMap;
}
//Consumer factory 2 (String keys) — backed by the shared consumerConfigs() (no SASL).
public ConsumerFactory<String, String> consumerFactory() {
    Map<String, Object> props = consumerConfigs();
    return new DefaultKafkaConsumerFactory<>(props);
}
//Config 2: base consumer properties, assembled from the injected configuration fields.
/**
 * Builds the shared Kafka consumer properties (no security settings).
 */
public Map<String, Object> consumerConfigs() {
    Map<String, Object> config = new HashMap<>();
    // Connection and group membership.
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    config.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    config.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
    // Offset management.
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
    config.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
    // Deserialization and per-poll batch size.
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
    return config;
}
//Container factory beans — one per consumer factory, so each listener can bind to its
//own bootstrap servers instead of whatever default bean was registered last.
//Listener container factory for topic group 1 (batch consumption).
@Bean("kafkaListenerContainerNwFactory")
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<Integer, String>>
kafkaListenerContainerNwFactory() {
    ConcurrentKafkaListenerContainerFactory<Integer, String> containerFactory =
            new ConcurrentKafkaListenerContainerFactory<>();
    // NOTE(review): consumerNwFactory() is not defined in this snippet — confirm it
    // exists elsewhere in the class, or whether consumer1Factory() was intended here.
    containerFactory.setConsumerFactory(consumerNwFactory());
    containerFactory.setConcurrency(concurrency);
    // Batch mode: each poll delivers up to ConsumerConfig.MAX_POLL_RECORDS_CONFIG records.
    containerFactory.setBatchListener(true);
    // Offsets are committed only when the listener calls Acknowledgment.acknowledge().
    containerFactory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
    return containerFactory;
}
//Default listener container factory (bean name = method name); used by any
//@KafkaListener that does not set containerFactory explicitly.
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> containerFactory =
            new ConcurrentKafkaListenerContainerFactory<>();
    containerFactory.setConsumerFactory(consumerFactory());
    containerFactory.setConcurrency(concurrency);
    // Manual-immediate ack: offsets commit only when Acknowledgment.acknowledge() is called.
    containerFactory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
    return containerFactory;
}
//Listener endpoints.
//Topic group 1: batch listener bound explicitly to the "Nw" container factory, so it
//uses that factory's bootstrap servers instead of the default bean's.
//Topics come from the comma-separated property kafka.consumer.topics1 (SpEL split).
//NOTE(review): ack mode is MANUAL_IMMEDIATE but this empty body never calls
//ack.acknowledge(), so offsets would never be committed — confirm this is a placeholder.
@KafkaListener(topics = "#{'${kafka.consumer.topics1}'.split(',')}",containerFactory ="kafkaListenerContainerNwFactory")
public void listen(List<ConsumerRecord<String, String>> records,Acknowledgment ack) throws IOException {}
//Topic group 2: uses the default container factory ("kafkaListenerContainerFactory").
//Topics come from the comma-separated property kafka.consumer.topics2 (SpEL split).
//Renamed from listen(...): the original duplicated the exact signature of the topic-1
//listener in the same class, which does not compile in Java. The method is invoked
//only by the framework via the annotation, so the rename breaks no caller.
//NOTE(review): ack mode is MANUAL_IMMEDIATE — call ack.acknowledge() once the records
//are processed, or offsets will never be committed.
@KafkaListener(topics = "#{'${kafka.consumer.topics2}'.split(',')}")
public void listenTopic2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) throws IOException {}