Connecting Spring Boot to Kafka

Kafka configuration

Add the Maven dependency:

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.5</version>
</dependency>
Spring Boot auto-configures a KafkaTemplate from these settings.

Producer configuration (application.properties):

spring.kafka.bootstrap-servers = 17*.**.**.***:19091,17*.**.***.***:19091,17*.**.***.***:19091
spring.kafka.properties.sasl.mechanism = SCRAM-SHA-256
spring.kafka.properties.sasl.jaas.config = org.apache.kafka.common.security.scram.ScramLoginModule required username="***" password="****";
spring.kafka.properties.security.protocol = SASL_PLAINTEXT
spring.kafka.producer.retries = 3
spring.kafka.producer.acks = all
spring.kafka.producer.batch-size = 512000
spring.kafka.producer.properties.linger.ms = 1
spring.kafka.producer.buffer-memory = 33554432
spring.kafka.producer.key-serializer = org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer = org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.compression-type = lz4
spring.kafka.properties.enable.idempotence = false
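
With the dependency and properties above, the auto-configured template can be injected and used directly. A minimal sketch (the class name and topic here are placeholders, not from the original code):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;

@Service
public class QuickSendDemo {

    // Auto-configured by Spring Boot from the spring.kafka.* properties above
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void sendDemo() {
        // Fire-and-forget send; the returned ListenableFuture can carry callbacks
        kafkaTemplate.send("demo-topic", "hello kafka");
    }
}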



Utility class for sending:
package com.xx.xx.util;

import cn.hutool.extra.spring.SpringUtil;
import com.alibaba.fastjson.JSONObject;
import com.epay.dtc.ms.batch.model.KafkaCallback;
import lombok.extern.slf4j.Slf4j;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.util.concurrent.FailureCallback;
import org.springframework.util.concurrent.SuccessCallback;

@Slf4j
public class KafkaUtils {

    private static final KafkaTemplate<String, String> kafkaTemplate = SpringUtil.getBean("kafkaTemplate");

    // Default callbacks: log the outcome of every send
    private static final SuccessCallback<Object> successCallback = result -> log.info("Message sent successfully ======== {}", result);

    private static final FailureCallback failureCallback = throwable -> log.error("Message send failed ======== {}", throwable.toString());

    public static <T> void send(String topic, T message) {
        send(topic, message, successCallback, failureCallback);
    }

    public static <T> void send(String topic, T message, FailureCallback failureCallback) {
        send(topic, message, successCallback, failureCallback);
    }

    public static <T> void send(String topic, T message, SuccessCallback<Object> successCallback, FailureCallback failureCallback) {
        kafkaTemplate.send(topic, JSONObject.toJSONString(message)).addCallback(successCallback, failureCallback);
    }

    // KafkaCallback is a project-specific ListenableFutureCallback implementation.
    // The payload is JSON-serialized here too, since the template's value serializer is StringSerializer.
    public static <T> void sendAndCallBack(String topic, T message, KafkaCallback callback) {
        kafkaTemplate.send(topic, JSONObject.toJSONString(message)).addCallback(callback);
    }
}
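
Callers that need different failure handling can pass their own callback; for example (the topic name is a placeholder):

// Override only the failure callback; success still uses the default logging
KafkaUtils.send("demo-topic", "payload",
        throwable -> log.error("send failed, flagging for replay", throwable));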
// Producer side
// DataDTO, KafkaMessage and KafkaUtils are project classes; their imports are omitted here
import cn.hutool.core.util.IdUtil;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.util.Date;
import java.util.Random;

@Service
@Slf4j
public class TestSendServiceImpl {

    @Value("${test.maxrows}")
    int maxRows;

    @Value("${test.batchcount}")
    int batchCount;

    public void send() {
        // 1. Build and send the test data
        Random random = new Random();
        for (int i = 0; i < maxRows; i++) {
            DataDTO dataDTO = new DataDTO();
            dataDTO.setDate(new Date());
            dataDTO.setPhone(callNO().toString());
            dataDTO.setVar1(random.nextFloat());
            dataDTO.setVar2(random.nextDouble());
            String dataDTOStr = JSON.toJSONString(dataDTO);

            KafkaMessage kafkaMessage = new KafkaMessage();
            kafkaMessage.setData(dataDTOStr);
            kafkaMessage.setRequestId(IdUtil.simpleUUID());
            kafkaMessage.setDataTime(new Date());
            KafkaUtils.send("***-**-TEST", kafkaMessage);
        }
    }

    // Valid first-three-digit prefixes of China Mobile phone numbers
    public static final String[] YD = {
            "134", "135", "136",
            "137", "138", "139",
            "150", "151", "152",
            "157", "158", "159",
            "180", "181", "182",
            "183", "184", "185",
            "174", "192", "178",
    };

    // Build a random 11-digit mobile number: one random prefix plus 8 random digits
    public static StringBuffer callNO() {
        Random random = new Random();
        StringBuffer stringBuffer = new StringBuffer();
        // Pick one of the prefixes at random
        stringBuffer.append(YD[random.nextInt(YD.length)]);
        // Digits 4 through 11 are each a random 0-9
        for (int i = 0; i < 8; i++) {
            stringBuffer.append(random.nextInt(10));
        }
        return stringBuffer;
    }
}
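
The DataDTO and KafkaMessage classes are not shown in the post; a minimal Lombok sketch consistent with how they are used above (field names inferred from the setters, one class per file in practice) might look like:

import lombok.Data;
import java.util.Date;

// Hypothetical reconstruction of the business payload
@Data
class DataDTO {
    private Date date;
    private String phone;
    private float var1;
    private double var2;
}

// Hypothetical reconstruction of the Kafka envelope
@Data
class KafkaMessage {
    private String requestId; // unique id, used as the Redis hash field when a message fails
    private String data;      // JSON-serialized DataDTO
    private Date dataTime;    // production timestamp
}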
// Consumer side
Configuration (application.properties):

spring.kafka.bootstrap-servers = ***:19091,***:19091,***:19091
spring.kafka.consumer.enable-auto-commit = false
spring.kafka.listener.ack-mode = manual_immediate
spring.kafka.consumer.auto-offset-reset = latest
spring.kafka.consumer.properties.session.timeout.ms = 120000
spring.kafka.consumer.properties.request.timeout.ms = 600000
spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer = org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.listener.missing-topics-fatal = false
spring.kafka.listener.type = batch
spring.kafka.consumer.max-poll-records = 1000
spring.kafka.listener.concurrency = 3
spring.kafka.properties.sasl.mechanism = SCRAM-SHA-256
spring.kafka.properties.sasl.jaas.config = org.apache.kafka.common.security.scram.ScramLoginModule required username="***" password="****";
spring.kafka.properties.security.protocol = SASL_PLAINTEXT
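
Two of these settings shape the listener code: listener.type=batch means each callback receives the whole poll as a List, and enable-auto-commit=false with ack-mode=manual_immediate means offsets are committed only when the listener calls acknowledge(). A minimal sketch (topic, group, and class name are placeholders):

import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

@Service
public class BatchAckDemo {

    // listener.type=batch -> the method receives the whole poll as a List;
    // ack-mode=manual_immediate -> offsets commit only on acknowledge()
    @KafkaListener(topics = "demo-topic", groupId = "demo-group")
    public void onBatch(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        // process records ...
        ack.acknowledge();
    }
}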

 
// Configure the listener container factory
package com.***.kafka.kafkaconfig;

import cn.hutool.json.JSONUtil;
import com.***.constant.GalaxyRedisConstant;
import com.***.common.model.KafkaMessage;
import com.***.dao.GalaxyRedisDao;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.BatchErrorHandler;
import org.springframework.kafka.listener.ConsumerAwareErrorHandler;
import org.springframework.stereotype.Component;

@Component
@Slf4j
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;
    @Autowired
    private KafkaTemplate kafkaTemplate;
    @Autowired
    private KafkaProperties properties;
    @Autowired
    private GalaxyRedisDao galaxyRedisDao;
    @Autowired
    private ConcurrentKafkaListenerContainerFactory factory;

    // Building the consumer property map by hand exposes more ConsumerConfig
    // options than Spring Boot's auto-configuration does:
    // private Map<String, Object> consumerProperties() {
    //     Map<String, Object> props = new HashMap<>();
    //     // Disable auto-commit
    //     props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    //     props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
    //     props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    //     props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    //     // Maximum records per batch poll (spring.kafka.consumer.max-poll-records=5)
    //     props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 5);
    //     props.put(ConsumerConfig.GROUP_ID_CONFIG, "activity-service");
    //     props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    //     return props;
    // }

    // A DefaultKafkaConsumerFactory built from the map above instead of the
    // one Spring Boot creates by default:
    // @Bean("consumerFactory")
    // public DefaultKafkaConsumerFactory consumerFactory() {
    //     return new DefaultKafkaConsumerFactory(consumerProperties());
    // }

    // Customize the auto-configured listener container factory
    @Bean("listenerContainerFactory")
    public ConcurrentKafkaListenerContainerFactory listenerContainerFactory() {
        // Poll timeout
        factory.getContainerProperties().setPollTimeout(3000);
        // Register the single-record error handler
        factory.setErrorHandler(new ConsumerAwareErrorHandler() {
            @Override
            public void handle(Exception thrownException, ConsumerRecord<?, ?> data, Consumer<?, ?> consumer) {
                log.error("Single-record failure " + data.toString() + "==========" + consumer.toString());
                // kafkaTemplate.send(data.topic() + ".DLT", data);
                KafkaMessage message = JSONUtil.toBean((String) data.value(), KafkaMessage.class);
                // Park the failed message in Redis first
                galaxyRedisDao.hset(GalaxyRedisConstant.SYSTEM_NAME + "consumer_error_list", message.getRequestId(), data.value());
            }
        });
        // Register the batch error handler
        factory.setBatchErrorHandler(new BatchErrorHandler() {
            @Override
            public void handle(Exception thrownException, ConsumerRecords<?, ?> data) {
                log.error("Batch failure", thrownException);
                for (ConsumerRecord<?, ?> record : data) {
                    // kafkaTemplate.send(record.topic() + ".DLT", record.toString());
                    // Park each failed message in Redis first
                    KafkaMessage message = JSONUtil.toBean((String) record.value(), KafkaMessage.class);
                    galaxyRedisDao.hset(GalaxyRedisConstant.SYSTEM_NAME + "consumer_error_list", message.getRequestId(), record.value());
                }
            }
        });
        // Batch consumption is already enabled via spring.kafka.listener.type=batch
        // factory.setBatchListener(true);
        return factory;
    }

}
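
The error handlers above only park failed messages in a Redis hash; the post does not show how they are replayed. A hypothetical recovery job, sketched with a plain RedisTemplate (the schedule, immediate delete, and reuse of the test topic are assumptions, not from the original code):

package com.***.kafka.kafkaconfig;

import com.***.constant.GalaxyRedisConstant;
import com.xx.xx.util.KafkaUtils;
import java.util.Map;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

@Component
public class ConsumerErrorReplayJob {

    @Autowired
    private RedisTemplate<String, Object> redisTemplate;

    // Hypothetical: requires @EnableScheduling on a configuration class
    @Scheduled(fixedDelay = 60000)
    public void replay() {
        // Same hash key the error handlers above write to
        String key = GalaxyRedisConstant.SYSTEM_NAME + "consumer_error_list";
        Map<Object, Object> parked = redisTemplate.opsForHash().entries(key);
        parked.forEach((requestId, payload) -> {
            // Naive replay: a real job would delete only after a confirmed send
            KafkaUtils.send("***-**-TEST", payload);
            redisTemplate.opsForHash().delete(key, requestId);
        });
    }
}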

 

// The consumer implementation
package com.***.***.**.***.****.service.impl;

import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson.JSONObject;
import com.epay.dtc.ms.common.model.KafkaMessage;
import com.epay.dtc.ms.exe.provider.configuration.thread.ThreadPoolExecutor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DataAccessException;
import org.springframework.data.redis.connection.RedisConnection;
import org.springframework.data.redis.connection.RedisStringCommands;
import org.springframework.data.redis.core.RedisCallback;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.types.Expiration;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.util.List;
import java.util.Random;
import java.util.Vector;
import java.util.concurrent.TimeUnit;

/**
 * @author zhangsy
 * @date 2023/2/3 9:54
 * @description Consumer class
 */
@Service
@Slf4j
public class TestConsumerServiceImpl {

    /* @Autowired
    GalaxyRedisDao galaxyRedisDao; */

    @Resource(name = "secondaryRedisTemplate")
    private RedisTemplate<String, Object> redisTemplate;

    @Autowired
    ThreadPoolExecutor threadPoolExecutor;

    @KafkaListener(topics = "***-**-TEST", groupId = "***-**-TEST-CONSUMER-GROUP", containerFactory = "kafkaListenerContainerFactory", concurrency = "16")
    public void consumer(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        log.info("Received {} records", records.size());
        long startTime = System.currentTimeMillis();
        try {
            // Collect the batch; a plain ArrayList would also do on a single listener thread
            Vector<KafkaMessage> kafkaMessageVector = new Vector<>();
            Random random = new Random();
            for (ConsumerRecord<String, String> re : records) {
                KafkaMessage kafkaMessage = JSONUtil.toBean(re.value(), KafkaMessage.class);
                // galaxyRedisDao.setWithTTL("dtc-ms" + random.nextInt(), kafkaMessage.getData().toString(), 120);
                kafkaMessageVector.add(kafkaMessage);
                /* threadPoolExecutor.execute("send", () -> {
                    KafkaMessage msg = JSONUtil.toBean(re.value(), KafkaMessage.class);
                    galaxyRedisDao.setWithTTL("dtc-ms" + random.nextInt(), msg.getData().toString(), 120);
                }); */
            }
            log.info("Writing {} records to Redis: start", records.size());
            // Pipeline the SETs so the whole batch reaches Redis in one round trip
            redisTemplate.executePipelined(new RedisCallback<String>() {
                @Override
                public String doInRedis(RedisConnection connection) throws DataAccessException {
                    for (KafkaMessage kafkaMessage : kafkaMessageVector) {
                        String key = "dtc-ms" + random.nextInt();
                        String value = JSONObject.toJSONString(kafkaMessage.getData());
                        // SET with a 2-minute TTL
                        connection.set(key.getBytes(), value.getBytes(), Expiration.from(2, TimeUnit.MINUTES), RedisStringCommands.SetOption.UPSERT);
                    }
                    return null;
                }
            });
        } catch (Exception e) {
            // Log and fall through; offsets are still committed below, so the batch is not redelivered
            log.error("Batch consume failed", e);
        } finally {
            long end = System.currentTimeMillis();
            log.info("Writing to Redis finished, took {} ms", end - startTime);
            ack.acknowledge();
        }
    }

}
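
After running the producer, a quick test-only spot check that the consumer really wrote to Redis (KEYS is O(N) over the keyspace, so keep this out of production code):

// In a test or debug endpoint, using the same redisTemplate as above
java.util.Set<String> keys = redisTemplate.keys("dtc-ms*");
log.info("consumer wrote {} keys to Redis", keys == null ? 0 : keys.size());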