1. Reusable code:

1.1 KafkaConfig.java

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties.AckMode;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
@ConditionalOnExpression("${kafka.enable:false}")
public class KafkaConfig implements CommandLineRunner {

    @Value("${kafka.servers}")
    private String kafka_servers;

    @Value("${kafka.groupId:default}")
    private String kafka_groupId;

    // JAAS configuration, used when the production environment requires a username/password (SASL)
    @Value("${kafka.account:}")
    private String kafka_account;

    @Autowired
    private KafkaListenerEndpointRegistry registry;

    // Factory that creates Kafka listener containers

    @Bean
    public ConcurrentKafkaListenerContainerFactory<Integer, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setAutoStartup(false);
        return factory;
    }

    // Create the consumer factory from the parameters defined in consumerProps
    @Bean
    public ConsumerFactory<Integer, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerProps());
    }
    // Kafka manual acknowledgment: the consumer must explicitly call ack.acknowledge() after it has finished processing a message.
    // This tells Kafka the message was processed successfully and the offset can be advanced past it, so if the consumer restarts or fails it resumes from the position after the last successfully processed message.
    @Bean("ackContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<Integer, String> ackContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerProps()));
        factory.getContainerProperties().setAckMode(AckMode.MANUAL_IMMEDIATE);
        factory.setAutoStartup(false);
        return factory;
    }

    // Create the producer factory from the parameters defined in senderProps
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(senderProps());
    }

    // kafkaTemplate provides the Kafka send/receive operations
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        KafkaTemplate<String, String> template = new KafkaTemplate<String, String>(producerFactory());
        return template;
    }

    // Consumer configuration
    private Map<String, Object> consumerProps() {
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka_servers);
        // Consumer group ID
        props.put(ConsumerConfig.GROUP_ID_CONFIG, kafka_groupId);
        // Disable auto-commit; offsets are acknowledged manually
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Auto-commit interval (only takes effect when auto-commit is enabled)
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        // Session timeout
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "120000");
        // Maximum number of records returned per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 5);
        // Key deserializer
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Value deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        if(StringUtils.isNotBlank(kafka_account)) {
            // Set security.protocol
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            // Set sasl.mechanism
            props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            // Set sasl.jaas.config (the value of kafka.account)
            props.put(SaslConfigs.SASL_JAAS_CONFIG, kafka_account);
        }

        return props;
    }

    // Producer configuration
    private Map<String, Object> senderProps() {
        Map<String, Object> props = new HashMap<>();
        // Broker addresses
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka_servers);
        // Retries; 0 disables the retry mechanism
        props.put(ProducerConfig.RETRIES_CONFIG, 1);
        // Batch size in bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // Linger for 1 ms so records are batched, reducing the number of send requests and improving throughput
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // Total memory (bytes) the producer may use to buffer records waiting to be sent
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 104857600);
        // acks=-1 (all): wait for the full set of in-sync replicas to acknowledge the record
        props.put(ProducerConfig.ACKS_CONFIG, "-1");
        // Key serializer
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Value serializer
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Raise the maximum request size so the producer can send JSON payloads up to 100 MB
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 104857600);

        if(StringUtils.isNotBlank(kafka_account)) {
            // Set security.protocol
            props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
            // Set sasl.mechanism
            props.put(SaslConfigs.SASL_MECHANISM, "PLAIN");
            // Set sasl.jaas.config (the value of kafka.account)
            props.put(SaslConfigs.SASL_JAAS_CONFIG, kafka_account);
        }
        return props;
    }

    @Override
    public void run(String... args) throws Exception {
        // Containers were created with autoStartup=false, so start them once the application is fully up
        System.out.println("Starting Kafka listeners");
        registry.start();
    }
}

 

1.2 application.yml production configuration (the development environment does not need account; account is only required for SASL authentication against the production cluster):

log.path: ./log/

kafka:
  enable: true
  servers: 36.137.133.3:30000
  groupId: python_process
  account: org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="Jysj@admin";
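
For reference, a development profile just omits the account entry; when account is blank, KafkaConfig skips all the SASL properties. A minimal sketch (the broker address below is a placeholder for the actual development broker):

kafka:
  enable: true
  servers: localhost:9092
  groupId: python_process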

 

 

1.3 KafkaServiceImpl.java   Kafka producer (message sending) class

 

@Service
@Slf4j
public class KafkaServiceImpl implements KafkaService {

    @Autowired
    private KafkaUtils kafkaUtils;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Override
    public void sendToKafka(String topic, Object entity) {
        // 1. Convert the entity to the JSON format expected by the Python consumer
        String finalJsonString = JsonUtils.toPython(JSON.toJSONString(entity));
        // 2. Resolve the backup file path and name
        String backupFilePath = kafkaUtils.getBackupFilePath(topic);
        // 3. Append the original JSON string to the backup file
        kafkaUtils.writeToFile(backupFilePath, JSON.toJSONString(entity));
        // 4. Send the JSON string to Kafka
        ListenableFutureCallback<SendResult<String, String>> callback = new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onSuccess(SendResult<String, String> result) {
                // Message sent successfully
                log.info("Message sent successfully: {}", result);
            }

            @Override
            public void onFailure(Throwable ex) {
                // Message failed to send
                log.error("Kafka send failed, body=" + entity + ", error: {}", ex.getMessage(), ex);
            }
        };
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, finalJsonString);
        future.addCallback(callback);
    }

}

 

1.4 CompanyAndDepartmentServiceImpl.java   business-layer caller

@Service
@Slf4j
public class CompanyAndDepartmentServiceImpl extends ServiceImpl<CompanyAndDepartmentMapper, CompanyAndDepartment> implements CompanyAndDepartmentService {

    @Autowired
    private KafkaService kafkaService;

    /**
     * @date 2022/4/13
     * @author qihuan.yan
     */
    @Transactional(rollbackFor = Exception.class)
    @Override
    public Boolean addCompanyAndDepartment(CompanyAndDepartment companyAndDepartment) throws JsonProcessingException {
        Date date = new Date();
        companyAndDepartment.setCreateDate(date)
                .setUpdateDate(date)
                .setLatitudeLongitudeCreateDate(date)
                .setLatitudeLongitudeUpdateDate(date)
                .setLatitudeAndLongitude();
        companyAndDepartment.setHanled();

        // Insert into the master data table
        Boolean createSuccess = companyAndDepartment.insert();
        if (createSuccess) {
            String topic = "masterCompanyAndDepartment";
            kafkaService.sendToKafka(topic, companyAndDepartment);
        }
        return createSuccess;
    }

}

 

 

1.5 AttenceRecordListener.java

Consumer listener

 

/**
 * Attendance data listener. Sample message:
 * {
 *   "data_time": "2022-12-15 13:52:53",
 *   "deviceNo": "ceshidevice",
 *   "person_id": 2,
 *   "type": 3,
 *   "img_data": 2
 * }
 */
@Service
@Slf4j
public class AttenceRecordListener {
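
   // The collaborators referenced in process() are assumed to be injected like this;
   // the type names are inferred from the field names and may differ in the actual project.
   @Autowired
   private SyncResultService syncResultService;
   @Autowired
   private DeviceService deviceService;
   @Autowired
   private FileService fileService;
   @Autowired
   private InfluxDao influxDao;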

   /**
    * ack.acknowledge() is called last (in the finally block) to notify Kafka; if the program crashes before that, some data may be lost (which is acceptable for this use case).
    * @param record
    * @param ack
    */
   @SuppressWarnings("rawtypes")
   @KafkaListener(topics = "masterAddCompanyAndDepartment", containerFactory = "ackContainerFactory")
   public void process(ConsumerRecord record, Acknowledgment ack) {
      String value = record.value().toString();
      log.info("收到AttenceRecord信息:" + value);
      JSONObject jb = JSONObject.parseObject(value);
      String id = jb.getString(CommonConstant.SYNC_RESULT_ID);

      try {
         // Business step: mark the sync task as processing
         syncResultService.changeSyncResultStatus(id, CommonConstant.SYNCRESULT_PROCESS, "Processing");
         DAttendanceRecord entity = JSONObject.parseObject(jb.getString(CommonConstant.SYNC_RESULT_DATA), DAttendanceRecord.class);
         if(StringUtils.isBlank(entity.getDevice_no())) {
            throw new ServiceException("device_no必填");
         }

         if(!deviceService.existDeviceByNo(entity.getDevice_no(), DeviceType.PERSONNEL_GATE.getCode())) {
            throw new ServiceException("不存在该设备:"+entity.getDevice_no());
         }

         // Current timestamp in milliseconds (the original String.valueOf(TimeUnit.MILLISECONDS) only yields the enum name "MILLISECONDS")
         String time = String.valueOf(System.currentTimeMillis());
         entity.setTime(time);

         try { // Re-upload the image and store its path
            DateTime date = new DateTime();
            String path = fileService.uploadImgWithNetUrl(entity.getImg_data(), "/door/"+date.getYear()+"/"+date.getMonthOfYear()+"/"+date.getDayOfMonth());
            entity.setImg_data(path);
         } catch (Exception e) {
            log.error("上传图片失败", e);
         }

         influxDao.insert(entity);

         // Mark the sync task as processed successfully
         syncResultService.changeSyncResultStatus(id, CommonConstant.SYNCRESULT_SUCCESS, "Processed successfully");

      } catch (Throwable e) {
         log.error("接收消息异常:", e);
         //更新任务为处理失败
         syncResultService.changeSyncResultStatus(id, CommonConstant.SYNCRESULT_ERROR,e.getMessage());
      } finally {
         try {
            ack.acknowledge();
         } catch (Exception e) {
            log.error("kafaka failed,message is:" + value, e);
         }
      }
   }

}

 

2. Manual acknowledgment in Spring Kafka: when the manual acknowledgment mechanism is used, the consumer must explicitly call ack.acknowledge() after it has finished processing a message. The call tells Kafka that the message was processed successfully and that the offset can be advanced to the next message, so if the consumer restarts or fails it resumes consuming from the position after the last successfully processed message.

Manual acknowledgment gives finer control over when a message is confirmed. For example, if an error occurs while processing a message, you can simply skip the ack.acknowledge() call; on the next startup or retry the consumer will start reading again from that unacknowledged message, as sketched below.
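
A minimal sketch of this pattern, assuming the ackContainerFactory (MANUAL_IMMEDIATE ack mode) defined in KafkaConfig above; the class name, topic name and log statements are illustrative only:

@Service
@Slf4j
public class DemoManualAckListener {

    @KafkaListener(topics = "demoTopic", containerFactory = "ackContainerFactory")
    public void onMessage(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            // Business processing would go here
            log.info("Processing message: {}", record.value());
            // Acknowledge only after the message has been handled successfully
            ack.acknowledge();
        } catch (Exception e) {
            // No acknowledge(): the offset is not committed, so after a restart
            // or rebalance the consumer will read this message again
            log.error("Processing failed, message will be re-consumed", e);
        }
    }
}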

By contrast, with automatic offset committing, Kafka advances the offset shortly after the consumer has polled the messages, regardless of whether they were processed successfully, which can lose messages when processing fails.
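
For contrast, a sketch of the consumer-property changes an automatic-commit variant would make (the listener would then take no Acknowledgment parameter and the container factory would not set MANUAL_IMMEDIATE):

// Enable background auto-commit: offsets returned by poll are committed
// roughly every 100 ms, even if the listener later fails while processing them
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");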