Pulling Kafka messages from a specified position

During a Kafka partition expansion, message consumption went wrong, so the messages from the affected time window had to be pulled again and reprocessed. The snippet below uses offsetsForTimes() to map the start of that window to an offset in the target partition, seeks to it, and polls until a record's timestamp passes the end of the window.
```java
// Imports assumed for this snippet: Kafka client, Guava, fastjson, and the Apollo config
// client (inferred from the ConfigService/Config usage).
import com.alibaba.fastjson.JSON;
import com.ctrip.framework.apollo.Config;
import com.ctrip.framework.apollo.ConfigService;
import com.google.common.collect.Maps;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

  // Driver fragment: replay partitions 0-11 one at a time; fixNow controls whether messages are actually reprocessed
  for (int i = 0; i < 12; i++) {
      try {
          fixKafkaData(i, Boolean.TRUE.equals(fixNow));
      } catch (Exception e) {
          log.error("fixKafkaData exception, partition={}", i, e);
      }
  }

public void fixKafkaData(int pollPartition, Boolean fixNow) {
    Config config = ConfigService.getConfig("middleware.kafka-xxx");
    String kafkaServer = config.getProperty("spring.kafka.bootstrap-servers", "");
    Properties props = new Properties();
    props.put("bootstrap.servers", kafkaServer);
    props.put("group.id", GROUP_ID);
    props.put("enable.auto.commit", "true");
    // props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    // props.put("auto.offset.reset", "earliest");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

    Map<TopicPartition, Long> map = Maps.newHashMap();
    List<PartitionInfo> partitions = consumer.partitionsFor(TOPIC_NAME);
    log.info("partitions.size={}", partitions.size());
    for (PartitionInfo partitionInfo : partitions) {
        map.put(new TopicPartition(TOPIC_NAME, partitionInfo.partition()), beginTime.getTime());
    }
    Map<TopicPartition, OffsetAndTimestamp> parMap = consumer.offsetsForTimes(map);
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : parMap.entrySet()) {
        TopicPartition topicPartition = entry.getKey();
        int partition = topicPartition.partition();
        OffsetAndTimestamp offsetAndTimestamp = entry.getValue();
        log.info("TopicPartition topicPartition={}", topicPartition);
        log.info("TopicPartition offsetAndTimestamp={}", offsetAndTimestamp);
        // use the timestamp -> offset mapping to find where to start in the target partition
        if (partition == pollPartition) {
            if (offsetAndTimestamp == null) {
                // no record at or after beginTime in this partition, nothing to replay
                log.info("no offset found for partition={}, skip", partition);
                consumer.close();
                return;
            }
            long offset = offsetAndTimestamp.offset();
            // consume only this partition, starting from the offset matching beginTime
            consumer.assign(Collections.singletonList(topicPartition));
            consumer.seek(topicPartition, offset);
            // break;
        }
    }
    // pull messages from the seek position until we move past endTime
    boolean isBreak = false;
    while (true) {
        // poll(long) is deprecated in newer clients in favour of poll(Duration)
        ConsumerRecords<String, String> poll = consumer.poll(1000);
        for (ConsumerRecord<String, String> record : poll) {
            int partition = record.partition();
            if (record.timestamp() <= endTime.getTime()) {
                String value = record.value();
                String key = record.key();
                long timestamp = record.timestamp();

                log.info("partition={}", partition);
                Map<String, Object> logMap = Maps.newHashMap();
                logMap.put("key", key);
                logMap.put("value", value);
                logMap.put("timestamp", timestamp);
                logMap.put("partition", partition);
                log.info("{}", JSON.toJSONString(logMap));
                try {
                    processKafka(value, fixNow);
                } catch (Exception e) {
                    log.error("processKafka exception, value={}", value, e);
                }
            } else {
                // first record past endTime: finish this batch, then stop polling
                log.info("break....timestamp={}", record.timestamp());
                log.info("break....partition={}", partition);
                isBreak = true;
            }
        }
        if (isBreak) {
            break;
        }
    }
    // release the consumer once the time window has been replayed
    consumer.close();
}

```
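
For completeness, here is a minimal, hypothetical sketch of the wrapper class the snippet assumes: the constants and time-window fields (TOPIC_NAME, GROUP_ID, beginTime, endTime) and the entry point that drives fixKafkaData. All concrete values below are placeholders, not the original configuration.

```java
// A minimal, hypothetical wrapper for the snippet above; the topic, group id and
// time window below are placeholders, not the original values.
import java.time.Instant;
import java.util.Date;

import lombok.extern.slf4j.Slf4j;

@Slf4j
public class KafkaReplayFixer {

    private static final String TOPIC_NAME = "your-topic";        // placeholder
    private static final String GROUP_ID = "your-consumer-group"; // placeholder

    // the problematic time window to replay (placeholder values)
    private final Date beginTime = Date.from(Instant.parse("2021-11-20T10:00:00Z"));
    private final Date endTime = Date.from(Instant.parse("2021-11-20T12:00:00Z"));

    /** Replay every partition of the topic; fixNow=true actually reprocesses messages. */
    public void replayAll(Boolean fixNow) {
        for (int i = 0; i < 12; i++) {
            try {
                fixKafkaData(i, Boolean.TRUE.equals(fixNow));
            } catch (Exception e) {
                log.error("fixKafkaData exception, partition={}", i, e);
            }
        }
    }

    // fixKafkaData(int, Boolean) and processKafka(String, Boolean) are as shown above
    private void fixKafkaData(int pollPartition, Boolean fixNow) { /* see snippet above */ }

    private void processKafka(String value, Boolean fixNow) { /* business reprocessing logic */ }
}
```

Using consumer.assign() plus seek() on a single partition, rather than subscribe(), keeps the replay independent of the consumer group's rebalance behavior, which is convenient when replaying one partition at a time.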
