package com.sea.cbb.utils;
import org.apache.kafka.clients.consumer.*;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.TimeUnit;

/************************************
 *         <dependency>
 *             <groupId>org.apache.kafka</groupId>
 *             <artifactId>kafka-clients</artifactId>
 *             <version>3.0.0</version>
 *         </dependency>
 * @PACKAGE : com.sea.cbb.utils
 * @Author  : Sea
 * @Date    : 2022/6/26 17:23
 * @Desc    :
 * @History :
 ***********************************/



public class KafkaConsumerUtils {

    @FunctionalInterface
    public interface KafkaBatchMsgHandler {
        void handler(KafkaConsumer consumer, ConsumerRecords records) throws Exception;
    }

    @FunctionalInterface
    public interface KafkaMsgHandler<Out> {
        Out handler(ConsumerRecord record);
    }

    /**
     * test
     * @param args
     */
    public static void main(String[] args) {
//        KafkaConsumerUtils.consumerMsgAutoCommit("cbb_api_request_log","192.168.18.54:9092,192.168.18.199:9092,192.168.18.176:9092",
//                "test",
//               (record) ->  {
//                    System.out.println(record.timestamp() + "," +record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() +"," + record.value());
//                });

        KafkaConsumerUtils.batchMsgManualCommitSync("cbb_api_request_log", "192.168.18.54:9092,192.168.18.199:9092,192.168.18.176:9092",
                "test11", (KafkaConsumer consumer, ConsumerRecords records) -> {
                    for (Object record1 : records) {
                        ConsumerRecord record = (ConsumerRecord) record1;
                        System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
                    }
//                    consumer.commitSync();
                });

    }



/*  max.poll.interval.ms: the maximum allowed interval between two calls to poll(); if it is exceeded,
    Kafka considers the consumer dead.
    Note: if the pattern is "process the fetched records first, then commit the offsets", make sure the
    processing time stays below max.poll.interval.ms. In practice the records are often handed off to a
    separate thread or thread pool instead (see the sketch below).
    session.timeout.ms: the maximum interval between heartbeats; if no heartbeat is received within this
    time, the consumer is considered dead.
    poll(timeout): if the topic has messages they are returned immediately, otherwise the call waits up
    to the timeout before returning. Polling also keeps the consumer alive in the group.
    max.poll.records: the maximum number of records returned by a single poll.
    Note: since version 0.10.1 the consumer uses two threads: one fetches messages, the other sends
    heartbeats every heartbeat.interval.ms (which must be smaller than session.timeout.ms). The benefit
    is that heartbeats are decoupled from message processing, so a slow handler no longer trips
    session.timeout.ms; the consumer is only removed if the gap between polls exceeds max.poll.interval.ms. */
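
    /*
     * A minimal sketch (not part of the original utility) of the "hand the records off to a
     * separate thread pool" approach mentioned above. Each poll's batch is processed on a small
     * worker pool and the offsets are committed only after the whole batch has finished, so a
     * slow handler does not eat into the poll loop itself. The method name, pool size (4) and
     * poll timeout are illustrative assumptions, not from the original post.
     */
    private static void consumeWithWorkerPool(String topic, String bootstrapServers, String group,
                                              KafkaMsgHandler<?> handler) {
        Properties pros = initSerializer();
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));
        java.util.concurrent.ExecutorService pool = java.util.concurrent.Executors.newFixedThreadPool(4);
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                List<java.util.concurrent.Future<?>> futures = new ArrayList<>();
                for (ConsumerRecord<String, String> record : records) {
                    // each record is handled on the worker pool, not on the polling thread
                    futures.add(pool.submit(() -> { handler.handler(record); }));
                }
                for (java.util.concurrent.Future<?> future : futures) {
                    try {
                        future.get();              // wait for the batch to finish before committing
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                if (!records.isEmpty()) {
                    consumer.commitSync();         // commit only after the batch is fully processed
                }
            }
        } finally {
            pool.shutdown();
            consumer.close();
        }
    }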
    private static Properties initSerializer() {
        Properties pros = new Properties();
        pros.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, (int) Duration.ofMinutes(15).toMillis());   // session timeout: 15 min
        pros.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, (int) Duration.ofSeconds(3).toMillis()); // usually set below 1/3 of session.timeout.ms
        pros.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, (int) Duration.ofMinutes(10).toMillis()); // 10 min (default is 5 min)
        pros.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        pros.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        pros.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return pros;
    }


    /**
     * consumer 1: auto-commit offsets
     */
    private static void consumerMsgAutoCommit(String topic, String bootstrapServers, String group, KafkaMsgHandler<ConsumerRecord> kafkaMsgHandler) {
        Properties pros = initSerializer();
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put("enable.auto.commit", true);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));
        // optionally assign specific partitions instead of subscribing:
//        TopicPartition partition = new TopicPartition(topic, 1);
//        List<TopicPartition> lists = Arrays.asList(partition);
//        consumer.assign(lists);
//        consumer.seekToBeginning(lists);
////        consumer.seek(partition, 0);
        try{
            while(true){
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    kafkaMsgHandler.handler(record);
//                    System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
                }
                try{
                    Thread.sleep(100);
                }catch (Exception e){
                    e.printStackTrace();
                }
            }
        }finally {
            consumer.close();
        }
    }




    /**
     * consumer 2: manual offset commit, one batch at a time
     */
    public static void batchMsgManualCommitSync(String topic, String bootstrapServers, String group, KafkaBatchMsgHandler kafkaMsgHandler) {
        Properties pros = initSerializer();
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);      // maximum number of records returned per poll
        pros.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));
        while(true)
        {
            try{
                // poll(timeout): return immediately if messages are available, otherwise wait up to the timeout; polling also keeps the consumer alive in the group
                ConsumerRecords records = consumer.poll(Duration.ofMillis(1000));
                long start = System.currentTimeMillis();
                kafkaMsgHandler.handler(consumer,records);
                if (System.currentTimeMillis() - start < 100) { Thread.sleep(300L); }   // back off briefly when the batch was handled very quickly
//            for(Object record1 : records){
//                ConsumerRecord  record = (ConsumerRecord) record1;
//                System.out.println(record.timestamp() + "," +record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() +"," + record.value());
//            }
//                consumer.commitSync();
            }catch (Exception e){
                e.printStackTrace();
                System.out.println("commit failed msg" + e.getMessage());
            }
        }
    }
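
    /*
     * A minimal sketch (illustrative, not from the original post) of a finer-grained manual commit:
     * instead of calling consumer.commitSync() on the whole batch, the offsets of each partition are
     * committed separately with commitSync(Map) right after that partition's records are processed.
     * The method name is an assumption; it is not referenced elsewhere in this class.
     */
    public static void commitPerPartition(KafkaConsumer<String, String> consumer,
                                          ConsumerRecords<String, String> records) {
        for (org.apache.kafka.common.TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : partitionRecords) {
                System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
            }
            // commit "offset of the last processed record + 1" for this partition only
            long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
            consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
        }
    }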

    /**
     * consumer 3: manual offset commit, asynchronous
     */
    public static void consumerMsgManualCommitAsync(String topic, String bootstrapServers, String group) {
        Properties pros = initSerializer();
        pros.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);   // maximum number of records returned per poll
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put("enable.auto.commit", false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));
        while(true)
        {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
            }
            consumer.commitAsync();
        }
    }

    /**
     * consumer 4: manual offset commit, asynchronous with callback
     */
    public static void consumerMessageManualCommitAsyncWithCallBack(String topic, String bootstrapServers, String group) {

        Properties pros = initSerializer();
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put("enable.auto.commit", false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));

        while (true) {

            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(80));

            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
            }
            consumer.commitAsync((offsets, e) -> {
                if (null != e) {
                    System.out.println("commit async callback error: " + e.getMessage());
                    System.out.println(offsets);
                }
            });

        }

    }

    /**
     * consumer 5: mixed commit strategy (async in the loop, sync on shutdown)
     */
    public static void mixSyncAndAsyncCommit(String topic, String bootstrapServers, String group) {
        Properties pros = initSerializer();
        pros.put("bootstrap.servers", bootstrapServers);
        pros.put("group.id", group);
        pros.put("enable.auto.commit", false);
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(pros);
        consumer.subscribe(Collections.singletonList(topic));
        try{
            while(true){
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.timestamp() + "," + record.topic() + "," + record.partition() + "," + record.offset() + " " + record.key() + "," + record.value());
                }
                consumer.commitAsync();
            }

        }catch (Exception e){
            System.out.println("commit async error: " + e.getMessage());
        } finally {
            try {
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }

    }

}

 
