Flink: consuming multiple Kafka topics

Reposted from: https://www.404bugs.com/details/1081256252897284096

Example of consuming multiple Kafka topics with Flink:

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringDeserializer;

import com.xxx.flink.demo.util.Config;
import com.xxx.flink.xxx.CustomDeSerializationSchema;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Properties;
 
public class FlinkSourceFromKafka {
 
    public static void main(String[] args) throws Exception {
 
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // checkpoint every 5 seconds with exactly-once semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.enableCheckpointing(5000);
 
        Properties props = new Properties();
        // only one or more broker host:port pairs need to be provided
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "xxx.xxx.xxx.xxx:xxxx");
        props.setProperty("group.id", "consumer01");//消费者组id
        props.setProperty("auto.offset.reset", "earliest");//latest有offset记录从记录位置开始消费,没有记录从最新的/最后的消息开始消费 /earliest有offset记录从记录位置开始消费,没有记录从最早的/最开始的消息开始消费
        props.put("key.deserializer", StringDeserializer.class);
        props.put("value.deserializer",StringDeserializer.class);
        props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512");
        props.put("sasl.jaas.config","org.apache.kafka.common.security.scram.ScramLoginModule required username=\"username\" password=\"password\";");
        props.setProperty("auto.commit.interval.ms", "2000");//自动提交的时间间隔
        
 
//        final ArrayList<String> topics = new ArrayList<>();
//        topics.add("test1");
        // configure the consumer
        // SimpleStringSchema only returns the raw value with no topic/partition metadata, so a custom deserialization schema is used instead
        final FlinkKafkaConsumer<ConsumerRecord<String, String>> consumer = 
                new FlinkKafkaConsumer<ConsumerRecord<String, String>>(Arrays.asList(Config.tableTopicArr), new CustomDeSerializationSchema(), props);
        // use a regular expression to discover topics dynamically
        // final FlinkKafkaConsumer<ConsumerRecord<String, String>> consumer = new FlinkKafkaConsumer<ConsumerRecord<String, String>>(Pattern.compile("^test_([A-Za-z0-9]*)$"), new CustomDeSerializationSchema(), props);
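        // Note (an assumption, not in the original post): for the pattern-based consumer above to pick up
        // newly created matching topics, the legacy connector also needs a partition-discovery interval, e.g.:
        // props.setProperty(FlinkKafkaConsumerBase.KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, "10000");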
 
        // start consuming from the earliest offset
        consumer.setStartFromEarliest();
        /**
         * Start consuming from manually specified offsets
         * */
        //final HashMap<KafkaTopicPartition, Long> map  = new HashMap<>();
        //map.put(new KafkaTopicPartition("test",0),10240L);
        //map.put(new KafkaTopicPartition("test",1),10240L);
        //map.put(new KafkaTopicPartition("test",2),10240L);
        //consumer.setStartFromSpecificOffsets(map);
 
        /**
         * Start consuming from a specified timestamp
         * */
        // consumer.setStartFromTimestamp(1559801580000L);
 
        /**
         * Start consuming from the group's last committed offsets; this requires the group.id property to be set
         */
        //consumer.setStartFromGroupOffsets();
 
 
        final SingleOutputStreamOperator<ConsumerRecord<String, String>> source = env.addSource(consumer).flatMap(
                new FlatMapFunction<ConsumerRecord<String, String>, ConsumerRecord<String, String>>() {
                    @Override
                    public void flatMap(ConsumerRecord<String, String> value, Collector<ConsumerRecord<String, String>> collector) throws Exception {
                        System.out.println(value);
                        // forward the record downstream; without this the flatMap emits nothing
                        collector.collect(value);
                    }
 
                });
 
        env.execute("consumer...");
    }
}
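The example references Config.tableTopicArr, a helper that is not shown in the source post. Below is a minimal sketch of what such a class might look like, assuming it only holds the array of topic names to subscribe to; the package, class and field names come from the example above, while the topic values are placeholders.

package com.xxx.flink.demo.util;

// Hypothetical sketch of the Config helper assumed by FlinkSourceFromKafka;
// only the tableTopicArr field is required by the example.
public final class Config {

    // placeholder topic names; replace with the actual topics to consume
    public static final String[] tableTopicArr = {"topic_a", "topic_b", "topic_c"};

    private Config() {
    }
}

The CustomDeSerializationSchema used by the consumer above is defined as follows: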

import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerRecord;
 
public class CustomDeSerializationSchema implements KafkaDeserializationSchema<ConsumerRecord<String, String>> {
    // whether this element marks the end of the stream; returning false means records keep arriving indefinitely
    @Override
    public boolean isEndOfStream(ConsumerRecord<String, String> nextElement) {
        return false;
    }
 
    // return a ConsumerRecord<String, String> that carries topic, partition and offset metadata in addition to the key and value
    @Override
    public ConsumerRecord<String, String> deserialize(ConsumerRecord<byte[], byte[]> record) throws Exception {
        // guard against null keys/values to avoid a NullPointerException in new String(...)
        final String key = record.key() == null ? null : new String(record.key());
        final String value = record.value() == null ? null : new String(record.value());
        return new ConsumerRecord<String, String>(
                record.topic(),
                record.partition(),
                record.offset(),
                key,
                value);
    }
 
    // specify the type information of the records produced by this schema
    @Override
    public TypeInformation<ConsumerRecord<String, String>> getProducedType() {
        return TypeInformation.of(new TypeHint<ConsumerRecord<String, String>>() {
        });
    }
}
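Because the schema emits full ConsumerRecord objects, downstream operators can read the topic, partition and offset alongside the key and value. The following is a hypothetical sketch (not part of the original post) of a mapper that could be chained after the source, e.g. source.map(new RecordToLineMapper()).print():

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Hypothetical helper: formats each record together with its Kafka metadata.
public class RecordToLineMapper implements MapFunction<ConsumerRecord<String, String>, String> {

    @Override
    public String map(ConsumerRecord<String, String> record) throws Exception {
        // topic, partition and offset are available because CustomDeSerializationSchema preserves them
        return record.topic() + "-" + record.partition() + "@" + record.offset()
                + ": key=" + record.key() + ", value=" + record.value();
    }
}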

 
