Spark Streaming: Connecting to a Kafka Data Source

Prerequisite for this article: SparkStreaming in Java
Reference: Spark Streaming + Kafka Integration Guide (Kafka broker version 0.10.0 or higher), https://spark.apache.org/docs/latest/streaming-kafka-0-10-integration.html

1. Add the POM dependencies

        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
            <version>2.16.1</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_2.13</artifactId>
            <version>3.5.0</version>
        </dependency>
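
The two dependencies above only add the Kafka connector (note its Scala 2.13 suffix) and a Jackson version pin; the core Spark Streaming dependency is assumed to come from the prerequisite article. If it is missing, a matching artifact would look like the following sketch (same Scala suffix and Spark version as the connector above):

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.13</artifactId>
            <version>3.5.0</version>
        </dependency>

Whatever versions are used, the Scala suffix (_2.13 here) must match across all Spark artifacts in the project.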

2. Usage

package cn.coreqi;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.spark.SparkConf;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.*;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;
import org.apache.kafka.common.serialization.StringDeserializer;
import scala.Tuple2;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

public class Main {
    public static void main(String[] args) throws InterruptedException {
        // Create the SparkConf object
        SparkConf sparkConf = new SparkConf()
                .setMaster("local[*]")
                .setAppName("sparkStreamingKafka");

        // First argument: the Spark configuration; second argument: the batch interval (how often data is collected)
        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, Durations.seconds(3));

        // Define the Kafka consumer parameters
        Map<String, Object> kafkaParams = new HashMap<String, Object>();
        kafkaParams.put("bootstrap.servers", "192.168.58.130:9092,192.168.58.131:9092,192.168.58.132:9092");
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "coreqi");          // consumer group id
        kafkaParams.put("auto.offset.reset", "latest"); // start from the latest offset when no committed offset exists
        kafkaParams.put("enable.auto.commit", false);   // disable auto-commit; offsets must be committed manually

        // Kafka topics to subscribe to
        Collection<String> topics = Arrays.asList("topicA", "topicB");

        // Create a DStream by reading from Kafka
        // Both the keys and values coming from Kafka are Strings
        JavaInputDStream<ConsumerRecord<String, String>> kafkaDataDS =
                KafkaUtils.createDirectStream(ssc,
                        LocationStrategies.PreferConsistent(),  // location strategy: how Kafka partitions are matched to executors; PreferConsistent lets the framework distribute them evenly
                        ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams)); // consumer strategy: subscribe to the given topics
        JavaPairDStream<String, String> mapToPair = kafkaDataDS.mapToPair(record -> new Tuple2<>(record.key(), record.value()));
        mapToPair.print();

        // The Spark Streaming collector is a long-running task, so it cannot simply be stopped,
        // and if the main method returns the application exits, so main must be kept from finishing
        ssc.start();              // start the collector

        ssc.awaitTermination();   // block until the collector is shut down
    }
}
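
Because enable.auto.commit is set to false above, this example never commits offsets back to Kafka, so the consumer group's position is lost on restart. A minimal sketch of manual offset commits, following the pattern in the official integration guide, can be placed before ssc.start() (the processing step is left as a placeholder):

// Additional imports needed at the top of the file:
// import org.apache.spark.streaming.kafka010.CanCommitOffsets;
// import org.apache.spark.streaming.kafka010.HasOffsetRanges;
// import org.apache.spark.streaming.kafka010.OffsetRange;

kafkaDataDS.foreachRDD(rdd -> {
    // The offset ranges for this batch are carried by the underlying Kafka RDD
    OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();

    // ... process the records in rdd here ...

    // Once the batch has been handled, commit the offsets back to Kafka asynchronously
    ((CanCommitOffsets) kafkaDataDS.inputDStream()).commitAsync(offsetRanges);
});

For a quick smoke test, messages can be produced with the console producer that ships with Kafka, e.g. bin/kafka-console-producer.sh --broker-list 192.168.58.130:9092 --topic topicA (newer Kafka releases use --bootstrap-server instead of --broker-list).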