Flink Basics 02: Kafka WordCount

1. Start the ZooKeeper service

./bin/zookeeper-server-start.sh config/zookeeper.properties

2. Start the Kafka broker (Windows or Linux)

.\bin\windows\kafka-server-start.bat .\config\server.properties
./bin/kafka-server-start.sh config/server.properties

3. Create the topic

.\bin\windows\kafka-topics.bat --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink
./bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test_flink
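(On Kafka 2.2 and later, kafka-topics takes --bootstrap-server localhost:9092 in place of --zookeeper localhost:2181.)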

4. Start a console producer

.\bin\windows\kafka-console-producer.bat --broker-list localhost:9092 --topic test_flink
./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test_flink
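Each line typed into this producer console becomes one Kafka message for the job below; typing hello flink hello, for example, should eventually print counts such as (hello,2) and (flink,1) once the job is running.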

(Optional) Start a console consumer to verify that messages reach the topic:

.\bin\windows\kafka-console-consumer.bat --zookeeper localhost:2181 --topic test_flink
./bin/kafka-console-consumer.sh --zookeeper localhost:2181 --topic test_flink --from-beginning

(On Kafka 2.0 and later the ZooKeeper-based console consumer was removed; pass --bootstrap-server localhost:9092 instead of --zookeeper localhost:2181.)

5. The Flink job: consume from test_flink and count words over 2-second windows

package flink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.core.fs.FileSystem.WriteMode
import org.apache.flink.streaming.api.scala._ // also provides StreamExecutionEnvironment and the implicit TypeInformation the Scala API needs
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011

object KafkaWordCount {


  // Build a Kafka 0.11 consumer for the test_flink topic.
  def getFlinkKafkaConsumer(): FlinkKafkaConsumer011[String] = {
    val prop = new Properties()
    prop.setProperty("zookeeper.connect", "localhost:2181") // ZooKeeper host (only the 0.8 consumer needs this; harmless here)
    prop.setProperty("bootstrap.servers", "localhost:9092") // Kafka broker
    prop.setProperty("group.id", "group1")                  // consumer group
    new FlinkKafkaConsumer011[String]("test_flink", new SimpleStringSchema(), prop)
  }


  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    val source = getFlinkKafkaConsumer()

    // Read the topic from the beginning instead of from committed group offsets.
    source.setStartFromEarliest()

    val dStream = env.addSource(source)

    // Split each line on whitespace, emit (word, 1) pairs, key by the word,
    // and sum the counts over tumbling 2-second processing-time windows.
    val result = dStream.flatMap(x => x.split("\\s+"))
      .map(x => (x, 1))
      .keyBy(0)
      .timeWindow(Time.seconds(2))
      .sum(1)

    // Force the windowed operator down to a single task so the printed counts come out in one stream.
    result.setParallelism(1).print()

    // Also write the counts to a local file ("\\" in a Scala string literal is a single backslash).
    result.writeAsText("E:\\sparkproject\\src\\test\\data\\result2.txt", WriteMode.OVERWRITE)

    env.execute("KafkaWordCount")
  }

}
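For reference, a minimal build.sbt sketch of what this job compiles against; the Scala and Flink versions here are assumptions (not from the original post), so match them to your cluster:

// build.sbt (sketch; the versions below are assumptions)
scalaVersion := "2.11.12"

val flinkVersion = "1.7.2" // any 1.x release that still ships the 0.11 connector

libraryDependencies ++= Seq(
  // core Scala streaming API; mark it "provided" when submitting to a cluster
  "org.apache.flink" %% "flink-streaming-scala" % flinkVersion,
  // brings in FlinkKafkaConsumer011
  "org.apache.flink" %% "flink-connector-kafka-0.11" % flinkVersion
)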
