Flink CDC Practice Notes
Environment:
Hadoop: 2.7.5
Spark: 2.4.4
Hive: 2.3.9
Flink: 1.13.1
JDK: 1.8
Scala: 2.11.6
Hudi: 0.10.0
Step 1: Generate the project skeleton with the Flink quickstart archetype
mvn archetype:generate \
-DarchetypeGroupId=org.apache.flink \
-DarchetypeArtifactId=flink-quickstart-java \
-DarchetypeVersion=1.13.2 \
-DgroupId=com.lkr.flink \
-DartifactId=flink-cdc \
-Dversion=1.0 \
-Dpackage=com.lkr.flink \
-DinteractiveMode=false
Step 2: Configure pom.xml with the Flink, MySQL CDC, fastjson, and JDBC dependencies
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.lkr.flink</groupId>
  <artifactId>flink-cdc</artifactId>
  <version>1.0</version>
  <packaging>jar</packaging>

  <name>Flink Quickstart Job</name>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <flink.version>1.13.2</flink.version>
    <target.java.version>1.8</target.java.version>
    <scala.binary.version>2.11</scala.binary.version>
    <log4j.version>2.12.1</log4j.version>
    <maven.compiler.source>8</maven.compiler.source>
    <maven.compiler.target>8</maven.compiler.target>
    <mysql.version>5.1.49</mysql.version>
    <flinkcdc.version>2.0.2</flinkcdc.version>
    <fastjson.version>1.2.75</fastjson.version>
  </properties>

  <repositories>
    <repository>
      <id>apache.snapshots</id>
      <name>Apache Development Snapshot Repository</name>
      <url>https://repository.apache.org/content/repositories/snapshots/</url>
      <releases>
        <enabled>false</enabled>
      </releases>
      <snapshots>
        <enabled>true</enabled>
      </snapshots>
    </repository>
  </repositories>

  <dependencies>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-java</artifactId>
      <version>${flink.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-streaming-java_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-clients_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-table-planner-blink_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>com.ververica</groupId>
      <artifactId>flink-connector-mysql-cdc</artifactId>
      <version>${flinkcdc.version}</version>
    </dependency>
    <dependency>
      <groupId>com.alibaba</groupId>
      <artifactId>fastjson</artifactId>
      <version>${fastjson.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.flink</groupId>
      <artifactId>flink-connector-jdbc_${scala.binary.version}</artifactId>
      <version>${flink.version}</version>
    </dependency>
    <dependency>
      <groupId>mysql</groupId>
      <artifactId>mysql-connector-java</artifactId>
      <version>${mysql.version}</version>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <!-- Java Compiler -->
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.1</version>
        <configuration>
          <source>${target.java.version}</source>
          <target>${target.java.version}</target>
        </configuration>
      </plugin>

      <!-- The maven-shade plugin creates a fat jar that contains all necessary dependencies. -->
      <!-- Change the value of <mainClass>...</mainClass> if your program entry point changes. -->
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.1.1</version>
        <executions>
          <!-- Run the shade goal on the package phase -->
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <artifactSet>
                <excludes>
                  <exclude>org.apache.flink:force-shading</exclude>
                  <exclude>com.google.code.findbugs:jsr305</exclude>
                  <exclude>org.slf4j:*</exclude>
                  <exclude>org.apache.logging.log4j:*</exclude>
                </excludes>
              </artifactSet>
              <filters>
                <filter>
                  <!-- Do not copy the signatures in the META-INF folder.
                       Otherwise, this might cause SecurityExceptions when using the JAR. -->
                  <artifact>*:*</artifact>
                  <excludes>
                    <exclude>META-INF/*.SF</exclude>
                    <exclude>META-INF/*.DSA</exclude>
                    <exclude>META-INF/*.RSA</exclude>
                  </excludes>
                </filter>
              </filters>
              <transformers>
                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                  <mainClass>com.lkr.flink.StreamingJob</mainClass>
                </transformer>
              </transformers>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>
Step 3: Write the main job class (StreamingJob), which builds the MySQL CDC source and prints the change records
package com.lkr.flink;

import com.ververica.cdc.connectors.mysql.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import java.util.Properties;

/**
 * Skeleton for a Flink Streaming Job.
 *
 * <p>For a tutorial how to write a Flink streaming application, check the
 * tutorials and examples on the <a href="https://flink.apache.org/docs/stable/">Flink Website</a>.
 *
 * <p>To package your application into a JAR file for execution, run
 * 'mvn clean package' on the command line.
 *
 * <p>If you change the name of the main class (with the public static void main(String[] args))
 * method, change the respective entry in the POM.xml file (simply search for 'mainClass').
 */
public class StreamingJob {

    public static void main(String[] args) throws Exception {
        Properties debeziumProperties = new Properties();
        // do not take a table lock during the snapshot phase
        debeziumProperties.put("snapshot.locking.mode", "none");

        SourceFunction<String> sourceFunction = MySqlSource.<String>builder()
                .hostname("121.36.**.47")
                .port(30924)
                .databaseList("test11")   // set captured database
                .tableList("test11.*")    // set captured tables
                .username("root")
                .password("******")
                .deserializer(new MyDeserializationSchemaFunction()) // converts SourceRecord to JSON String
                .debeziumProperties(debeziumProperties)
                .build();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.addSource(sourceFunction)
                .print().setParallelism(1); // use parallelism 1 for the sink to keep message ordering

        env.execute();
    }
}
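The class above imports the checkpoint and restart-strategy classes (FsStateBackend, CheckpointingMode, CheckpointConfig, RestartStrategies) but never uses them. If you want the job to survive failures and let the CDC source resume from the recorded binlog position, a minimal sketch of the extra lines in main(), placed right after the environment is created, could look like the following; the interval, retry count, and HDFS path are assumed placeholder values, not from the original setup:

        // checkpointing sketch with assumed values; adjust to your cluster
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);          // checkpoint every 5 s
        env.setStateBackend(new FsStateBackend("hdfs:///flink/checkpoints"));    // placeholder checkpoint path
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); // keep checkpoints after cancel
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5000L));   // 3 attempts, 5 s apart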
Step 4: Implement the custom deserialization schema (MyDeserializationSchemaFunction), which turns each Debezium SourceRecord into a JSON string
package com.lkr.flink;

import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;

/**
 * Custom deserializer: converts the records read by Flink CDC into JSON strings.
 */
public class MyDeserializationSchemaFunction implements DebeziumDeserializationSchema<String> {

    @Override
    public void deserialize(SourceRecord sourceRecord, Collector<String> collector) throws Exception {
        Struct valueStruct = (Struct) sourceRecord.value();
        Struct sourceStruct = valueStruct.getStruct("source");

        // database name
        String database = sourceStruct.getString("db");
        // table name
        String table = sourceStruct.getString("table");
        // operation type (Debezium reports "create" for inserts, "update" for updates)
        String type = Envelope.operationFor(sourceRecord).toString().toLowerCase();
        if (type.equals("create")) {
            type = "insert";
        }

        JSONObject jsonObj = new JSONObject();
        jsonObj.put("database", database);
        jsonObj.put("table", table);
        jsonObj.put("type", type);

        // row data: copy the "after" image field by field
        Struct afterStruct = valueStruct.getStruct("after");
        JSONObject dataJsonObj = new JSONObject();
        if (afterStruct != null) {
            for (Field field : afterStruct.schema().fields()) {
                String fieldName = field.name();
                Object fieldValue = afterStruct.get(field);
                dataJsonObj.put(fieldName, fieldValue);
            }
        }
        jsonObj.put("data", dataJsonObj);

        // emit the record downstream
        collector.collect(jsonObj.toJSONString());
    }

    @Override
    public TypeInformation<String> getProducedType() {
        return TypeInformation.of(String.class);
    }
}
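For an insert into a hypothetical test11.user table with columns id and name (the table and columns are made up for illustration and do not come from the original setup), the schema above would emit a string shaped roughly like this:

{"database":"test11","table":"user","type":"insert","data":{"id":1,"name":"zhangsan"}}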
Step 5: Build the fat jar
mvn clean package -DskipTests
Step 6: Deploy the job
Upload the shaded jar produced under target/ to the Flink cluster and submit it; a typical submit command is sketched below.
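Assuming a standalone session cluster and the default artifact name from the pom above (artifactId flink-cdc, version 1.0), the submission might look like:

flink run -c com.lkr.flink.StreamingJob target/flink-cdc-1.0.jar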