Reading SQL Server data with Flink CDC

First, the SQL Server version: version 14 or later is required, i.e. SQL Server 2017 or newer.

CDC must be enabled on SQL Server. The detailed steps are easy to find online; one thing to watch out for is that the SQL Server Agent service must be running.

If you need the job to resume from where it left off, checkpointing must be configured. Because our hardware here is limited, we use RocksDBStateBackend and keep the checkpoints on the local filesystem. If you have a proper big-data environment, FsStateBackend (filesystem state backend on HDFS) is recommended so the state is persisted to HDFS.
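A minimal sketch of the two options, reusing the checkpoint paths that appear in the full job in section 3 (adjust the paths and the HDFS address to your own environment):

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class StateBackendSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000L); // one checkpoint every 5 seconds

        // Option 1: RocksDB state backend, checkpoints on the local filesystem (what we use here)
        env.setStateBackend(new RocksDBStateBackend("file:///usr/local/flink-1.13.5/ck"));

        // Option 2: filesystem state backend on HDFS (recommended when a Hadoop cluster is available)
        // env.setStateBackend(new FsStateBackend("hdfs://sc2:8020/flinkCDC"));

        // ... define the source and sink and call env.execute() as in section 3
    }
}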

 

1. Enable CDC on SQL Server

1) Check whether CDC is enabled for the database and its tables

-- Check whether CDC is enabled on the database
SELECT name,is_cdc_enabled FROM sys.databases WHERE is_cdc_enabled = 1;

-- Check which tables in the current database have CDC enabled
SELECT name,is_tracked_by_cdc FROM sys.tables WHERE is_tracked_by_cdc = 1;

2) Enable / disable CDC for the database

-- Enable CDC for the current database
USE MyDB 
GO
EXECUTE sys.sp_cdc_enable_db;
GO
-- Disable CDC for the current database
USE MyDB  
GO  
EXEC sys.sp_cdc_disable_db  
GO  

3) Enable / disable CDC for a table

-- Enable
USE MyDB  
GO  
EXEC sys.sp_cdc_enable_table  
@source_schema = N'dbo',  
@source_name   = N'MyTable',  
@role_name     = NULL,  
@supports_net_changes = 1  
GO 
-- Disable
USE MyDB  
GO  
EXEC sys.sp_cdc_disable_table  
@source_schema = N'dbo',  
@source_name   = N'MyTable',  
@capture_instance = N'dbo_MyTable'  
GO  

4) Check whether CDC is enabled for each table

SELECT  name ,
        is_tracked_by_cdc ,
        CASE WHEN is_tracked_by_cdc = 0 THEN 'CDC disabled'
             ELSE 'CDC enabled'
        END AS cdc_status
FROM    sys.tables;

Notes:

1> Grant database authorization (set the database owner)

ALTER AUTHORIZATION ON DATABASE::[MyDB] TO [sa]

2> The SQL Server Agent service must be running

Troubleshooting:
If enabling or disabling CDC on a table fails, check whether the SQL Server instance name matches the host name:

SELECT SERVERPROPERTY('ServerName'), srvname, datasource FROM master.dbo.sysservers;

If they do not match, run the following statement:

IF serverproperty('servername')<>@@servername    
  BEGIN  
  DECLARE  @server SYSNAME  
  SET   @server=@@servername      
  EXEC  sp_dropserver @server=@server    
  SET   @server=cast(serverproperty('servername') AS SYSNAME)   
  EXEC  sp_addserver @server=@server,@local='LOCAL'     
  END  
  ELSE  
    PRINT 'The instance name already matches the host name; no change needed.' ;

2. Add the Maven (pom) dependencies

<properties>
    <flink.version>1.13.5</flink.version>
    <scala.version>2.11</scala.version>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
</properties>

<dependencies>
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table-planner_2.11</artifactId>
        <version>${flink.version}</version>
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-table-planner-blink_2.11</artifactId>
        <version>${flink.version}</version>
    </dependency>

    <dependency>
        <groupId>com.ververica</groupId>
        <artifactId>flink-connector-sqlserver-cdc</artifactId>
        <!-- The dependency is available only for stable releases, SNAPSHOT dependency need build by yourself. -->
        <version>2.3-SNAPSHOT</version>
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-java_2.11</artifactId>
        <version>${flink.version}</version>
        <!--            <scope>provided</scope>-->
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-scala_2.11</artifactId>
        <version>${flink.version}</version>
        <!--            <scope>provided</scope>-->
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-runtime-web_2.11</artifactId>
        <version>${flink.version}</version>
        <!--            <scope>provided</scope>-->
    </dependency>

    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-statebackend-rocksdb_2.11</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <dependency>
        <groupId>joda-time</groupId>
        <artifactId>joda-time</artifactId>
        <version>2.7</version>
    </dependency>


    <dependency>
        <groupId>com.google.code.gson</groupId>
        <artifactId>gson</artifactId>
        <version>2.8.2</version>
    </dependency>

    <dependency>
        <groupId>ru.yandex.clickhouse</groupId>
        <artifactId>clickhouse-jdbc</artifactId>
        <version>0.2.6</version>
    </dependency>


    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka_2.11</artifactId>
        <version>${flink.version}</version>
    </dependency>

    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.7.0</version>
    </dependency>

</dependencies>

3. Java code

package com.cmei.s2c;

import com.ververica.cdc.connectors.sqlserver.SqlServerSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.api.Table;
import org.apache.flink.types.Row;


public class SqlServerSourceExample {

    public static void main(String[] args) throws Exception {
        SourceFunction<String> sourceFunction = SqlServerSource.<String>builder()
                .hostname("192.168.10.134")
                .port(1433)
                .database("inventory") // monitor sqlserver database
                .tableList("dbo.products") // monitor products table
                .username("sa")
                .password("qwe123==")
                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
                .build();
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
        //2. Flink CDC stores the current transaction-log read position as state in checkpoints; to resume from where it left off, the job must be restarted from a checkpoint or savepoint
        //2.1 Enable checkpointing, one checkpoint every 5 seconds
        env.enableCheckpointing(5000L);
        //2.2 Set the checkpoint consistency mode
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //2.3 Retain the last checkpoint when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.4 Restart strategy used when recovering from a checkpoint
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));
        //2.5 Set the state backend
        env.setStateBackend(new RocksDBStateBackend("file:///usr/local/flink-1.13.5/ck"));
        //env.setStateBackend(new MemoryStateBackend());
        // MemoryStateBackend (in-memory state backend)
        // FsStateBackend (filesystem state backend, e.g. HDFS)
        // RocksDBStateBackend (RocksDB state backend)
        //env.setStateBackend(new FsStateBackend("hdfs://sc2:8020/flinkCDC"));
        //2.6 Set the user name used to access HDFS
        //System.setProperty("HADOOP_USER_NAME", "root");

        env.addSource(sourceFunction).addSink(new ClickHouseSink()).setParallelism(1);
        //env.addSource(sourceFunction).print().setParallelism(1); // use parallelism 1 for sink to keep message ordering

        env.execute("1");

    }
}
4. JSON parsing (custom DebeziumDeserializationSchema)

package com.cmei.s2c;

import com.google.gson.Gson;
import com.ververica.cdc.debezium.DebeziumDeserializationSchema;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.util.Collector;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import java.util.HashMap;

public class JsonDebeziumDeserializationSchema implements DebeziumDeserializationSchema<String> {

    @Override
    public void deserialize(SourceRecord sourceRecord, Collector<String> collector) throws Exception {
        HashMap<String, Object> hashMap = new HashMap<>();

        String topic = sourceRecord.topic();
        String[] split = topic.split("[.]");
        String database = split[1];
        String table = split[2];
        hashMap.put("database",database);
        hashMap.put("table",table);

        //get the operation type
        Envelope.Operation operation = Envelope.operationFor(sourceRecord);
        //get the record payload itself
        Struct struct = (Struct)sourceRecord.value();
        Struct after = struct.getStruct("after");
        Struct before = struct.getStruct("before");
        /*
            1. If both the before and after structs are present, the record is an update
            2. If only the before struct is present, it is a delete
            3. If only the after struct is present, it is an insert
        */

        if (before != null && after != null) {
            //update: both images exist, take the after image
            Schema schema = after.schema();
            HashMap<String, Object> hm = new HashMap<>();
            for (Field field : schema.fields()) {
                hm.put(field.name(), after.get(field.name()));
            }
            hashMap.put("data",hm);
        }else if (after != null) {
            //insert (or snapshot read): only the after image exists
            Schema schema = after.schema();
            HashMap<String, Object> hm = new HashMap<>();
            for (Field field : schema.fields()) {
                hm.put(field.name(), after.get(field.name()));
            }
            hashMap.put("data",hm);
        }else if (before != null){
            //delete: only the before image exists
            Schema schema = before.schema();
            HashMap<String, Object> hm = new HashMap<>();
            for (Field field : schema.fields()) {
                hm.put(field.name(), before.get(field.name()));
            }
            hashMap.put("data",hm);
        }

        //normalize Debezium's operation name: "create" becomes "insert"; "update" and "delete" already match, snapshot reads keep "read"
        String type = operation.toString().toLowerCase();
        if ("create".equals(type)) {
            type = "insert";
        }

        hashMap.put("type",type);

        Gson gson = new Gson();
        collector.collect(gson.toJson(hashMap));

    }

    @Override
    public TypeInformation<String> getProducedType() {
        return  BasicTypeInfo.STRING_TYPE_INFO;
    }
}
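To make the job emit this simplified JSON instead of the raw Debezium JSON, point the source builder in SqlServerSourceExample at this class. Since it shares its simple name with com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema (which SqlServerSourceExample imports), either drop that import or use the fully qualified name, e.g.:

        SourceFunction<String> sourceFunction = SqlServerSource.<String>builder()
                .hostname("192.168.10.134")
                .port(1433)
                .database("inventory")
                .tableList("dbo.products")
                .username("sa")
                .password("qwe123==")
                // custom schema from this package; emits {"database":..,"table":..,"type":..,"data":{..}}
                .deserializer(new com.cmei.s2c.JsonDebeziumDeserializationSchema())
                .build();

Note that the ClickHouseSink in the next section parses the raw Debezium fields ("source", "op", "after"), so it would need to be adapted if you switch to this simplified format.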
5. ClickHouseSink — only insert is implemented here; you can fill in delete and update yourself (see the sketch after the class).

package com.cmei.s2c;

import com.google.gson.Gson;
import com.google.gson.internal.LinkedTreeMap;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.HashMap;

public class ClickHouseSink extends RichSinkFunction<String> {

    Connection connection;

    PreparedStatement iStmt;
    PreparedStatement dStmt;
    PreparedStatement uStmt;
    private Connection getConnection() {
        Connection conn = null;
        try {
            Class.forName("ru.yandex.clickhouse.ClickHouseDriver");
            String url = "jdbc:clickhouse://192.168.10.61:8123/drugdb";
            conn = DriverManager.getConnection(url,"bigdata","bigdata");

        } catch (Exception e) {
            e.printStackTrace();
        }
        return conn;
    }


    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        connection = getConnection();
        String insertSql = "insert into product(id,name,description,weight) values (?,?,?,?)";
        String deleteSql = "delete from product where id=?";
        String updateSql = "update product set name=? ,description=?,weight=? where id=?";
        iStmt = connection.prepareStatement(insertSql);
        dStmt = connection.prepareStatement(deleteSql);
        uStmt = connection.prepareStatement(updateSql);

    }

    // called once for every incoming record
    public void invoke(String value, Context context) throws Exception {

        Gson t = new Gson();
        HashMap<String, Object> hs = t.fromJson(value, HashMap.class);

        LinkedTreeMap<String,Object> source = (LinkedTreeMap<String,Object>)hs.get("source");
        String database = (String) source.get("db");
        String table = (String) source.get("table");
        String op = (String) hs.get("op");
        /**
         * {"before":null,
         * "after":{"id":109,"name":"spare tire","description":"24 inch spare tire","weight":22.2},
         * "source":{"version":"1.5.4.Final","connector":"sqlserver","name":"sqlserver_transaction_log_source","ts_ms":1648776173094,"snapshot":"last","db":"inventory","sequence":null,"schema":"dbo","table":"products","change_lsn":null,"commit_lsn":"0000002c:00001a60:0001","event_serial_no":null},
         * "op":"r","ts_ms":1648776173094,"transaction":null}*/

        //handle inserts (including snapshot reads)
        if ("inventory".equals(database) && "products".equals(table)) {
            if ("r".equals(op) || "c".equals(op)) {
                LinkedTreeMap<String, Object> data = (LinkedTreeMap<String, Object>) hs.get("after");
                Double ids = (Double)data.get("id");
                int id =  ids.intValue();
                String name = (String) data.get("name");
                String description = (String) data.get("description");
                Double weights = (Double)data.get("weight");
                float weight = 0;
                if (weights != null) {
                    weight = weights.floatValue();
                }

                iStmt.setInt(1, id);
                iStmt.setString(2, name);
                iStmt.setString(3, description);
                iStmt.setFloat(4, weight);

                iStmt.executeUpdate();
            }

//            else if ("d".equals(type)) {
//                System.out.println("delete => " + value);
//                LinkedTreeMap<String, Object> data = (LinkedTreeMap<String, Object>) hs.get("data");
//                String id = (String) data.get("ID");
//                dStmt.setString(1, id);
//                dStmt.executeUpdate();
//            }
//            else if ("u".equals(type)) {
//                System.out.println("update => " + value);
//                LinkedTreeMap<String, Object> data = (LinkedTreeMap<String, Object>) hs.get("data");
//                String id = (String) data.get("ID");
//                String cron = (String) data.get("CRON");
//                uStmt.setString(1, cron);
//                uStmt.setString(2, id);
//                uStmt.executeUpdate();
//            }
        }
    }

    @Override
    public void close() throws Exception {
        super.close();

        if(iStmt != null) {
            iStmt.close();
        }
        if(dStmt != null) {
            dStmt.close();
        }
        if(uStmt != null) {
            uStmt.close();
        }

        if(connection != null) {
            connection.close();
        }
    }

}
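As a starting point for the missing branches: ClickHouse has no standard UPDATE, and deletes/updates on MergeTree tables are usually expressed as asynchronous mutations (ALTER TABLE ... DELETE / ALTER TABLE ... UPDATE), so the deleteSql / updateSql prepared in open() would likely be rejected if executed. The sketch below is one possible way to extend the if block inside invoke() under that assumption, reusing the product columns from the insert branch; the legacy ru.yandex driver binds the ? parameters client-side, so this should work, but verify against your driver and server version (null handling and statement reuse are omitted for brevity):

            // delete: for "d" events the old row is in "before"; issue a ClickHouse delete mutation
            else if ("d".equals(op)) {
                LinkedTreeMap<String, Object> before = (LinkedTreeMap<String, Object>) hs.get("before");
                int id = ((Double) before.get("id")).intValue();
                try (PreparedStatement del = connection.prepareStatement(
                        "ALTER TABLE product DELETE WHERE id = ?")) {
                    del.setInt(1, id);
                    del.executeUpdate();
                }
            }
            // update: for "u" events the new row is in "after"; issue a ClickHouse update mutation
            else if ("u".equals(op)) {
                LinkedTreeMap<String, Object> after = (LinkedTreeMap<String, Object>) hs.get("after");
                try (PreparedStatement upd = connection.prepareStatement(
                        "ALTER TABLE product UPDATE name = ?, description = ?, weight = ? WHERE id = ?")) {
                    upd.setString(1, (String) after.get("name"));
                    upd.setString(2, (String) after.get("description"));
                    upd.setFloat(3, ((Double) after.get("weight")).floatValue());
                    upd.setInt(4, ((Double) after.get("id")).intValue());
                    upd.executeUpdate();
                }
            }

Whether mutations are acceptable depends on your table engine and write volume; for heavy change streams a ReplacingMergeTree-style modelling of updates is often preferred.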

 

References:

https://blog.csdn.net/cardinalzbk/article/details/124187230

https://www.jianshu.com/p/2f2a1df1921a

https://blog.csdn.net/qq_42575907/article/details/128410448?spm=1001.2101.3001.6650.1&utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-128410448-blog-124187230.pc_relevant_multi_platform_whitelistv3&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-128410448-blog-124187230.pc_relevant_multi_platform_whitelistv3&utm_relevant_index=2
