Real-Time Incremental Data Synchronization with Canal and Flink (Part 2)
Background
Performance bottleneck: as the business grows, the Select From MySQL -> Save to Localfile -> Load to Hive pipeline takes longer and longer and can no longer meet the time requirements of downstream data-warehouse production.
Selecting large volumes of data directly from MySQL puts heavy pressure on the database, easily causes slow queries, and can affect normal online service.
Hive itself does not support the UPDATE/DELETE SQL primitives (recent Hive versions do support them, but require bucketed tables stored as ORC, as sketched below), so rows that are updated or deleted in MySQL are hard to handle properly.
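For reference, this is roughly what UPDATE/DELETE support in Hive requires. This is a minimal sketch; the table, column names, and settings below are illustrative and not part of the original pipeline, and the exact requirements vary by Hive version:

-- session settings typically required for Hive ACID transactions
SET hive.support.concurrency = true;
SET hive.txn.manager = org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;

-- UPDATE/DELETE only work on transactional tables stored as ORC
-- (older Hive releases additionally require the table to be bucketed)
CREATE TABLE code_city_acid (
    id         BIGINT,
    city       STRING,
    province   STRING,
    event_time STRING
)
CLUSTERED BY (id) INTO 4 BUCKETS
STORED AS ORC
TBLPROPERTIES ('transactional' = 'true');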
Implementation Approach
The overall flow is as follows: Canal captures the MySQL binlog and delivers it to Kafka; Flink consumes the binlog messages from Kafka and writes the change records to HDFS as incremental (delta) data; Hive then merges the delta data with the existing stock data to produce an up-to-date snapshot.
Implementation Plan
Processing the binlog from Kafka with Flink
The Flink job below consumes the Canal binlog JSON of the code_city table from Kafka, filters out DDL operations, converts each change record into a delimited string, and writes the result to HDFS with StreamingFileSink:
package com.etl.kafka2hdfs;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.parser.Feature;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.RollingPolicy;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import java.util.Map;
import java.util.Properties;
/**
 * Reads Canal binlog JSON from Kafka, converts each change record into a
 * delimited string, and writes it to HDFS via StreamingFileSink.
 *
 * @author jmx
 * @date 2020/3/27 12:52
 */
public class HdfsSink {
    public static void main(String[] args) throws Exception {
        String fieldDelimiter = ",";
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // checkpoint
        env.enableCheckpointing(10_000);
        //env.setStateBackend((StateBackend) new FsStateBackend("file:///E://checkpoint"));
        env.setStateBackend((StateBackend) new FsStateBackend("hdfs://kms-1:8020/checkpoint"));
        CheckpointConfig config = env.getCheckpointConfig();
        config.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION);

        // source
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "kms-2:9092,kms-3:9092,kms-4:9092");
        // only required for Kafka 0.8
        props.setProperty("zookeeper.connect", "kms-2:2181,kms-3:2181,kms-4:2181");
        props.setProperty("group.id", "test123");

        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
                "qfbap_ods.code_city", new SimpleStringSchema(), props);
        consumer.setStartFromEarliest();
        DataStream<String> stream = env.addSource(consumer);
        // transform
        SingleOutputStreamOperator<String> cityDS = stream
                .filter(new FilterFunction<String>() {
                    // filter out DDL operations
                    @Override
                    public boolean filter(String jsonVal) throws Exception {
                        JSONObject record = JSON.parseObject(jsonVal, Feature.OrderedField);
                        return record.getString("isDdl").equals("false");
                    }
                })
                .map(new MapFunction<String, String>() {
                    @Override
                    public String map(String value) throws Exception {
                        StringBuilder fieldsBuilder = new StringBuilder();
                        // parse the JSON message
                        JSONObject record = JSON.parseObject(value, Feature.OrderedField);
                        // get the latest field values
                        JSONArray data = record.getJSONArray("data");
                        // iterate over the JSON array of field values (it contains a single element)
                        for (int i = 0; i < data.size(); i++) {
                            // get the i-th element of the JSON array
                            JSONObject obj = data.getJSONObject(i);
                            if (obj != null) {
                                fieldsBuilder.append(record.getLong("id"));      // binlog sequence id
                                fieldsBuilder.append(fieldDelimiter);            // field delimiter
                                fieldsBuilder.append(record.getLong("es"));      // business (event) timestamp
                                fieldsBuilder.append(fieldDelimiter);
                                fieldsBuilder.append(record.getLong("ts"));      // log timestamp
                                fieldsBuilder.append(fieldDelimiter);
                                fieldsBuilder.append(record.getString("type"));  // operation type
                                for (Map.Entry<String, Object> entry : obj.entrySet()) {
                                    fieldsBuilder.append(fieldDelimiter);
                                    fieldsBuilder.append(entry.getValue());      // table column value
                                }
                            }
                        }
                        return fieldsBuilder.toString();
                    }
                });
        //cityDS.print();
        //stream.print();

        // sink
        // a new file is rolled as soon as any one of the following conditions is met
        RollingPolicy<String, String> rollingPolicy = DefaultRollingPolicy.create()
                .withRolloverInterval(60L * 1000L)      // roll a new file after this interval (default 60 s); tune as needed
                .withMaxPartSize(1024 * 1024 * 128L)    // maximum size of each part file (default 128 MB), set to 128 MB here
                .withInactivityInterval(60L * 1000L)    // roll a new file after this period of inactivity (default 60 s)
                .build();
        StreamingFileSink<String> sink = StreamingFileSink
                //.forRowFormat(new Path("file:///E://binlog_db/city"), new SimpleStringEncoder<String>())
                .forRowFormat(new Path("hdfs://kms-1:8020/binlog_db/code_city_delta"), new SimpleStringEncoder<String>())
                .withBucketAssigner(new EventTimeBucketAssigner())
                .withRollingPolicy(rollingPolicy)
                .withBucketCheckInterval(1000)          // bucket check interval, set to 1 s here
                .build();
        cityDS.addSink(sink);
        env.execute();
    }
}
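For reference, the filter and map functions above assume Canal's flat-message JSON format. An INSERT message for the code_city table looks roughly like the following (abridged, and all field values are purely illustrative):

{
  "data": [
    { "id": "1", "city": "北京", "province": "北京市", "event_time": "2020-03-24 13:00:00" }
  ],
  "database": "qfbap_ods",
  "table": "code_city",
  "isDdl": false,
  "type": "INSERT",
  "es": 1585026000000,
  "ts": 1585026000123,
  "id": 1
}

The filter keeps only messages whose isDdl is false, and the map function prepends the binlog sequence id, the es and ts timestamps, and the operation type to the actual column values.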
StreamingFileSink replaces the earlier BucketingSink and is used to write the upstream data into different HDFS directories. Its core concept is bucketing, and the default bucket assigner is DateTimeBucketAssigner, which buckets by processing time, i.e. the time at which a message arrives at the Flink program. That does not match our requirement, so we have to write our own code to extract the event time from the message body and derive the bucket name from it according to our rule. The code is as follows:
package com.etl.kafka2hdfs;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
 * Custom BucketAssigner that derives the bucket (partition directory) name
 * from the event time ("es" field) carried in each record.
 *
 * @author jmx
 * @date 2020/3/27 12:49
 */
public class EventTimeBucketAssigner implements BucketAssigner<String, String> {
    @Override
    public String getBucketId(String element, Context context) {
        String partitionValue;
        try {
            partitionValue = getPartitionValue(element);
        } catch (Exception e) {
            partitionValue = "00000000";
        }
        return "dt=" + partitionValue; // partition directory name
    }

    @Override
    public SimpleVersionedSerializer<String> getSerializer() {
        return SimpleVersionedStringSerializer.INSTANCE;
    }

    private String getPartitionValue(String element) throws Exception {
        // extract the es field (the business/event timestamp) from the concatenated string
        long eventTime = Long.parseLong(element.split(",")[1]);
        Date eventDate = new Date(eventTime);
        return new SimpleDateFormat("yyyyMMdd").format(eventDate);
    }
}
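The sink therefore writes row files under directories such as hdfs://kms-1:8020/binlog_db/code_city_delta/dt=20200324/. Before the merge query below can filter on dt, those directories have to be visible to Hive as partitions of the code_city_delta table. A minimal sketch, assuming code_city_delta is an external table defined over the sink path (its DDL is not shown in this article):

-- register the partition written for 2020-03-24 (date and path are illustrative)
ALTER TABLE code_city_delta ADD IF NOT EXISTS PARTITION (dt = '20200324')
LOCATION 'hdfs://kms-1:8020/binlog_db/code_city_delta/dt=20200324';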
Finally, a FULL OUTER JOIN merges the stock data with the incremental data into an up-to-date table, which then serves as the next day's stock data:
INSERT OVERWRITE TABLE code_city
SELECT
    COALESCE(t2.id, t1.id) AS id,
    COALESCE(t2.city, t1.city) AS city,
    COALESCE(t2.province, t1.province) AS province,
    COALESCE(t2.event_time, t1.event_time) AS event_time
FROM
    code_city t1
    FULL OUTER JOIN (
        SELECT
            id,
            city,
            province,
            event_time
        FROM
            ( -- take the latest state of each row
                SELECT
                    id,
                    city,
                    province,
                    dml_type,
                    event_time,
                    ROW_NUMBER() OVER (PARTITION BY id ORDER BY event_time DESC) AS rank
                FROM
                    code_city_delta
                WHERE
                    dt = '20200324'
            ) temp
        WHERE
            rank = 1
    ) t2 ON t1.id = t2.id;
Summary
This article covered the second half of the Canal + Flink synchronization pipeline: Flink consumes the Canal binlog messages from Kafka, filters out DDL operations, converts each change record into a delimited string, and writes it to HDFS with StreamingFileSink, bucketed by the event time carried in the binlog; the resulting delta partitions are then merged with the existing stock data in Hive via a FULL OUTER JOIN to produce an up-to-date table.