Debezium Mysql to Postgres测试
简介
使用Benchmarksql-mysql来测试基于Debezium Mysql Connector跟Confluent Jdbc SinkConnector的数据同步链路
源端数据库为Mysql5.7
目标端数据库为Postgres13.5
准备环境
使用docker-compose
# Compose stack: Zookeeper + Kafka + Kafka Connect (Debezium) + MySQL 5.7 + Postgres 13.5
# NOTE: `deploy.resources` limits only take effect in Swarm mode or with
# `docker-compose --compatibility`.
version: '3'
services:
  dbz-zookeeper:
    image: debezium/zookeeper:1.7
    container_name: dbz-zookeeper
    ports:
      # port mappings quoted to avoid the YAML 1.1 sexagesimal-integer trap
      - "2181:2181"
    networks:
      - dbz-network
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 1024M
  dbz-kafka:
    image: debezium/kafka:1.7
    container_name: dbz-kafka
    ports:
      - "9092:9092"
    depends_on:
      - dbz-zookeeper
    environment:
      - ZOOKEEPER_CONNECT=dbz-zookeeper:2181
    networks:
      - dbz-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 2048M
  pg13:
    container_name: pg13
    image: postgres:13.5
    ports:
      - "5432:5432"
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    networks:
      - dbz-network
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4096M
  mysql5.7:
    container_name: mysql5.7
    image: mysql:5.7.36
    ports:
      - "3306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=mysql
    networks:
      - dbz-network
    volumes:
      # mount the server config file; adjust the host path as needed
      - /root/cdc/compose/mysql/conf/mysql.cnf:/etc/mysql/conf.d/mysql.cnf
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4096M
  dbz-connect:
    container_name: dbz-connect
    image: debezium/connect:1.7
    ports:
      - "8083:8083"
    depends_on:
      - dbz-kafka
    environment:
      - BOOTSTRAP_SERVERS=dbz-kafka:9092
      # FIX: was "CONNECT_GROUP_ID:compose-connect-group" — the separator must be
      # "=", and the debezium/connect image reads GROUP_ID (not CONNECT_GROUP_ID)
      - GROUP_ID=compose-connect-group
      - CONFIG_STORAGE_TOPIC=my_connect_configs
      - OFFSET_STORAGE_TOPIC=my_connect_offsets
      - STATUS_STORAGE_TOPIC=my_connect_statuses
    networks:
      - dbz-network
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 4096M
networks:
  dbz-network:
    external: false
mysql 挂载的配置文件
mysql.cnf文件
# MySQL server config mounted into the mysql:5.7 container (see compose volumes).
# The binlog settings below are what Debezium's MySQL connector requires.
[mysqld]
skip-host-cache
skip-name-resolve
user=mysql
symbolic-links=0
# must be unique in the replication topology; note the Debezium connector's
# database.server.id (184054 in source.json) must differ from this value
server-id = 223344
# enable binary logging — mandatory for CDC capture
log_bin = mysql-bin
# keep binlogs for 1 day only (test setup; not a production value)
expire_logs_days = 1
# Debezium requires row-based binlog format
binlog_format = row
default_authentication_plugin = mysql_native_password
没有使用GTID 如果需要设置GTID 可以添加相关参数
expire_logs_days = 10
binlog_rows_query_log_events=ON
enforce-gtid-consistency
gtid-mode=ON
source文件
{
"name": "m-s-connector",
"config": {
"connector.class": "io.debezium.connector.mysql.MySqlConnector",
"tasks.max": "1",
"database.server.name": "test",
"database.user": "root",
"database.server.id": "184054",
"database.password" : "mysql",
"database.dbname" : "test",
"database.hostname": "ip",
"database.port": "3306",
"table.include.list": "test.bmsql_config,test.bmsql_customer,test.bmsql_district,test.bmsql_history,test.bmsql_item,test.bmsql_new_order,test.bmsql_oorder,test.bmsql_order_line,test.bmsql_stock,test.bmsql_warehouse",
"database.history.kafka.bootstrap.servers" : "dbz-kafka:9092",
"database.history.kafka.topic": "schema-changes.mysql-source",
"include.schema.changes": "true",
"database.history.skip.unparseable.ddl": true,
"database.history.store.only.captured.tables.ddl": true
}
}
sink文件
{
"name":"p-s-connector",
"config":{
"topics": "test.test.bmsql_config,test.test.bmsql_customer,test.test.bmsql_district,test.test.bmsql_history,test.test.bmsql_item,test.test.bmsql_new_order,test.test.bmsql_oorder,test.test.bmsql_order_line,test.test.bmsql_stock,test.test.bmsql_warehouse",
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
"connection.url": "jdbc:postgresql://ip:5432/test",
"connection.user": "postgres",
"connection.password": "postgres",
"tasks.max": "1",
"auto.create": true,
"auto.evolve": true,
"insert.mode":"upsert",
"delete.enabled": true,
"batch.size": 3000,
"delete.enabled": false,
"pk.mode": "record_key",
"transforms":"ExtractField,repTopic,TimestampConverter",
"transforms.ExtractField.type":"org.apache.kafka.connect.transforms.ExtractField$Value",
"transforms.ExtractField.field":"after",
"transforms.repTopic.type":"org.apache.kafka.connect.transforms.RegexRouter",
"transforms.repTopic.regex":"(.*.test).(.*)",
"transforms.repTopic.replacement":"$2",
"transforms.TimestampConverter.type": "com.github.howareyouo.kafka.connect.transforms.TimestampConverter$Value",
"transforms.TimestampConverter.target.type": "Timestamp",
"transforms.TimestampConverter.format":"yyyy-MM-dd'T'HH:mm:ss'Z'",
"transforms.TimestampConverter.fields": "ol_delivery_d,o_entry_d,h_date,c_since"
}
}
测试软件下载
点击下载 下载zip文件 支持mysql的版本
创建数据库
在mysql端跟postgres分别进入容器操作数据库创建database
mysql用户名密码 root/mysql
postgres用户名密码 postgres/postgres 切换postgres用户免密
双端创建测试数据库
CREATE DATABASE test;
使用Benchmark的建表创建表跟索引 不使用Benchmark的初始化会额外创建外键等
mysql仅执行benchmarksql-mysql/run/sql.mysql/tableCreates.sql 即可
postgresql端要执行
benchmarksql-mysql/run/sql.common/tableCreates.sql
benchmarksql-mysql/run/sql.common/indexCreates.sql
benchmarksql-mysql/run/sql.postgres/extraHistID.sql
三个sql的内容
加载测试数据
./runLoader.sh 配置文件
./runBenchmark.sh 配置文件
配置文件示例
db=mysql
driver=com.mysql.jdbc.Driver
conn=jdbc:mysql://localhost:3306/test?useSSL=false&useServerPrepStmts=true&useConfigs=maxPerformance&rewriteBatchedStatements=true
user=root
password=mysql
warehouses=1
loadWorkers=4
terminals=1
//To run specified transactions per terminal- runMins must equal zero
runTxnsPerTerminal=0
//To run for specified minutes- runTxnsPerTerminal must equal zero
runMins=10
//Number of total transactions per minute
limitTxnsPerMin=0
//Set to true to run in 4.x compatible mode. Set to false to use the
//entire configured database evenly.
terminalWarehouseFixed=true
//The following five values must add up to 100
//The default percentages of 45, 43, 4, 4 & 4 match the TPC-C spec
newOrderWeight=45
paymentWeight=43
orderStatusWeight=4
deliveryWeight=4
stockLevelWeight=4
// Directory name to create for collecting detailed result data.
// Comment this out to suppress.
resultDirectory=my_result_%tY-%tm-%td_%tH%tM%tS
//osCollectorScript=./misc/os_collector_linux.py
//osCollectorInterval=1
//osCollectorSSHAddr=user@dbhost
//osCollectorDevices=net_eth0 blk_sda
创建source、sink connector
curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" http://localhost:8083/connectors/ -d @source.json
curl -i -X POST -H "Accept:application/json" -H "Content-Type:application/json" http://localhost:8083/connectors/ -d @sink.json