Implementing read/write splitting and database/table sharding with sharding-jdbc
Requirements:
Database sharding: store orders from different business lines in different databases, keyed by business_id;
Table sharding: store different users' orders in different tables, keyed by user_id; to allow direct queries by the non-sharding column order_id, the gene method can be used;
Read/write splitting: to relieve pressure on the master databases, route read operations to the slaves.
Database and table SQL
-- Master databases
CREATE DATABASE `database_103`;
CREATE DATABASE `database_112`;

-- Slave databases
CREATE DATABASE `database_slave_103`;
CREATE DATABASE `database_slave_112`;

-- Create the following tables in every database
CREATE TABLE `t_order_0` (
  `id` bigint(20) NOT NULL,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_order_1` (
  `id` bigint(20) NOT NULL,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_order_2` (
  `id` bigint(20) NOT NULL,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `t_order_3` (
  `id` bigint(20) NOT NULL,
  `order_id` bigint(20) NOT NULL,
  `user_id` bigint(20) NOT NULL,
  `business_id` bigint(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
pom.xml
<!-- sharding-jdbc -->
<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>sharding-jdbc-core</artifactId>
    <version>1.4.2</version>
</dependency>
<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>sharding-jdbc-config-spring</artifactId>
    <version>1.4.0</version>
</dependency>
spring-database.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:tx="http://www.springframework.org/schema/tx"
       xmlns:rdb="http://www.dangdang.com/schema/ddframe/rdb"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
                           http://www.springframework.org/schema/beans/spring-beans.xsd
                           http://www.springframework.org/schema/tx
                           http://www.springframework.org/schema/tx/spring-tx.xsd
                           http://www.dangdang.com/schema/ddframe/rdb
                           http://www.dangdang.com/schema/ddframe/rdb/rdb.xsd">

    <!-- Master and slave data sources (Druid) -->
    <bean id="database_112" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${jdbc.url.112}"/>
        <property name="username" value="${jdbc.username.112}"/>
        <property name="password" value="${jdbc.password.112}"/>
        <property name="maxActive" value="100"/>
        <property name="initialSize" value="50"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="5"/>
    </bean>

    <bean id="database_slave_112" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${jdbc.url.slave.112}"/>
        <property name="username" value="${jdbc.username.slave.112}"/>
        <property name="password" value="${jdbc.password.slave.112}"/>
        <property name="maxActive" value="100"/>
        <property name="initialSize" value="50"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="5"/>
    </bean>

    <bean id="database_103" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${jdbc.url.103}"/>
        <property name="username" value="${jdbc.username.103}"/>
        <property name="password" value="${jdbc.password.103}"/>
        <property name="maxActive" value="100"/>
        <property name="initialSize" value="50"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="5"/>
    </bean>

    <bean id="database_slave_103" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
        <property name="driverClassName" value="com.mysql.jdbc.Driver"/>
        <property name="url" value="${jdbc.url.slave.103}"/>
        <property name="username" value="${jdbc.username.slave.103}"/>
        <property name="password" value="${jdbc.password.slave.103}"/>
        <property name="maxActive" value="100"/>
        <property name="initialSize" value="50"/>
        <property name="maxWait" value="60000"/>
        <property name="minIdle" value="5"/>
    </bean>

    <!-- MyBatis -->
    <bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">
        <property name="mapperLocations" value="classpath:xmlmapper/*.xml"/>
        <property name="dataSource" ref="shardingDataSource"/>
        <!-- MyBatis configuration file -->
        <property name="configLocation" value="classpath:/mybatis/mybatis-config.xml"/>
    </bean>
    <bean id="sqlSession" class="org.mybatis.spring.SqlSessionTemplate">
        <constructor-arg index="0" ref="sqlSessionFactory"/>
    </bean>
    <!-- Scanner for annotated mappers -->
    <bean class="org.mybatis.spring.mapper.MapperScannerConfigurer">
        <property name="basePackage" value="com.lc.sharding.mybatismapper"/>
        <property name="sqlSessionTemplateBeanName" value="sqlSession"/>
    </bean>

    <!-- Transactions -->
    <bean id="txManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
        <property name="dataSource" ref="shardingDataSource"/>
    </bean>
    <tx:annotation-driven transaction-manager="txManager"/>

    <!-- Read/write splitting -->
    <rdb:master-slave-data-source id="rbb_112" master-data-source-ref="database_112" slave-data-sources-ref="database_slave_112"/>
    <rdb:master-slave-data-source id="rbb_103" master-data-source-ref="database_103" slave-data-sources-ref="database_slave_103"/>

    <!-- Database sharding strategy -->
    <rdb:strategy id="databaseShardingStrategy" sharding-columns="business_id" algorithm-expression="rbb_${business_id.longValue()}"/>

    <!-- Table sharding strategy -->
    <rdb:strategy id="tableShardingStrategy" sharding-columns="user_id,order_id"
                  algorithm-class="com.lc.sharding.algorithm.MultipleKeysTableShardingAlgorithmImpl"/>

    <rdb:data-source id="shardingDataSource">
        <rdb:sharding-rule data-sources="rbb_112,rbb_103">
            <rdb:table-rules>
                <rdb:table-rule logic-table="t_order" actual-tables="t_order_${0..3}"
                                database-strategy="databaseShardingStrategy"
                                table-strategy="tableShardingStrategy"/>
            </rdb:table-rules>
        </rdb:sharding-rule>
        <rdb:props>
            <prop key="metrics.enable">true</prop>
            <prop key="sql.show">true</prop>
        </rdb:props>
    </rdb:data-source>
</beans>
Gene-based multi-column sharding
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import com.dangdang.ddframe.rdb.sharding.api.ShardingValue;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.MultipleKeysTableShardingAlgorithm;

public class MultipleKeysTableShardingAlgorithmImpl implements MultipleKeysTableShardingAlgorithm {

    public Collection<String> doSharding(Collection<String> tableNames, Collection<ShardingValue<?>> shardingValues) {
        List<String> shardingSuffix = new ArrayList<String>();
        long partId = 0;
        // Either sharding column determines the table suffix: because order_id carries
        // the user_id gene in its low bits, user_id % 4 and order_id % 4 are identical.
        for (ShardingValue value : shardingValues) {
            if (value.getColumnName().equals("user_id")) {
                partId = ((Long) value.getValue()) % 4;
                break;
            } else if (value.getColumnName().equals("order_id")) {
                partId = ((Long) value.getValue()) % 4;
                break;
            }
        }
        // Pick the actual table whose suffix matches, e.g. t_order_3 for partId = 3.
        for (String name : tableNames) {
            if (name.endsWith(String.valueOf(partId))) {
                shardingSuffix.add(name);
                return shardingSuffix;
            }
        }
        return shardingSuffix;
    }
}
What is gene-based sharding?
When the order id (oid) is generated, a sharding gene is appended to the low bits of order_id, so that every order belonging to the same user_id carries the same gene and lands in the same table.
Reference: https://mp.weixin.qq.com/s/PCzRAZa9n4aJwHOX-kAhtA
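As a concrete illustration (the class name and numbers below are made up for this example): the gene is the low 4 bits of user_id, spliced into the low 4 bits of order_id. Then order_id % 4 always equals user_id % 4, so routing by either column selects the same t_order_N.

public class GeneShardingDemo {
    public static void main(String[] args) {
        long userId = 123;                 // example user
        long gene = userId & 15;           // low 4 bits of user_id: 1011 (= 11)

        long someUniqueId = 987654L;       // stands in for a snowflake-style id
        long orderId = (someUniqueId << 4) | gene; // append the gene to the low bits

        // Both lines print 3, so user_id and order_id map to the same table suffix.
        System.out.println(userId % 4);    // 3
        System.out.println(orderId % 4);   // 3
    }
}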
Generating order_id from user_id:
public long buildOrderId(long userId) {
    // keep only the last 4 bits of the user id as the gene
    userId = userId & 15;
    // first obtain a 60-bit unique id
    long uniqueId = this.nextId();
    // shift the unique id left by 4 bits and splice in the 4-bit gene
    return (uniqueId << 4) | userId;
}
this.nextId(); // uses the snowflake algorithm to generate a 60-bit distributed unique id: 1 sign bit + 41-bit timestamp + 5-bit workId + 5-bit datacenterId + 6-bit sequence, with the low 4 bits of the final order_id reserved for the sharding gene
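The post does not show nextId() itself, so the following is only a minimal sketch of a snowflake-style generator using the bit widths listed above (41-bit timestamp, 5-bit datacenterId, 5-bit workId, 6-bit sequence). The class name, the custom epoch, and the lack of clock-rollback handling are simplifications of this sketch, not the author's implementation.

// Minimal snowflake-style id generator (sketch only).
public class SimpleSnowflake {
    private static final long EPOCH = 1514736000000L; // assumed custom epoch (2018-01-01)
    private final long workerId;      // 0..31 (5 bits)
    private final long datacenterId;  // 0..31 (5 bits)
    private long sequence = 0L;       // 0..63 (6 bits)
    private long lastTimestamp = -1L;

    public SimpleSnowflake(long workerId, long datacenterId) {
        this.workerId = workerId;
        this.datacenterId = datacenterId;
    }

    // Layout: | 41-bit timestamp | 5-bit datacenterId | 5-bit workerId | 6-bit sequence |
    public synchronized long nextId() {
        long now = System.currentTimeMillis();
        if (now == lastTimestamp) {
            sequence = (sequence + 1) & 63;        // wrap within 6 bits
            if (sequence == 0) {                   // sequence exhausted for this millisecond
                while (now <= lastTimestamp) {     // spin until the next millisecond
                    now = System.currentTimeMillis();
                }
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = now;
        return ((now - EPOCH) << 16)               // 16 = 5 + 5 + 6 bits below the timestamp
                | (datacenterId << 11)             // 11 = 5 + 6
                | (workerId << 6)
                | sequence;
    }
}

buildOrderId() then shifts this id left by 4 bits and ORs in the user_id gene, so the complete order_id still fits comfortably in a signed 64-bit long.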
Summary
Data sharding:
- Supports database sharding plus table sharding;
- Supports =, BETWEEN, IN and other sharding conditions, and multiple sharding columns can be used together;
- Supports complex queries with aggregation, grouping, ordering, pagination, and joins;
- Flexible sharding rules, including inline expressions;
- Hint-based forced routing;
- Supports distributed primary keys.
Read/write splitting:
- Supports one-master, multiple-slave read/write splitting;
- Can be used together with database/table sharding;
- Supports distributed generation of global primary keys.
Flexible (BASE) transactions:
- Best-effort delivery transactions.
Distributed governance:
- Supports a configuration center with dynamic configuration changes;
- Supports client-side circuit breaking and failover.
Reference: http://shardingsphere.io/
Onward and upward, young man.