[ShardingSphere] Custom database and table sharding in Spring Boot + Sharding-JDBC with a precise sharding algorithm

The previous two articles covered two ways to shard databases and tables; both belong to the same category, since the sharding is defined purely through configuration. In this article we look at a different approach: besides configuration, Sharding-JDBC also lets you define the sharding rules in code.


Custom sharding rules

Previously we sharded by id modulo and by date, all through configuration. To demonstrate the code-based approach we will stick with date-based table sharding, but this time implement the rules ourselves. Before writing any code, let's pin the rules down: the database is chosen by id modulo 2 (even ids go to ds0, odd ids to ds1), and the table is chosen by the year of day_date (2021 goes to t_order2021, 2022 to t_order2022).
We create two databases, ds0 and ds1, and in each of them the two tables t_order2021 and t_order2022:

CREATE TABLE `t_order2021` (
  `id` bigint(32) NOT NULL,
  `user_id` int(11) DEFAULT NULL,
  `order_id` int(11) DEFAULT NULL,
  `cloumn` varchar(45) DEFAULT NULL,
  `day_date` char(8) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
CREATE TABLE `t_order2022` (
  `id` bigint(32) NOT NULL,
  `user_id` int(11) DEFAULT NULL,
  `order_id` int(11) DEFAULT NULL,
  `cloumn` varchar(45) DEFAULT NULL,
  `day_date` char(8) DEFAULT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

Project setup

With the basic environment ready, we can set up the project: a Spring Boot application integrating MyBatis and Sharding-JDBC. The dependencies are as follows:

<properties>
    <java.version>1.8</java.version>
    <sharding-sphere.version>4.1.1</sharding-sphere.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>

    <dependency>
        <groupId>org.apache.shardingsphere</groupId>
        <artifactId>sharding-jdbc-spring-boot-starter</artifactId>
        <version>${sharding-sphere.version}</version>
    </dependency>

    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <scope>provided</scope>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.apache.commons/commons-lang3 -->
    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>3.12.0</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/com.zaxxer/HikariCP -->
    <dependency>
        <groupId>com.zaxxer</groupId>
        <artifactId>HikariCP</artifactId>
        <version>4.0.3</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/mysql/mysql-connector-java -->
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>8.0.25</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/org.mybatis.spring.boot/mybatis-spring-boot-starter -->
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>2.1.4</version>
    </dependency>
</dependencies>

Sharding configuration

Add the MyBatis and Sharding-JDBC configuration:

server.port=10080

spring.shardingsphere.datasource.names=ds0,ds1

# First data source
spring.shardingsphere.datasource.ds0.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.ds0.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.ds0.jdbc-url=jdbc:mysql://localhost:3306/ds0
spring.shardingsphere.datasource.ds0.username=root
spring.shardingsphere.datasource.ds0.password=root

# Second data source
spring.shardingsphere.datasource.ds1.type=com.zaxxer.hikari.HikariDataSource
spring.shardingsphere.datasource.ds1.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.ds1.jdbc-url=jdbc:mysql://localhost:3306/ds1
spring.shardingsphere.datasource.ds1.username=root
spring.shardingsphere.datasource.ds1.password=root

# Database sharding strategy for t_order (standard strategy, sharding column: id)
spring.shardingsphere.sharding.tables.t_order.database-strategy.standard.sharding-column=id
# Custom (precise) database sharding algorithm
spring.shardingsphere.sharding.tables.t_order.database-strategy.standard.precise-algorithm-class-name=com.example.test.config.MyDbPreciseShardingAlgorithm

# Table sharding strategy for t_order
spring.shardingsphere.sharding.tables.t_order.actual-data-nodes=ds$->{0..1}.t_order$->{2021..2022}
spring.shardingsphere.sharding.tables.t_order.table-strategy.standard.sharding-column=day_date
# Custom (precise) table sharding algorithm
spring.shardingsphere.sharding.tables.t_order.table-strategy.standard.precise-algorithm-class-name=com.example.test.config.MyTablePreciseShardingAlgorithm

# Key generation strategy for t_order.id
spring.shardingsphere.sharding.tables.t_order.key-generator.column=id
spring.shardingsphere.sharding.tables.t_order.key-generator.type=SNOWFLAKE

# Print the actual SQL
spring.shardingsphere.props.sql.show=true

# MyBatis
mybatis.mapper-locations=classpath:mapping/*.xml
mybatis.type-aliases-package=com.example.test.po

# Log level for the DAO package
logging.level.com.example.test.dao=DEBUG

On the startup class, add the MyBatis mapper scan annotation @MapperScan("com.example.test.dao").
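
A minimal sketch of the startup class (the class name ShardingDemoApplication is just an example):

package com.example.test;

import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

/**
 * Application entry point; @MapperScan tells MyBatis where the DAO interfaces live.
 */
@SpringBootApplication
@MapperScan("com.example.test.dao")
public class ShardingDemoApplication {

    public static void main(String[] args) {
        SpringApplication.run(ShardingDemoApplication.class, args);
    }
}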

The configuration above references two custom algorithm classes by fully-qualified name; next we implement them.

Writing the custom sharding algorithm classes

We defined the rules at the beginning of the article; now let's implement them. Both rules route on a single exact value (the id, and the year of day_date), so the precise sharding algorithm (PreciseShardingAlgorithm, used by the standard strategy for = and IN conditions) is the right fit. The code:

package com.example.test.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.util.Collection;

/**
 * Custom database sharding algorithm.
 *
 * @author echo
 * @date 2021/6/10 10:09
 */
@Slf4j
public class MyDbPreciseShardingAlgorithm implements PreciseShardingAlgorithm<Long> {

    /**
     * Pick the target data source.
     *
     * @param availableTargetNames all configured data source names
     * @param shardingValue        the sharding value extracted from the executed SQL
     * @return the name of the chosen data source
     */
    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<Long> shardingValue) {
        // Actual data source nodes
        availableTargetNames.forEach(item -> log.info("actual node db:{}", item));

        log.info("logic table name:{}, route column:{}", shardingValue.getLogicTableName(), shardingValue.getColumnName());

        // Precise sharding value
        log.info("column value:{}", shardingValue.getValue());

        // id % 2 picks the data source: even ids go to ds0, odd ids to ds1
        for (String each : availableTargetNames) {
            Long value = shardingValue.getValue();
            if (("ds" + value % 2).equals(each)) {
                return each;
            }
        }

        return null;
    }

}
package com.example.test.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingAlgorithm;
import org.apache.shardingsphere.api.sharding.standard.PreciseShardingValue;

import java.util.Collection;

/**
 * Custom table sharding algorithm.
 *
 * @author echo
 * @date 2021/6/10 10:09
 */
@Slf4j
public class MyTablePreciseShardingAlgorithm implements PreciseShardingAlgorithm<String> {

    @Override
    public String doSharding(Collection<String> availableTargetNames, PreciseShardingValue<String> shardingValue) {
        // Actual table nodes
        availableTargetNames.forEach(item -> log.info("actual node table:{}", item));

        log.info("logic table name:{}, route column:{}", shardingValue.getLogicTableName(), shardingValue.getColumnName());

        // Precise sharding value
        log.info("column value:{}", shardingValue.getValue());

        // day_date is stored as yyyyMMdd (char(8)); its first four characters (the year)
        // select the physical table, e.g. 20220611 -> t_order2022
        String year = shardingValue.getValue().substring(0, 4);
        for (String each : availableTargetNames) {
            if (("t_order" + year).equals(each)) {
                return each;
            }
        }

        return null;
    }

}

Test code

Following the pattern of the previous articles, here is a bit of test code:

package com.example.test.controller;

import com.example.test.po.TOrder;
import com.example.test.service.TOrderService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;

import java.util.List;

/**
 * @author echo
 * @date 2021/6/3 16:37
 */
@RestController
@RequestMapping("/order")
public class TOrderController {

    @Autowired
    private TOrderService tOrderService;

    @PostMapping("/save")
    public String save(@RequestBody TOrder tOrder) {
        tOrderService.save(tOrder);
        return "success";
    }

    @PostMapping("/delete")
    public String delete(@RequestParam(value = "id") Long id) {
        tOrderService.delete(id);
        return "success";
    }

    @PostMapping("/update")
    public int update(@RequestBody TOrder tOrder) {
        return tOrderService.update(tOrder);
    }

    @GetMapping("/getList")
    public List<TOrder> getList() {
        return tOrderService.getList();
    }

}

public interface TOrderService {
    void save(TOrder tOrder);

    void delete(Long id);

    int update(TOrder tOrder);

    List<TOrder> getList();
}

@Service
public class TOrderServiceImpl implements TOrderService {

    @Autowired
    private TOrderDao tOrderDao;

    @Override
    public void save(TOrder tOrder) {
        tOrderDao.insert(tOrder);
    }

    @Override
    public void delete(Long id) {
        tOrderDao.delete(id);
    }

    @Override
    public int update(TOrder tOrder) {
        return tOrderDao.update(tOrder);
    }

    @Override
    public List<TOrder> getList() {
        return tOrderDao.getList();
    }

}

public interface TOrderDao {

    void insert(TOrder tOrder);

    List<TOrder> getList();

    void delete(Long id);

    int update(TOrder tOrder);
}

<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.test.dao.TOrderDao">
    <resultMap id="BaseResultMap" type="com.example.test.po.TOrder">
        <id column="id" jdbcType="BIGINT" property="id"/>
        <result column="user_id" jdbcType="INTEGER" property="userId"/>
        <result column="order_id" jdbcType="INTEGER" property="orderId"/>
        <result column="cloumn" jdbcType="VARCHAR" property="cloumn"/>
        <result column="day_date" jdbcType="CHAR" property="dayDate"/>
    </resultMap>
    <sql id="Base_Column_List">
        id, user_id, order_id, cloumn, day_date
    </sql>

    <insert id="insert" parameterType="com.example.test.po.TOrder">
        insert into t_order (user_id, order_id, cloumn, day_date) value (#{userId}, #{orderId}, #{cloumn}, #{dayDate})
    </insert>

    <select id="getList" resultMap="BaseResultMap">
        select
        <include refid="Base_Column_List"/>
        from t_order
    </select>

    <delete id="delete" parameterType="java.lang.Long">
        delete from t_order
        where id = #{id,jdbcType=BIGINT}
    </delete>

    <update id="update" parameterType="com.example.test.po.TOrder">
        update t_order
        set
          cloumn = #{cloumn,jdbcType=VARCHAR},
          order_id = #{orderId,jdbcType=INTEGER},
          user_id = #{userId,jdbcType=INTEGER}
        where id = #{id,jdbcType=BIGINT}
    </update>
</mapper>
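
The TOrder entity used throughout is not listed above; a minimal sketch, with field names and types inferred from the table DDL and the resultMap (Lombok @Data is assumed for getters and setters):

package com.example.test.po;

import lombok.Data;

/**
 * Maps one row of t_order2021 / t_order2022 (logical table t_order).
 */
@Data
public class TOrder {

    /** Primary key, filled in by the SNOWFLAKE key generator */
    private Long id;

    private Integer userId;

    private Integer orderId;

    /** The column really is named "cloumn" in the DDL */
    private String cloumn;

    /** yyyyMMdd, used as the table sharding column */
    private String dayDate;
}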

Once everything is in place we can give it a quick test.

Call the endpoint http://localhost:10080/order/save (the application port configured above, not MySQL's 3306) and the data lands in the database and table dictated by our rules; a quick way to try it is sketched below.
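
As a quick check, a throwaway client along these lines (a sketch that assumes the TOrder class shown above) inserts one order dated in 2022; the year routes it to t_order2022, and the parity of the generated snowflake id decides between ds0 and ds1:

package com.example.test;

import com.example.test.po.TOrder;
import org.springframework.web.client.RestTemplate;

/**
 * Throwaway client: posts one order dated in 2022 to the running application.
 */
public class SaveOrderDemo {

    public static void main(String[] args) {
        TOrder order = new TOrder();
        order.setUserId(1);
        order.setOrderId(100);
        order.setCloumn("demo");
        order.setDayDate("20220611"); // yyyyMMdd -> year 2022 -> t_order2022

        String result = new RestTemplate()
                .postForObject("http://localhost:10080/order/save", order, String.class);
        System.out.println(result); // "success"
    }
}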

Summary

  • Configuration keys vary between ShardingSphere versions, so when configuring anything check the official documentation for the exact version you are using.
  • Each sharding strategy has its own set of configuration keys; the precise sharding algorithm used here must be configured under the standard strategy keys, otherwise it will not take effect.