MySql时间字段分区
MySQL 分区一般都是按时间进行的,但 MySQL 本身不支持自动分区。想要实现自动分区,要么编写存储过程,要么定时执行脚本。下面是用 Java 实现分区维护的代码。
import com.alibaba.druid.pool.DruidDataSource; import lombok.extern.slf4j.Slf4j; import org.springframework.jdbc.core.JdbcTemplate; import javax.sql.DataSource; import java.time.LocalDate; import java.util.HashMap; import java.util.List; import java.util.Map; @Slf4j public class DataSourceHelper { /** * 表分区数据量 */ public static final String PARTITION_COUNT_SQL = "SELECT count(1) FROM %s PARTITION(%s)"; /** * 查询表的分区信息SQL,注意这里的table_rows表示分区内数据条数,但是此值不准确 */ public static final String PARTITION_SQL = "SELECT partition_name part,table_rows rows,partition_description val " + "from information_schema.partitions where table_schema=schema() and table_name = ?"; /** * 移除表分区 */ public static final String REMOVE_MAX_PARTITION_SQL = "ALTER TABLE %s drop PARTITION %s"; /** * @Description 添加最大分区SQL */ public static final String ADD_MAX_PARTITION_SQL = "ALTER table %s ADD partition(PARTITION p_max VALUES less than MAXVALUE)"; /** * @Description 获取Datasource对象 * @Param [url, username, password] **/ public static DataSource getDataSource(String url, String username, String password) { DruidDataSource druidDataSource = new DruidDataSource(); druidDataSource.setUrl(url); druidDataSource.setUsername(username); druidDataSource.setPassword(password); // 最大连接数 druidDataSource.setMaxActive(1); // 获取连接最大等待时间 druidDataSource.setMaxWait(2000); return druidDataSource; } /** * 获取JdbcTemplate对象 * * @oara/n datasource */ public static JdbcTemplate getJdbcTemplate(DataSource datasource) { JdbcTemplate jdbcTemplate = new JdbcTemplate(); jdbcTemplate.setDataSource(datasource); return jdbcTemplate; } /** * 组装数据库连接URL * * @oaram dataConfig * ©return */ public static String getDataSourceUrl(DataConfig dataConfig) { String format = "jdbc:mysql://%s:%s/%s?useUnicode=true"; String url = String.format(format, dataConfig.getIp(), dataConfig.getPort(), dataConfig.getDataBase()); log.info("组装的数据库连接:{}", url); return url; } /** * @Description 得到待移除分区信息 **/ public static Map<String, PartitionInfo> 
getWaitRemoveTablePartitionInfo(List<PartitionInfo> partitions, DataConfig dataConfig) { Map<String, PartitionInfo> removePartition = new HashMap(); if (partitions.get(0).getPart() == null) { // 说明没有分区 return removePartition; } final LocalDate now = LocalDate.now(); final LocalDate localDate = now.minusDays(dataConfig.getRetainDay()); String variablePart = String.valueOf(localDate).replaceAll("-", ""); for (PartitionInfo partition : partitions) { String part = partition.getPart(); if (part.equals("p_max")) continue; String partName = part.substring(1); log.info("截止分区号:{},待判断分区号:{}", variablePart, partName); if (Long.valueOf(partName) < Long.valueOf(variablePart)) { removePartition.put(part, partition); } } return removePartition; } /** * @Description 判断分区是否存在 */ public static boolean partitionExits(List<PartitionInfo> partitions, String newPartition) { if (partitions.get(0).getPart() == null) { // 说明没有分区 return false; } for (PartitionInfo partition : partitions) { if (partition.getPart().equals(newPartition)) { return true; } } return false; } /** * @Description 判断最大分区是否存在 */ public static boolean maxPartitionExits(List<PartitionInfo> partitions, String dataSourceUrl, String table) { if (partitions.get(0).getPart() == null) { // 说明没有分区 return false; } for (PartitionInfo partition : partitions) { if (partition.getVal().equals("MAXVALUE")) { if (!partition.getPart().equals("p_max")) { log.warn("[{} {}]最大分区建议更名为p_max", dataSourceUrl, table); } return true; } } return false; } /** * @Description 获取最大分区 **/ public static String getMaxPartition(List<PartitionInfo> partitions) { if (partitions.get(0).getPart() == null) { // 说明没有分区 return "p_max"; } for (PartitionInfo partition : partitions) { if (partition.getVal().equals("MAXVALUE")) { return partition.getPart(); } } return "p_max"; } /** * @return java.lang.String * @Description 生成创建/添加分区SQL * append 表示是否是追加分区,为true时不创建最大分区(追加分区时最大分区不能存在,否则无法追加新分区) * MAXVALUE can only be used in last partition definition * 
同时追加分区时不能逆序追加,比如现有分区范围为2020-01-01 不能追加新分区范围为2019-12-31 * VALUES LESS THAN value must be strictly increasing for each partition * @Param [dataConfig, append] **/ public static String generateCreatePartitionSql(DataConfig dataConfig, boolean append) { final LocalDate now = LocalDate.now(); final LocalDate localDate = now.plusDays(1); final String date = String.valueOf(localDate); String sql = ""; if (append) { // 追加分区 String template = "ALTER table %s ADD partition(PARTITION %s VALUES less than (TO_DAYS('%s')))"; sql = String.format(template, dataConfig.getTable(), generateNewPartitionName(), date); } else { // 创建最大分区 String template = "ALTER table %s partition by range(TO_DAYS(%s)) (PARTITION %s VALUES less than (TO_DAYS('%s'))," + "PARTITION p_max VALUES less than MAXVALUE)"; sql = String.format(template, dataConfig.getTable(), dataConfig.getField(), generateNewPartitionName(), date); } log.info("创建新分区的SQL: {}", sql); return sql; } /** * @return java.lang.String * @Description 生成新分区的名称(明天的分区) * @Param [] **/ public static String generateNewPartitionName() { final LocalDate now = LocalDate.now(); // 需要生成几天后的分区,就改动相应的数字 final LocalDate localDate = now.plusDays(1); final String date = String.valueOf(localDate); String partition = date.replaceAll("-", ""); return "p" + partition; } /** * 组装删除分区语句 */ public static String generateDeletePartitionSql(DataConfig dataConfig, String partition) { // 注意条件使用双引号 String deleteSql = "ALTER TABLE %s DROP PARTITION %s"; String sql = String.format(deleteSql, dataConfig.getTable(), partition); log.info("组装的清数SQL: {}", sql); return sql; } }
数据库配置类
import lombok.Data;

/**
 * Connection and partition-maintenance settings for one database table.
 */
@Data
public class DataConfig {
    // Database host IP
    private String ip;
    // Database port
    private Integer port;
    // Database user name
    private String userName;
    // Database password
    private String password;
    // Database (schema) name
    private String dataBase;
    // Table to partition
    private String table;
    // Partition column. Note: the partition column must be part of the primary
    // key and of every unique index; if it is a date column, it must not be of
    // type timestamp.
    private String field;
    // Number of days of partitions to retain
    private Integer retainDay;
}
分区信息类
import lombok.Data;

/**
 * One row of partition metadata as read from information_schema.partitions.
 */
@Data
public class PartitionInfo {
    // Partition name
    private String part;
    // Row count inside the partition (table_rows; an estimate, not exact)
    private long rows;
    // Partition description, i.e. the range this partition covers. For a
    // date-based partition this is a numeric string; e.g. "MAXVALUE" for the
    // catch-all partition.
    private String val;
}
测试类
import org.springframework.jdbc.core.BeanPropertyRowMapper; import org.springframework.jdbc.core.JdbcTemplate; import org.sql2o.Connection; import org.sql2o.Query; import org.sql2o.Sql2o; import javax.sql.DataSource; import java.util.HashMap; import java.util.List; import java.util.Map; public class MainTest { public static void main(String[] args) { DataConfig dataConfig = new DataConfig(); dataConfig.setDataBase("ybryejd"); dataConfig.setIp("*************"); dataConfig.setUserName("ybryejd_f"); dataConfig.setPassword("ispbpda5"); dataConfig.setField("create_time"); dataConfig.setPort(5505); dataConfig.setTable("sheet1"); dataConfig.setRetainDay(2); DataSource dataSource = DataSourceHelper.getDataSource(DataSourceHelper.getDataSourceUrl(dataConfig), dataConfig.getUserName(), dataConfig.getPassword()); JdbcTemplate jdbcTemplate = DataSourceHelper.getJdbcTemplate(dataSource); // checkPartition(dataConfig, jdbcTemplate); createPartition(dataConfig, jdbcTemplate); // deletePartition(dataConfig, jdbcTemplate,"p_max"); } /** * @Description 检测表分区 **/ public static void checkPartition(DataConfig dataConfig, JdbcTemplate jdbcTemplate) { List<PartitionInfo> partitionInfos = jdbcTemplate.query(DataSourceHelper.PARTITION_SQL, new BeanPropertyRowMapper<>(PartitionInfo.class), new String[]{dataConfig.getTable()}); System.out.println("检测表分区"); } /** * @Description 创建分区或添加分区 **/ public static void createPartition(DataConfig dataConfig, JdbcTemplate jdbcTemplate) { // 添加分区 String partitionSQL = DataSourceHelper.generateCreatePartitionSql(dataConfig, true); // 创建分区(第一次添加分区) // String partitionSQL = DataSourceHelper.generateCreatePartitionSql(dataConfig, false); jdbcTemplate.execute(partitionSQL); System.out.println("分区操作执行成功"); } /** * @Description 移除分区 **/ public static void deletePartition(DataConfig dataConfig, JdbcTemplate jdbcTemplate,String partitionName) { String partitionSQL = DataSourceHelper.generateDeletePartitionSql(dataConfig, partitionName); 
jdbcTemplate.execute(partitionSQL); System.out.println("移除分区执行成功"); } }
TIPS:
上面代码示例使用的分区字段为 datetime 类型的日期字段。测试过程中我们发现该字段存在重复值;由于分区字段必须满足主键和唯一索引的要求,该字段因此无法直接用作分区字段。进一步排查发现,重复数据产生于并发写入的场景:datetime 字段只精确到秒,并发插入时就可能出现相同的时间值。最终我们通过提高 datetime 字段的精度(增加小数秒)解决了数据重复问题。