Kudu Partitioning Strategies
To provide scalability, a Kudu table is divided into units called tablets, which are distributed across many tablet servers. A row always belongs to exactly one tablet. How rows are assigned to tablets is determined by the table's partitioning, which is set when the table is created. Kudu provides three partitioning strategies:
Range Partitioning
Range partitioning distributes rows using totally ordered, non-overlapping ranges of the partition key. When the ranges are chosen to match the data, rows are spread evenly across machines, which helps prevent load imbalance.
Create a table partitioned as follows:
create table rangeTable (CompanyId Type.INT32, WorkId Type.INT32, Name Type.STRING, Gender Type.STRING, Photo Type.STRING)
RANGE (CompanyId) (
    PARTITION  0 <= VALUES < 10,
    PARTITION 10 <= VALUES < 20,
    PARTITION 20 <= VALUES < 30,
    PARTITION 30 <= VALUES < 40,
    PARTITION 40 <= VALUES < 50,
    PARTITION 50 <= VALUES < 60,
    PARTITION 60 <= VALUES < 70,
    PARTITION 70 <= VALUES < 80,
    PARTITION 80 <= VALUES < 90
)
Code implementation:
import java.util.LinkedList;
import java.util.List;

import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.PartialRow;

public class CreateRangePartition {

    private static ColumnSchema newColumn(String column, Type type, boolean isPrimary) {
        final ColumnSchema.ColumnSchemaBuilder columnSchemaBuilder =
                new ColumnSchema.ColumnSchemaBuilder(column, type);
        columnSchemaBuilder.key(isPrimary);
        return columnSchemaBuilder.build();
    }

    public static void main(String[] args) {
        // Kudu master addresses
        final String master = "hadoop01,hadoop02,hadoop03";
        final KuduClient client = new KuduClient.KuduClientBuilder(master)
                .defaultSocketReadTimeoutMs(6000)
                .build();

        // Define the table schema
        List<ColumnSchema> columns = new LinkedList<ColumnSchema>();
        columns.add(newColumn("CompanyId", Type.INT32, true));
        columns.add(newColumn("WorkId", Type.INT32, false));
        columns.add(newColumn("Name", Type.STRING, false));
        columns.add(newColumn("Gender", Type.STRING, false));
        columns.add(newColumn("Photo", Type.STRING, false));
        Schema schema = new Schema(columns);

        // Options supplied when creating the table
        final CreateTableOptions options = new CreateTableOptions();
        // Number of replicas
        options.setNumReplicas(1);

        // Range-partition on the CompanyId column
        List<String> parcols = new LinkedList<String>();
        parcols.add("CompanyId");
        options.setRangePartitionColumns(parcols);

        /*
         * Define the range partitions:
         * partition 1:  0 <= value < 10
         * partition 2: 10 <= value < 20
         * partition 3: 20 <= value < 30
         * ...
         * partition 9: 80 <= value < 90
         */
        int count = 0;
        for (int i = 1; i < 10; i++) {
            PartialRow lower = schema.newPartialRow();
            lower.addInt("CompanyId", count);
            PartialRow upper = schema.newPartialRow();
            count += 10;
            upper.addInt("CompanyId", count);
            options.addRangePartition(lower, upper);
        }

        try {
            client.createTable("rangeTable", schema, options);
        } catch (KuduException e) {
            e.printStackTrace();
        } finally {
            try {
                client.close();
            } catch (KuduException e) {
                e.printStackTrace();
            }
        }
    }
}
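To see how rows are routed to these range partitions, here is a short usage sketch that inserts a few rows into the rangeTable created above. It assumes the table already exists on the same masters; the class name InsertIntoRangeTable and the non-key column values are made up for illustration.

import org.apache.kudu.client.Insert;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.KuduSession;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.PartialRow;

public class InsertIntoRangeTable {
    public static void main(String[] args) throws KuduException {
        KuduClient client = new KuduClient.KuduClientBuilder("hadoop01,hadoop02,hadoop03").build();
        try {
            KuduTable table = client.openTable("rangeTable");
            KuduSession session = client.newSession();
            // CompanyId 5 falls into the [0, 10) partition, 25 into [20, 30), 85 into [80, 90)
            int[] companyIds = {5, 25, 85};
            for (int companyId : companyIds) {
                Insert insert = table.newInsert();
                PartialRow row = insert.getRow();
                row.addInt("CompanyId", companyId);
                row.addInt("WorkId", companyId * 100);          // sample data
                row.addString("Name", "user-" + companyId);     // sample data
                row.addString("Gender", "male");                // sample data
                row.addString("Photo", "photo-" + companyId);   // sample data
                session.apply(insert);
            }
            session.flush();
            session.close();
        } finally {
            client.close();
        }
    }
}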
Result screenshot:
Hash Partitioning
Hash partitioning distributes rows into one of many buckets according to the hash value of the partition key. It is an effective strategy when ordered access to the table is not required: spreading rows randomly across tablets helps mitigate hot spots and uneven tablet sizes.
Create a table partitioned as follows:
create table hashTable (CompanyId Type.INT32, WorkId Type.INT32, Name Type.STRING, Gender Type.STRING, Photo Type.STRING)
HASH (CompanyId) PARTITIONS 6,
RANGE (CompanyId) (
    PARTITION UNBOUNDED
)
Code implementation:
import java.util.LinkedList;
import java.util.List;

import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;

public class CreateHashPartition {

    private static ColumnSchema newColumn(String column, Type type, boolean isPrimary) {
        final ColumnSchema.ColumnSchemaBuilder columnSchemaBuilder =
                new ColumnSchema.ColumnSchemaBuilder(column, type);
        columnSchemaBuilder.key(isPrimary);
        return columnSchemaBuilder.build();
    }

    public static void main(String[] args) {
        // Kudu master addresses
        final String master = "hadoop01,hadoop02,hadoop03";
        final KuduClient client = new KuduClient.KuduClientBuilder(master)
                .defaultSocketReadTimeoutMs(6000)
                .build();

        // Define the table schema
        List<ColumnSchema> columns = new LinkedList<ColumnSchema>();
        columns.add(newColumn("CompanyId", Type.INT32, true));
        columns.add(newColumn("WorkId", Type.INT32, false));
        columns.add(newColumn("Name", Type.STRING, false));
        columns.add(newColumn("Gender", Type.STRING, false));
        columns.add(newColumn("Photo", Type.STRING, false));
        Schema schema = new Schema(columns);

        // Options supplied when creating the table
        final CreateTableOptions options = new CreateTableOptions();
        // Number of replicas
        options.setNumReplicas(1);

        // Hash-partition on the CompanyId column into 6 buckets
        List<String> parcols = new LinkedList<String>();
        parcols.add("CompanyId");
        options.addHashPartitions(parcols, 6);

        try {
            client.createTable("hashTable", schema, options);
        } catch (KuduException e) {
            e.printStackTrace();
        } finally {
            try {
                client.close();
            } catch (KuduException e) {
                e.printStackTrace();
            }
        }
    }
}
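As a usage sketch, the following scan applies a range predicate on CompanyId against the hashTable created above. Because the table is hash-partitioned, a key-range predicate cannot be pruned to a single tablet, so the scan may touch all six buckets. The class name ScanHashTable and the bound value 50 are arbitrary examples.

import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.KuduPredicate;
import org.apache.kudu.client.KuduScanner;
import org.apache.kudu.client.KuduTable;
import org.apache.kudu.client.RowResult;
import org.apache.kudu.client.RowResultIterator;

public class ScanHashTable {
    public static void main(String[] args) throws KuduException {
        KuduClient client = new KuduClient.KuduClientBuilder("hadoop01,hadoop02,hadoop03").build();
        try {
            KuduTable table = client.openTable("hashTable");
            // Predicate: CompanyId >= 50 (example bound)
            KuduPredicate predicate = KuduPredicate.newComparisonPredicate(
                    table.getSchema().getColumn("CompanyId"),
                    KuduPredicate.ComparisonOp.GREATER_EQUAL,
                    50);
            KuduScanner scanner = client.newScannerBuilder(table)
                    .addPredicate(predicate)
                    .build();
            while (scanner.hasMoreRows()) {
                RowResultIterator results = scanner.nextRows();
                while (results.hasNext()) {
                    RowResult row = results.next();
                    System.out.println(row.getInt("CompanyId") + "\t" + row.getString("Name"));
                }
            }
            scanner.close();
        } finally {
            client.close();
        }
    }
}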
Multilevel Partitioning (Hash + Range)
Hash and range partitioning can be combined on the same table. Create a table partitioned as follows:
create table MultilevelTable (CompanyId Type.INT32, WorkId Type.INT32, Name Type.STRING, Gender Type.STRING, Photo Type.STRING)
HASH (CompanyId) PARTITIONS 10,
RANGE (CompanyId) (
    PARTITION  0 <= VALUES < 10,
    PARTITION 10 <= VALUES < 20,
    PARTITION 20 <= VALUES < 30,
    PARTITION 30 <= VALUES < 40,
    PARTITION 40 <= VALUES < 50,
    PARTITION 50 <= VALUES < 60,
    PARTITION 60 <= VALUES < 70,
    PARTITION 70 <= VALUES < 80,
    PARTITION 80 <= VALUES < 90
)
Code implementation:
import java.util.LinkedList;
import java.util.List;

import org.apache.kudu.ColumnSchema;
import org.apache.kudu.Schema;
import org.apache.kudu.Type;
import org.apache.kudu.client.CreateTableOptions;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.PartialRow;

public class MultilevelPartition {

    private static ColumnSchema newColumn(String column, Type type, boolean isPrimary) {
        final ColumnSchema.ColumnSchemaBuilder columnSchemaBuilder =
                new ColumnSchema.ColumnSchemaBuilder(column, type);
        columnSchemaBuilder.key(isPrimary);
        return columnSchemaBuilder.build();
    }

    public static void main(String[] args) {
        // Kudu master addresses
        final String master = "hadoop01,hadoop02,hadoop03";
        final KuduClient client = new KuduClient.KuduClientBuilder(master)
                .defaultSocketReadTimeoutMs(6000)
                .build();

        // Define the table schema
        List<ColumnSchema> columns = new LinkedList<ColumnSchema>();
        columns.add(newColumn("CompanyId", Type.INT32, true));
        columns.add(newColumn("WorkId", Type.INT32, false));
        columns.add(newColumn("Name", Type.STRING, false));
        columns.add(newColumn("Gender", Type.STRING, false));
        columns.add(newColumn("Photo", Type.STRING, false));
        Schema schema = new Schema(columns);

        // Options supplied when creating the table
        final CreateTableOptions options = new CreateTableOptions();
        // Number of replicas
        options.setNumReplicas(1);

        // Hash-partition CompanyId into 10 buckets, then range-partition on CompanyId
        List<String> parcols = new LinkedList<String>();
        parcols.add("CompanyId");
        options.addHashPartitions(parcols, 10);
        options.setRangePartitionColumns(parcols);

        /*
         * Define the range partitions:
         * partition 1:  0 <= value < 10
         * partition 2: 10 <= value < 20
         * partition 3: 20 <= value < 30
         * ...
         * partition 9: 80 <= value < 90
         */
        int count = 0;
        for (int i = 1; i < 10; i++) {
            PartialRow lower = schema.newPartialRow();
            lower.addInt("CompanyId", count);
            PartialRow upper = schema.newPartialRow();
            count += 10;
            upper.addInt("CompanyId", count);
            options.addRangePartition(lower, upper);
        }

        try {
            client.createTable("MultilevelTable", schema, options);
        } catch (KuduException e) {
            e.printStackTrace();
        } finally {
            try {
                client.close();
            } catch (KuduException e) {
                e.printStackTrace();
            }
        }
    }
}
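When re-running these examples, createTable fails if the table already exists. A minimal sketch of a cleanup step that drops the table first, using the standard tableExists and deleteTable client calls (the class name RecreateMultilevelTable is only illustrative):

import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;

public class RecreateMultilevelTable {
    public static void main(String[] args) throws KuduException {
        KuduClient client = new KuduClient.KuduClientBuilder("hadoop01,hadoop02,hadoop03").build();
        try {
            // Drop the table if a previous run already created it,
            // so the createTable call in the example above can succeed again.
            if (client.tableExists("MultilevelTable")) {
                client.deleteTable("MultilevelTable");
            }
        } finally {
            client.close();
        }
    }
}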
Hash partitioning helps maximize write throughput, while range partitioning avoids the problem of unbounded tablet growth. Combining hash and range partitioning, as above, can therefore significantly improve Kudu performance.
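A minimal sketch of how unbounded growth is typically avoided in practice: new range partitions are added to the table as new key values arrive, here via AlterTableOptions.addRangePartition. It assumes the MultilevelTable created above; the class name AddRangePartition and the [90, 100) bounds are arbitrary examples.

import org.apache.kudu.Schema;
import org.apache.kudu.client.AlterTableOptions;
import org.apache.kudu.client.KuduClient;
import org.apache.kudu.client.KuduException;
import org.apache.kudu.client.PartialRow;

public class AddRangePartition {
    public static void main(String[] args) throws KuduException {
        KuduClient client = new KuduClient.KuduClientBuilder("hadoop01,hadoop02,hadoop03").build();
        try {
            Schema schema = client.openTable("MultilevelTable").getSchema();
            // New range partition: 90 <= CompanyId < 100 (example bounds)
            PartialRow lower = schema.newPartialRow();
            lower.addInt("CompanyId", 90);
            PartialRow upper = schema.newPartialRow();
            upper.addInt("CompanyId", 100);
            client.alterTable("MultilevelTable",
                    new AlterTableOptions().addRangePartition(lower, upper));
        } finally {
            client.close();
        }
    }
}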