Deploying hbase-2.2.4 and calling it from the hbase-client-2.2.4 Java client

1. Download hbase-2.2.4-bin.tar.gz and extract it to /home/hbase-2.2.4

2. Configure /home/hbase-2.2.4/conf/hbase-env.sh

# The java implementation to use.  Java 1.8+ required.
export JAVA_HOME=/usr/java/jdk-11.0.4/

# Extra Java CLASSPATH elements.  Optional.
export HBASE_CLASSPATH=/home/hbase-2.2.4/conf

3. Configure /home/hbase-2.2.4/conf/hbase-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>hbase.rootdir</name>
        <value>hdfs://iZwz974yt1dail4ihlqh6fZ:9000/hbase</value>
        <description>
            Either file:///home/hbase-2.2.4/hdfs or hdfs://iZwz974yt1dail4ihlqh6fZ:9000/hbase.
            hbase.rootdir is the directory shared by the RegionServers in which HBase persists
            its data; by default it points into /tmp, so if this setting is left unchanged the
            data is lost whenever HBase restarts. It is normally set to an HDFS path: if the
            NameNode runs on host hostname1 at port 9000, set it to hdfs://hostname1:9000/hbase.
        </description>
    </property>
    <property>
        <name>hbase.unsafe.stream.capability.enforce</name>
        <value>false</value>
        <description>
            When false, HBase does not insist that the underlying filesystem supports the
            hflush/hsync stream capabilities; this is needed when hbase.rootdir points at a
            local filesystem instead of HDFS.
        </description>
    </property>
    <property>
        <name>hbase.cluster.distributed</name>
        <value>false</value>
        <description>
            Deployment mode of HBase: false means standalone or pseudo-distributed mode,
            true means fully distributed mode.
        </description>
    </property>
    <property>
        <name>hbase.zookeeper.property.dataDir</name>
        <value>/home/hbase-2.2.4/zookeeperData</value>
        <description>
            Directory in which ZooKeeper stores its data. If unset, it defaults to /tmp
            and the data is lost on restart.
        </description>
    </property>
    
    <property>
        <name>hbase.zookeeper.property.clientPort</name>
        <value>2181</value>
    </property>
    <!--
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>hbase.unsafe.stream.capability.enforce</name>
        <value>false</value>
    </property>

    <property>
        <name>hbase.zookeeper.quorum</name>
        <value>39.108.***.***</value>
        <description>
            Host addresses of the ZooKeeper quorum. For example, example1, example2 and
            example3 would be the hosts running the data nodes; ZooKeeper's default client
            port is 2181.
            Fully distributed mode: also edit conf/regionservers, which lists the hostnames
            of all region servers, and sync these files to the other nodes of the cluster
            after configuring.
        </description>
    </property>
        -->
    <property>
        <name>hbase.master.info.port</name>
        <value>60010</value>
    </property>
    
    <!-- Authorization depends on the AccessController coprocessor -->
    <property>
       <name>hbase.security.authorization</name>
       <value>true</value>
    </property>
    <property>
      <name>hbase.coprocessor.region.classes</name>    
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    <property>
      <name>hbase.coprocessor.master.classes</name>
      <value>org.apache.hadoop.hbase.security.access.AccessController</value>
    </property>
    <property>
      <name>hbase.rpc.engine</name>
      <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
    </property>
    <property>
       <name>hbase.superuser</name>
       <value>hbase,root,lixj</value>
    </property>
    
</configuration>
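
With hbase.security.authorization enabled and the AccessController coprocessor loaded as above, permissions still have to be granted to each user, either from the HBase shell (e.g. grant 'someuser', 'RW', 'table1') or programmatically. A minimal Java sketch of the programmatic route follows; the user name someuser is a placeholder, and table1 is the table used by the client code in section 7.6:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantExample {
    public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "39.108.***.***");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            // Grant READ and WRITE on table1 to the placeholder user "someuser";
            // family and qualifier are null, so the grant covers the whole table.
            AccessControlClient.grant(conn, TableName.valueOf("table1"), "someuser",
                    null, null, Permission.Action.READ, Permission.Action.WRITE);
        }
    }
}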

 

4. Configure /home/hbase-2.2.4/conf/regionservers

localhost
39.108.***.*** {IP of this host}

5. Starting and stopping the HBase service

/home/hbase-2.2.4/bin# ./start-hbase.sh

Check whether the startup succeeded: # jps

If HMaster appears in the output, HBase started successfully.
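
As a further sanity check, the HBase shell can be used to create the table that the Java example in section 7.6 expects (table1 with column families cf01 and cf02):

/home/hbase-2.2.4/bin# ./hbase shell
hbase(main):001:0> status
hbase(main):002:0> create 'table1', 'cf01', 'cf02'
hbase(main):003:0> exit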

To stop HBase:

/home/hbase-2.2.4/bin# ./stop-hbase.sh

6. Open the required ports: 2181 (ZooKeeper), 60010 (web UI), 16000 (HBase), 16020 (HBase)
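
On a server running firewalld, for example, the ports can be opened like this (a sketch; adapt it to whatever firewall is in use, and note that on cloud hosts the security-group rules must allow these ports as well):

firewall-cmd --permanent --add-port=2181/tcp
firewall-cmd --permanent --add-port=16000/tcp
firewall-cmd --permanent --add-port=16020/tcp
firewall-cmd --permanent --add-port=60010/tcp
firewall-cmd --reload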

Then visit http://39.108.***.***:60010/master-status

7. Connecting from Windows with the hbase-client-2.2.4 Java client

7.1 Download hadoop-2.10.0.tar.gz and extract it to D:\server\hadoop-2.10.0

If WinRAR reports an error during extraction, open a command prompt and run: start winrar x -y hadoop-2.10.0.tar.gz

7.2 Configure the system environment variables

Create a new variable named HADOOP_HOME with the value D:\server\hadoop-2.10.0

Add %HADOOP_HOME%\bin to Path

7.3 Download winutils.exe and hadoop.dll into the D:\server\hadoop-2.10.0\bin directory

Download from: https://github.com/cdarlint/winutils/tree/master/hadoop-2.9.2/bin

7.4 Edit the C:\Windows\System32\drivers\etc\hosts file

Add a mapping from each Region Server's ServerName (shown at http://39.108.***.***:60010/master-status) to the server's IP address.

Example: 39.108.***.*** iZwz974yt1dail4ihlqh6fZ

7.5 Jars for the Java project

Include all jars under /home/hbase-2.2.4/lib plus htrace-core4-4.2.0-incubating.jar (download: https://www.mvnjar.com/org.apache.htrace/htrace-core4/4.2.0-incubating/detail.html).
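
Alternatively, if the project is built with Maven rather than by collecting jars by hand, declaring hbase-client should pull in the required dependencies (including htrace-core4) transitively:

<dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-client</artifactId>
    <version>2.2.4</version>
</dependency>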

7.6 Java code example:

package test;


import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.RandomRowFilter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.SkipFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.TimestampsFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * 
 * @author 李小家
 *
 */
public class HbaseClient {

    private static Logger logger = Logger.getLogger(HbaseClient.class.getName());
    
    private static Connection conn = null;
    /**
     * Establish the connection.
     */
    static {
        System.setProperty("HADOOP_USER_NAME", "lixj");
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        conf.set("hbase.zookeeper.quorum", "39.108.***.***");
        conf.set("hadoop.user.name", "lixj");
        try {
            conn = ConnectionFactory.createConnection(conf);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Create a table.
     * @param tableName table name
     * @param familys column families
     */
    public void createTable(String tableName, String... familys) {
        try {
            Admin admin = conn.getAdmin();
            TableName tname = TableName.valueOf(tableName);
            if (admin.tableExists(tname)) {
                logger.warning("Table " + tableName + " already exists; it cannot be created again.");
            } else {
                TableDescriptorBuilder tdesc = TableDescriptorBuilder.newBuilder(tname);
                for (String family : familys) {
                    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(family);
                    tdesc.setColumnFamily(cfd);
                }
                TableDescriptor desc = tdesc.build();
                admin.createTable(desc);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Create several tables at once; tableNames[i] gets the column families in familys.get(i).
     */
    public void createTables(String[] tableNames, List<List<String>> familys) {
        try {
            Admin admin = conn.getAdmin();
            if (tableNames.length == familys.size()) {
                for (int i = 0; i < tableNames.length; i++) {
                    TableName tname = TableName.valueOf(tableNames[i]);
                    if (admin.tableExists(tname)) {
                        logger.warning("Table " + tableNames[i] + " already exists; it cannot be created again.");
                    } else {
                        TableDescriptorBuilder tdesc = TableDescriptorBuilder.newBuilder(tname);
                        for (String family : familys.get(i)) {
                            ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(family);
                            tdesc.setColumnFamily(cfd);
                        }
                        TableDescriptor desc = tdesc.build();
                        admin.createTable(desc);
                    }
                }
            } else {
                logger.warning("Every table must have its column families.");
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Disable and delete a table.
     */
    public void deleteTable(String tableName) {
        try {
            Admin admin = conn.getAdmin();
            TableName tName = TableName.valueOf(tableName);
            if (admin.tableExists(tName)) {
                admin.disableTable(tName);
                admin.deleteTable(tName);
                logger.info("Table " + tableName + " deleted successfully");
            } else {
                logger.warning("Table " + tableName + " to be deleted does not exist");
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Disable and delete several tables.
     */
    public void deleteTables(String... tableNames) {
        try {
            Admin admin = conn.getAdmin();
            for (String tableName : tableNames) {
                TableName tName = TableName.valueOf(tableName);
                admin.disableTable(tName);
                admin.deleteTable(tName);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Delete a column family from a table.
     */
    public void deleteFamily(String tableName, String family) {
        try {
            Admin admin = conn.getAdmin();
            admin.deleteColumnFamily(TableName.valueOf(tableName), Bytes.toBytes(family));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Add a column family to an existing table.
     */
    public void addFamily(String tableName, String family) {
        try {
            Admin admin = conn.getAdmin();
            ColumnFamilyDescriptor columnFamily = ColumnFamilyDescriptorBuilder.newBuilder(family.getBytes()).build();
            admin.addColumnFamily(TableName.valueOf(tableName), columnFamily);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Insert a single cell.
     */
    public void addRow(String tableName, String rowKey, String family, String qualifier, String value) {
        try {
            Table table = conn.getTable(TableName.valueOf(tableName));
            // Create a Put object from the row key
            Put put = new Put(Bytes.toBytes(rowKey));
            // Set column family, qualifier and value on the Put
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(value));
            // Insert the data; put(List<Put>) can be used for batch inserts
            table.put(put);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Insert one row into the given column family; params must contain a "row" entry
     * (the row key) plus one entry per column.
     */
    public void addrow(String tname, String family, Map<String, Object> params) {
        try {
            Table table = conn.getTable(TableName.valueOf(tname));
            Put put = new Put(params.get("row").toString().getBytes());
            for (Map.Entry<String, Object> m : params.entrySet()) {
                if (m.getKey().equals("row")) {
                    continue;
                }
                put.addColumn(family.getBytes(), m.getKey().getBytes(), m.getValue().toString().getBytes());
            }
            table.put(put);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Batch insert: the outer map key is the row key; the inner map holds a "family"
     * entry (the column family) plus one entry per column.
     */
    public void addrows(String tname, Map<String, Map<String, Object>> params) {
        try {
            Table table = conn.getTable(TableName.valueOf(tname));
            List<Put> listput = new ArrayList<Put>();
            for (Map.Entry<String, Map<String, Object>> map : params.entrySet()) {
                Put put = new Put(map.getKey().getBytes());
                String family = map.getValue().get("family").toString();
                for (Map.Entry<String, Object> m : map.getValue().entrySet()) {
                    // Skip the bookkeeping entries so they are not written as columns
                    if (m.getKey().equals("row") || m.getKey().equals("family")) {
                        continue;
                    }
                    put.addColumn(family.getBytes(), m.getKey().getBytes(), m.getValue().toString().getBytes());
                }
                listput.add(put);
            }
            table.put(listput);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Delete a row; if params is non-null it maps family -> qualifier and only those
     * columns are deleted.
     */
    public boolean deleteRow(String tname, String row, Map<String, Object> params) {
        TableName tableName = TableName.valueOf(tname);
        try {
            Table table = conn.getTable(tableName);
            Delete delete = new Delete(row.getBytes());
            if (params != null) {
                for (Map.Entry<String, Object> m : params.entrySet()) {
                    delete.addColumn(m.getKey().getBytes(), m.getValue().toString().getBytes());
                }
            }
            table.delete(delete);
            table.close();
            return true;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return false;
    }

    /**
     * Delete several rows by row key.
     */
    public void deleteRows(String tableName, String[] rows) {
        try {
            Table table = conn.getTable(TableName.valueOf(tableName));
            List<Delete> list = new ArrayList<Delete>();
            for (String row : rows) {
                Delete delete = new Delete(Bytes.toBytes(row));
                list.add(delete);
            }
            table.delete(list);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Delete several rows; if params is non-null it maps family -> qualifier and only
     * those columns are deleted from each row.
     */
    public void deleteRows(String tname, Map<String, Object> params, String... rows) {
        try {
            Table table = conn.getTable(TableName.valueOf(tname));
            List<Delete> deletes = new ArrayList<Delete>();
            for (String row : rows) {
                Delete delete = new Delete(row.getBytes());
                if (params != null) {
                    for (Map.Entry<String, Object> m : params.entrySet()) {
                        delete.addColumn(m.getKey().getBytes(), m.getValue().toString().getBytes());
                    }
                }
                deletes.add(delete);
            }
            table.delete(deletes);
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Fetch a single row; returns row/family/qualifier/value of the row's cells
     * (later cells overwrite earlier ones in the returned map).
     */
    public Map<String, Object> getRow(String tableName, String rowKey) {
        Map<String, Object> data = new HashMap<String, Object>();
        try {
            Table table = conn.getTable(TableName.valueOf(tableName));
            // Create a Get object from the row key
            Get get = new Get(Bytes.toBytes(rowKey));
            Result result = table.get(get);
            if (!get.isCheckExistenceOnly()) {
                for (Cell cell : result.rawCells()) {
                    data.put("row", new String(CellUtil.cloneRow(cell)));
                    data.put("family", new String(CellUtil.cloneFamily(cell)));
                    data.put("qualifier", new String(CellUtil.cloneQualifier(cell)));
                    data.put("value", new String(CellUtil.cloneValue(cell)));
                }
            }
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return data;
    }

    /**
     * Scan every column family of a table and return all cells.
     */
    public List<Map<String, Object>> getAllData(String tname) {
        List<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
        TableName tableName = TableName.valueOf(tname);
        try {
            Table table = conn.getTable(tableName);
            Set<byte[]> familyNames = table.getDescriptor().getColumnFamilyNames();
            for (byte[] familyName : familyNames) {
                ResultScanner rs = table.getScanner(familyName);
                Iterator<Result> iterator = rs.iterator();
                while (iterator.hasNext()) {
                    Result r = iterator.next();
                    for (Cell cell : r.rawCells()) {
                        String family = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
                        String qualifier = Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
                        String row = Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
                        String value = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                        Map<String, Object> map = new HashMap<String, Object>();
                        map.put("row", row);
                        map.put("family", family);
                        map.put("qualifier", qualifier);
                        map.put("value", value);
                        list.add(map);
                        logger.info("row=" + row + ",family=" + family + ",qualifier=" + qualifier + ",value=" + value);
                    }
                }
            }
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return list;
    }

    /**
     * Scan with filters; several filters are built for illustration, but only
     * pageFilter and valueFilter are actually applied below.
     */
    public void queryData(String tableName) {
        Table table = null;
        try {
            table = conn.getTable(TableName.valueOf(tableName));
        } catch (IOException e1) {
            e1.printStackTrace();
        }
        if (table == null) {
            return;
        }
        Scan scan = new Scan();
        scan.setMaxResultSize(1000); // Upper bound, in bytes, on the result size returned per RPC
        // scan.setBatch(1000); // Limit the number of cells per Result, to avoid OutOfMemory errors on very wide rows
        scan.withStartRow(Bytes.toBytes("row001"));
        scan.withStopRow(Bytes.toBytes("row010"));
        scan.addFamily(Bytes.toBytes("cf01"));
        // scan.addColumn(Bytes.toBytes("cf01"), Bytes.toBytes("name"));

        // Selects rows at random; chance is a float between 0 and 1, so 0.5f returns roughly half of the rows
        RandomRowFilter randomRowFilter = new RandomRowFilter(0.5f);
        // ColumnPrefixFilter: matches all columns whose qualifier starts with the given prefix
        ColumnPrefixFilter columnPrefixFilter = new ColumnPrefixFilter("bir".getBytes());
        // Same idea with several qualifier prefixes at once
        byte[][] prefixes = new byte[][] { "author".getBytes(), "bookname".getBytes() };
        MultipleColumnPrefixFilter multipleColumnPrefixFilter = new MultipleColumnPrefixFilter(prefixes);
        // Paging: PageFilter pages by rows
        PageFilter pageFilter = new PageFilter(3); // 3 rows per page
        // A wrapper filter, used together with e.g. ValueFilter: if any column of a row
        // fails the wrapped filter, the whole row is filtered out
        Filter skipFilter = new SkipFilter(columnPrefixFilter);
        // Rows whose key starts with the given prefix
        PrefixFilter prefixFilter = new PrefixFilter(Bytes.toBytes("李"));
        // At most 5 columns per row, starting at column offset 15
        Filter columnPaginationFilter = new ColumnPaginationFilter(5, 15);
        // Cells whose value contains the substring "test"
        Filter valueFilter = new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("test"));
        // Rows whose key is >= "row-3"
        Filter rowFilter1 = new RowFilter(CompareOperator.GREATER_OR_EQUAL, new BinaryComparator(Bytes.toBytes("row-3")));
        // Timestamp filter
        List<Long> timestamp = new ArrayList<>();
        timestamp.add(1571438854697L);
        timestamp.add(1571438854543L);
        TimestampsFilter timestampsFilter = new TimestampsFilter(timestamp);

        List<Filter> filters = new ArrayList<Filter>();
        filters.add(pageFilter);
        filters.add(valueFilter);
        FilterList filter = new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
        scan.setFilter(filter);

        ResultScanner rs = null;
        try {
            rs = table.getScanner(scan);
        } catch (IOException e) {
            e.printStackTrace();
        }
        if (rs != null) {
            for (Result r : rs) {
                for (Cell cell : r.rawCells()) {
                    System.out.println(String.format("row:%s, family:%s, qualifier:%s, value:%s, timestamp:%s.",
                            Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()),
                            Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()),
                            Bytes.toString(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()),
                            Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()),
                            cell.getTimestamp()));
                }
            }
            rs.close();
        }
    }

    /**
     * Return all values of one column across the table.
     */
    public List<String> getQualifierValue(String tableName, String family, String qualifier) {
        List<String> list = new ArrayList<String>();
        TableName tName = TableName.valueOf(tableName);
        try {
            Table table = conn.getTable(tName);
            ResultScanner rs = table.getScanner(family.getBytes(), qualifier.getBytes());
            Iterator<Result> iterator = rs.iterator();
            while (iterator.hasNext()) {
                Result r = iterator.next();
                for (Cell cell : r.rawCells()) {
                    String value = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                    list.add(value);
                }
            }
            rs.close();
            table.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        return list;
    }

    public static void main(String[] args) {
        HbaseClient client = new HbaseClient();
        if (conn != null) {
            try {
                logger.info("Connected successfully, conn=" + conn);
                //client.deleteTable("table1");
                //client.createTable("table1", "cf01", "cf02");

                //Map data = new HashMap();
                //data.put("row", "row001");
                //data.put("name", "李小家(cf01)");
                //data.put("sex", 2 + "(cf01)");
                //data.put("birthday", new Date());
                //data.put("describe", "test(cf01)");
                //client.addrow("table1", "cf01", data);

                //client.addFamily("table1", "cf02");
                //Map data = new HashMap();
                //data.put("row", "row001");
                //data.put("name", "李小家(cf02)");
                //data.put("sex", 2 + "(cf02)");
                //data.put("birthday", new Date());
                //data.put("describe", "test(cf02)");
                //client.addrow("table1", "cf02", data);

                //Map params = new HashMap();
                //params.put("cf01", "sex");
                //client.deleteRow("table1", "row001", params);

                //logger.info(client.getAllData("table1").toString());
                client.queryData("table1");
                //logger.info(client.getQualifierValue("table1", "cf01", "name").toString());
                //logger.info(client.getRow("table1", "row001").toString());
                //client.deleteFamily("table1", "cf02");
                conn.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
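
One design note on the class above: each method fetches a Table, closes it manually, and swallows the IOException, so a failure between getTable and close leaks the table. Since Table is AutoCloseable, try-with-resources is safer; a minimal sketch of addRow rewritten that way (addRowSafely is a new name, reusing the static conn field from the class):

    // try-with-resources closes the Table even if the put fails, and the
    // IOException is propagated to the caller instead of being swallowed.
    public void addRowSafely(String tableName, String rowKey, String family,
                             String qualifier, String value) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf(tableName))) {
            Put put = new Put(Bytes.toBytes(rowKey));
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier), Bytes.toBytes(value));
            table.put(put);
        }
    }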

 
