【HBase】An HBase and MapReduce Integration Example


Official documentation on HBase and MapReduce integration: http://archive.cloudera.com/cdh5/cdh/5/hbase-1.2.0-cdh5.14.0/book.html


Requirement

First create a table myuser2 in HBase with create 'myuser2','f1'. Then read the data in the myuser table and write the name and age columns of its f1 column family into myuser2.
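
The table is usually created from the HBase shell as above. If you prefer to create it from Java, a minimal sketch using the HBase 1.2 client Admin API could look like the following (the class name CreateMyUser2 is hypothetical, and the ZooKeeper quorum is the same one assumed later in the driver; adjust both to your cluster):

package cn.itcast.mr.demo1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateMyUser2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum of the HBase cluster (assumption: same nodes as in the driver below)
        conf.set("hbase.zookeeper.quorum", "node01:2181,node02:2181,node03:2181");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName tableName = TableName.valueOf("myuser2");
            // Create myuser2 with a single column family f1, equivalent to: create 'myuser2','f1'
            if (!admin.tableExists(tableName)) {
                HTableDescriptor descriptor = new HTableDescriptor(tableName);
                descriptor.addFamily(new HColumnDescriptor("f1"));
                admin.createTable(descriptor);
            }
        }
    }
}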


Steps

1. Create a Maven project and add the required dependencies

    <repositories>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
    </repositories>

    <dependencies>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.2.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.2.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
            <version>6.14.3</version>
            <scope>test</scope>
        </dependency>


    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.0</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                    <!--    <verbal>true</verbal>-->
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.2</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
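
With this build section, running mvn clean package produces a shaded (fat) jar under target/. The META-INF signature entries (*.SF, *.DSA, *.RSA) are excluded so that the merged jar does not fail signature verification when it runs. The shaded jar can then be submitted to the cluster with hadoop jar, passing cn.itcast.mr.demo1.HbaseReadWrite (defined below) as the main class; the exact jar file name depends on your artifactId and version.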

 

2. Develop the MapReduce program

Define a driver class with the main method, HbaseReadWrite:

package cn.itcast.mr.demo1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


public class HbaseReadWrite extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // Create the Job object
        Job job = Job.getInstance(super.getConf(), "HbaseMapReduce");
        // Create the Scan object. Without a filter this is a full-table scan; the row-level filtering is done in the Mapper class, so no filter needs to be set here
        Scan scan = new Scan();


        /**
         *  TableMapReduceUtil.initTableMapperJob wires the custom map logic into the job.
         *  Its arguments here are:
         *  the name of the table to read data from ("myuser"),
         *  the Scan used to read from HBase (where filters, column restrictions, etc. would be set),
         *  then the custom Mapper class and the output types of k2 and v2,
         *  and finally the Job object
         */
        TableMapReduceUtil.initTableMapperJob("myuser",scan,HbaseReadMapper.class, Text.class, Put.class,job);

        /**
         * TableMapReduceUtil.initTableReducerJob wires the custom reduce logic into the job.
         * It only needs three arguments:
         * the name of the table to write data into ("myuser2"),
         * the custom Reducer class, and the Job object
         */
        TableMapReduceUtil.initTableReducerJob("myuser2",HbaseWriteReducer.class,job);

        // Submit the job and wait for it to finish
        boolean b = job.waitForCompletion(true);

        return b?0:1;
    }

    /**
     * The main method; exits the JVM with the return code of run
     * @param args
     * @throws Exception
     */
    public static void main(String[] args) throws Exception {
        Configuration configuration = HBaseConfiguration.create();
        // Be sure to set the ZooKeeper quorum in the configuration, otherwise the client cannot connect to HBase
        configuration.set("hbase.zookeeper.quorum","node01:2181,node02:2181,node03:2181");
        int run = ToolRunner.run(configuration, new HbaseReadWrite(), args);
        System.exit(run);
    }
}
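
Because the Mapper below only keeps the f1:name and f1:age cells anyway, the Scan can optionally be narrowed before it is handed to initTableMapperJob so that less data is shipped to the mappers. This is an optional tweak, not part of the original example, and the caching value is only illustrative:

        // Optional: restrict the scan and tune it for MapReduce (requires import org.apache.hadoop.hbase.util.Bytes)
        scan.addFamily(Bytes.toBytes("f1")); // only read the f1 column family
        scan.setCaching(500);                // rows fetched per RPC
        scan.setCacheBlocks(false);          // avoid polluting the block cache during a full scan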

Implement the custom map logic in a Mapper class, HbaseReadMapper:

package cn.itcast.mr.demo1;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;

import java.io.IOException;
import java.util.List;


public class HbaseReadMapper extends TableMapper<Text, Put> {
    /**
     *
     * @param key     the row key of the current row; the mapper emits it as Text, so k2 = Text
     * @param result  the cells of the current row; the mapper emits Put objects (HBase writes are Puts), so v2 = Put
     * @param context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void map(ImmutableBytesWritable key, Result result, Context context) throws IOException, InterruptedException {
        // Get the row key bytes of the current HBase row
        byte[] rowKeyBytes = key.get();
        // Convert the row key bytes to a String, since k2 is emitted as Text
        String rowKey = Bytes.toString(rowKeyBytes);

        // Create a new Put for this row key
        Put put = new Put(rowKeyBytes);
        // Get all cells of this row
        List<Cell> cells = result.listCells();
        // Iterate over every cell
        for (Cell cell : cells) {
            // Column family of the cell
            byte[] family = cell.getFamily();
            // Column qualifier of the cell
            byte[] qualifier = cell.getQualifier();
            // Keep only the cells we need: family f1, columns name and age
            if ("f1".equals(Bytes.toString(family))){
                if ("name".equals(Bytes.toString(qualifier)) || "age".equals(Bytes.toString(qualifier))){
                    put.add(cell);
                }
            }
        }
        // Only emit the Put if it actually contains cells
        if (!put.isEmpty()){
            context.write(new Text(rowKey),put);
        }
    }
}
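
Note that Cell.getFamily() and Cell.getQualifier() are deprecated in HBase 1.x (and removed in 2.x). An equivalent version of the check using CellUtil (import org.apache.hadoop.hbase.CellUtil), as a sketch, would be:

            // Same comparison written with CellUtil instead of the deprecated Cell getters
            byte[] family = CellUtil.cloneFamily(cell);
            byte[] qualifier = CellUtil.cloneQualifier(cell);
            if ("f1".equals(Bytes.toString(family))
                    && ("name".equals(Bytes.toString(qualifier)) || "age".equals(Bytes.toString(qualifier)))) {
                put.add(cell);
            }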

Implement the custom reduce logic in a Reducer class, HbaseWriteReducer:

package cn.itcast.mr.demo1;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.Text;

import java.io.IOException;


public class HbaseWriteReducer extends TableReducer<Text, Put, ImmutableBytesWritable> {
    /**
     *
     * @param key     input key; k2 is Text, i.e. the row key
     * @param values  input values; v2 is Put
     * @param context
     * @throws IOException
     * @throws InterruptedException
     */
    @Override
    protected void reduce(Text key, Iterable<Put> values, Context context) throws IOException, InterruptedException {
        // ImmutableBytesWritable wraps the row key for the output
        ImmutableBytesWritable immutableBytesWritable = new ImmutableBytesWritable();
        // key is the row key; copy only the valid bytes, because Text.getBytes() returns the backing array, which may be longer than getLength()
        immutableBytesWritable.set(key.getBytes(), 0, key.getLength());
        // Emit every Put for this row key; the TableOutputFormat configured by initTableReducerJob writes them into myuser2
        for (Put put : values) {
            context.write(immutableBytesWritable,put);
        }
        }
    }
}

3. Results

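After the job finishes, the result can be checked from the HBase shell with scan 'myuser2': every row of myuser should now appear in myuser2 containing only the f1:name and f1:age columns.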
