HBase Installation (HA)

######################################################################################################
HBase Installation (HA)
######################################################################################################
Installation environment:

CentOS 6.8
JDK 1.8

Package versions:

zookeeper-3.4.10.tar.gz
hadoop-2.6.5.tar.gz
hbase-1.3.1-bin.tar.gz
apache-phoenix-4.13.1-HBase-1.3-bin.tar.gz
v2.4.0RC2.tar.gz (OpenTSDB)

Role assignment:
4 machines (HA configuration)

Public IP         Private IP      Hostname         zookeeper        hadoop               hbase          phoenix    opentsdb
114.67.233.239    192.168.0.21    elasticsearch    QuorumPeerMain   NameNode, DataNode   HMaster        Y          TSDMain
116.196.66.0     192.168.0.29    hbase-test001    QuorumPeerMain   DataNode             HRegionServer  X          X
116.196.66.9     192.168.0.30    hbase-test002    QuorumPeerMain   DataNode             HRegionServer  X          X
116.196.73.252   192.168.0.31    hbase-test003    X                NameNode, DataNode   HMaster        X          X
# Meaning of X/Y above -- Y: installed; X: not installed

Add hosts entries
vim /etc/hosts

192.168.0.21 elasticsearch
192.168.0.29 hbase-test001
192.168.0.30 hbase-test002
192.168.0.31 hbase-test003

Set up passwordless SSH login (at minimum, the two master nodes need it)

ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub root@elasticsearch
ssh-copy-id -i ~/.ssh/id_rsa.pub root@hbase-test001
ssh-copy-id -i ~/.ssh/id_rsa.pub root@hbase-test002
ssh-copy-id -i ~/.ssh/id_rsa.pub root@hbase-test003
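
To confirm the keys work, each host should answer a remote command without prompting for a password:

# should print each remote hostname with no password prompt
for h in elasticsearch hbase-test001 hbase-test002 hbase-test003; do
    ssh root@$h hostname
done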

######################################################################################################
Install ZooKeeper

wget https://archive.apache.org/dist/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz
tar zxvf zookeeper-3.4.10.tar.gz 
cd zookeeper-3.4.10
cd conf/
cp zoo_sample.cfg zoo.cfg
vim zoo.cfg
Add the following:
dataDir=/root/libin/data/zk/data
dataLogDir=/root/libin/data/zk/logs
#elasticsearch
server.1=elasticsearch:2888:3888
#hbase_test001
server.2=hbase-test001:2888:3888
#hbase_test002
server.3=hbase-test002:2888:3888

Create the ZooKeeper data directories
mkdir -p /root/libin/data/zk/logs
mkdir -p /root/libin/data/zk/data

Create the myid file
cd /root/libin/data/zk/data
vim myid
In the myid file, put the number X from the matching server.X entry; for example, on the server.1 node the myid file contains just 1.
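
A minimal sketch, one command per node (paths as configured above):

echo 1 > /root/libin/data/zk/data/myid    # on elasticsearch (server.1)
echo 2 > /root/libin/data/zk/data/myid    # on hbase-test001 (server.2)
echo 3 > /root/libin/data/zk/data/myid    # on hbase-test002 (server.3)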

Apply the same configuration on the other two nodes

Start ZooKeeper; the start command must be run on all three machines
bin/zkServer.sh start
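
If ZooKeeper is unpacked at the same path on every node, all three daemons can be started from one shell. The path below is a hypothetical example; adjust it to the actual install location:

for h in elasticsearch hbase-test001 hbase-test002; do
    ssh root@$h "cd /root/libin/zookeeper-3.4.10 && bin/zkServer.sh start"
done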

Check the ZooKeeper run state
bin/zkServer.sh status

Start a ZooKeeper client to verify the installation is correct
bin/zkCli.sh -server 192.168.0.30:2181

For example:
[zk: 192.168.0.30:2181(CONNECTED) 1] ls /
[zookeeper, hbase]

######################################################################################################
Install Hadoop

Adjust the Linux system limits
vim /etc/security/limits.conf
# NS_PER_INSTALL
* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072

## After changing these limits, reboot the machine
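
After the reboot, the new limits can be spot-checked from a fresh shell:

ulimit -n    # open files, expect 65536
ulimit -u    # max user processes, expect 131072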


wget http://www-eu.apache.org/dist/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
tar zxvf hadoop-2.6.5.tar.gz
cd hadoop-2.6.5
cd etc/hadoop

Edit the following configuration files
vim hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_171-amd64

vim core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/root/libin/data/hadoop/tmp</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>420</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>elasticsearch:2181,hbase-test001:2181,hbase-test002:2181</value>
</property>
</configuration>

vim hdfs-site.xml
<configuration>
<!-- nameservice: the logical name covering the NameNode pair -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- the NameNodes managed by the nameservice: ns1 manages nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- RPC addresses of the two NameNodes -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>elasticsearch:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>hbase-test003:8020</value>
</property>
<!-- web UIs of the two NameNodes on port 50070 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>elasticsearch:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>hbase-test003:50070</value>
</property>
<!-- hosts running the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://elasticsearch:8485;hbase-test001:8485;hbase-test002:8485/ns1</value>
</property>
<!-- client failover proxy provider -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- fencing methods used between the two NameNodes -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- path to this host's ssh private key -->
<!-- sshfence requires passwordless ssh between the two NameNode hosts; failover fails without it -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- local directory where JournalNode edit logs are stored -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/root/libin/data/hadoop/journal</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/root/libin/data/hadoop/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/root/libin/data/hadoop/data</value>
</property>
</configuration>

vim slaves
elasticsearch
hbase-test001
hbase-test002
hbase-test003

Create the Hadoop data directories
mkdir -p /root/libin/data/hadoop/name
mkdir -p /root/libin/data/hadoop/data
mkdir -p /root/libin/data/hadoop/tmp
mkdir -p /root/libin/data/hadoop/journal

Apply the same configuration on the other 3 nodes, e.g. with the distribution sketch below
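
A minimal sketch for pushing the edited files out, assuming a hypothetical install path of /root/libin/hadoop-2.6.5 on every node (adjust to your layout); run from the Hadoop install directory:

for h in hbase-test001 hbase-test002 hbase-test003; do
    scp etc/hadoop/{hadoop-env.sh,core-site.xml,hdfs-site.xml,slaves} \
        root@$h:/root/libin/hadoop-2.6.5/etc/hadoop/
    ssh root@$h "mkdir -p /root/libin/data/hadoop/{name,data,tmp,journal}"
done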

Format ZKFC to create the HA znode in ZooKeeper; run on the first master node
bin/hdfs zkfc -formatZK

# After formatting succeeds, the node is visible in ZooKeeper:
#[zk: localhost:2181(CONNECTED) 1] ls /hadoop-ha
#[ns1]

Format HDFS; run on the first master node
bin/hdfs namenode -format

Bootstrap the standby NameNode (this copies the formatted metadata); run on the second master node
bin/hdfs namenode -bootstrapStandby

Start HDFS; running this on the master node is sufficient
sbin/start-dfs.sh

# To stop HDFS: sbin/stop-dfs.sh

Verify the Hadoop installation
1. jps: the master node should show NameNode, JournalNode, and related processes
[root@elasticsearch hadoop]# jps
2531 DFSZKFailoverController
2195 DataNode
1734 Elasticsearch
2648 QuorumPeerMain
2395 JournalNode
4029 Jps
2845 HMaster
2061 NameNode

2. Open the HDFS web UI
http://114.67.233.239:50070

3. Verify with simple operations
Create a directory
bin/hadoop fs -mkdir /input
Upload a file
bin/hadoop fs -put LICENSE.txt /input
List the directory
bin/hadoop fs -ls /input
View the file contents
bin/hadoop fs -cat /input/LICENSE.txt
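
4. Check the HA state
hdfs haadmin reports each NameNode's role; one should be active and the other standby:
bin/hdfs haadmin -getServiceState nn1
bin/hdfs haadmin -getServiceState nn2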

##############################################################################################
Install HBase

wget http://archive.apache.org/dist/hbase/1.3.1/hbase-1.3.1-bin.tar.gz
tar zxvf hbase-1.3.1-bin.tar.gz
cd hbase-1.3.1
cd conf

vim hbase-env.sh
Change the following settings:
export JAVA_HOME=/usr/java/jdk1.8.0_171-amd64
export HBASE_MANAGES_ZK=false

vim hbase-site.xml
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://ns1/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<!-- HBase master RPC port; the original key hbase.master expects host:port, so the port-only setting hbase.master.port is used here -->
<name>hbase.master.port</name>
<value>60000</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>elasticsearch,hbase-test001,hbase-test002</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/root/libin/data/zk/data</value>
</property>
</configuration>

vim regionservers
hbase-test001
hbase-test002

vim backup-masters
hbase-test003

Copy Hadoop's core-site.xml and hdfs-site.xml into HBase's conf directory, as sketched below
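
A minimal sketch, using the same hypothetical Hadoop path as above (adjust to your layout):

cp /root/libin/hadoop-2.6.5/etc/hadoop/core-site.xml /root/libin/hbase/hbase-1.3.1/conf/
cp /root/libin/hadoop-2.6.5/etc/hadoop/hdfs-site.xml /root/libin/hbase/hbase-1.3.1/conf/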

Apply the same configuration on the other 3 nodes, e.g. with the loop sketched below
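
As with Hadoop, the edited files can be pushed out in one loop; run from the hbase-1.3.1 directory:

for h in hbase-test001 hbase-test002 hbase-test003; do
    scp conf/{hbase-env.sh,hbase-site.xml,regionservers,backup-masters,core-site.xml,hdfs-site.xml} \
        root@$h:/root/libin/hbase/hbase-1.3.1/conf/
done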

Start HBase; running this on the master node is sufficient
bin/start-hbase.sh

### To stop HBase: bin/stop-hbase.sh

Verify the HBase installation
1. jps should show the HMaster and HRegionServer processes
[root@elasticsearch hadoop-2.6.5]# jps
12978 QuorumPeerMain
8980 NameNode
9188 SecondaryNameNode
9916 HMaster
15086 Jps

2. Simple operations with the hbase shell
bin/hbase shell

Check the status
hbase(main):001:0> status
1 active master, 0 backup masters, 2 servers, 0 dead, 1.5000 average load

hbase(main):002:0>

Create a table
hbase(main):002:0> create 'test1', 'cf'
0 row(s) in 1.2340 seconds

=> Hbase::Table - test1
hbase(main):003:0>

List the table
hbase(main):004:0> list 'test1'
TABLE 
test1 
1 row(s) in 0.0130 seconds

=> ["test1"]
hbase(main):005:0>
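
A quick write/read round trip with standard shell commands completes the check:

hbase(main):006:0> put 'test1', 'row1', 'cf:a', 'value1'
hbase(main):007:0> scan 'test1'
hbase(main):008:0> get 'test1', 'row1'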


3. Check the web UI
http://114.67.233.239:16010

##############################################################################################
Install Phoenix

## Phoenix only needs to be installed on one node (here, the master node)

wget http://apache.01link.hk/phoenix/apache-phoenix-4.13.1-HBase-1.3/bin/apache-phoenix-4.13.1-HBase-1.3-bin.tar.gz
tar zxvf apache-phoenix-4.13.1-HBase-1.3-bin.tar.gz
cd apache-phoenix-4.13.1-HBase-1.3-bin

Copy phoenix-core-4.13.1-HBase-1.3.jar and phoenix-4.13.1-HBase-1.3-client.jar into the HBase lib directory on every node of the cluster.
cp phoenix-core-4.13.1-HBase-1.3.jar ../hbase-1.3.1/lib/
scp phoenix-4.13.1-HBase-1.3-client.jar root@hbase-test001:/root/libin/hbase/hbase-1.3.1/lib/
scp phoenix-4.13.1-HBase-1.3-client.jar root@hbase-test002:/root/libin/hbase/hbase-1.3.1/lib/
scp phoenix-4.13.1-HBase-1.3-client.jar root@hbase-test003:/root/libin/hbase/hbase-1.3.1/lib/

cp phoenix-4.13.1-HBase-1.3-client.jar ../hbase-1.3.1/lib/
scp phoenix-core-4.13.1-HBase-1.3.jar root@hbase-test003:/root/libin/hbase/hbase-1.3.1/lib/
scp phoenix-core-4.13.1-HBase-1.3.jar root@hbase-test002:/root/libin/hbase/hbase-1.3.1/lib/
scp phoenix-core-4.13.1-HBase-1.3.jar root@hbase-test001:/root/libin/hbase/hbase-1.3.1/lib/

Copy hbase-site.xml, hdfs-site.xml, and core-site.xml into Phoenix's bin directory
cd /root/libin/hbase/hbase-1.3.1/conf
cp hbase-site.xml /root/libin/hbase/apache-phoenix-4.13.1-HBase-1.3-bin/bin
cp hdfs-site.xml /root/libin/hbase/apache-phoenix-4.13.1-HBase-1.3-bin/bin
cp core-site.xml /root/libin/hbase/apache-phoenix-4.13.1-HBase-1.3-bin/bin

Install the Python dependency, otherwise the Phoenix sqlline.py client cannot be used
yum install python-argparse

Restart HBase
cd /root/libin/hbase/hbase-1.3.1
bin/stop-hbase.sh
bin/start-hbase.sh

Start sqlline.py
cd /root/libin/hbase/apache-phoenix-4.13.1-HBase-1.3-bin/bin
./sqlline.py localhost
Verify that Phoenix works
List all tables
!tables
Create a table
create table person01 (id integer not null primary key,name varchar,age integer);
Insert data
upsert into person01 values (1,'zhangsan' ,18);
Query data
select * from person01;

[root@elasticsearch apache-phoenix-4.13.1-HBase-1.3-bin]# bin/sqlline.py elasticsearch
Setting property: [incremental, false]
Setting property: [isolation, TRANSACTION_READ_COMMITTED]
issuing: !connect jdbc:phoenix:elasticsearch none none org.apache.phoenix.jdbc.PhoenixDriver
Connecting to jdbc:phoenix:elasticsearch
18/05/16 11:18:19 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Connected to: Phoenix (version 4.13)
Driver: PhoenixEmbeddedDriver (version 4.13)
Autocommit status: true
Transaction isolation: TRANSACTION_READ_COMMITTED
Building list of tables and columns for tab-completion (set fastconnect to true to skip)...
95/95 (100%) Done
Done
sqlline version 1.2.0
0: jdbc:phoenix:elasticsearch> !tables
+------------+--------------+-------------+---------------+----------+------------+--------------------+
| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_NAME | SELF_REFERENCING_C |
+------------+--------------+-------------+---------------+----------+------------+--------------------+
| | SYSTEM | CATALOG | SYSTEM TABLE | | | |
| | SYSTEM | FUNCTION | SYSTEM TABLE | | | |
| | SYSTEM | SEQUENCE | SYSTEM TABLE | | | |
| | SYSTEM | STATS | SYSTEM TABLE | | | |
| | | PERSON | TABLE | | | |
+------------+--------------+-------------+---------------+----------+------------+--------------------+
0: jdbc:phoenix:elasticsearch> create table person01 (id integer not null primary key,name varchar,age integer);
No rows affected (1.347 seconds)
0: jdbc:phoenix:elasticsearch> !tables
+------------+--------------+-------------+---------------+----------+------------+--------------------+
| TABLE_CAT | TABLE_SCHEM | TABLE_NAME | TABLE_TYPE | REMARKS | TYPE_NAME | SELF_REFERENCING_C |
+------------+--------------+-------------+---------------+----------+------------+--------------------+
| | SYSTEM | CATALOG | SYSTEM TABLE | | | |
| | SYSTEM | FUNCTION | SYSTEM TABLE | | | |
| | SYSTEM | SEQUENCE | SYSTEM TABLE | | | |
| | SYSTEM | STATS | SYSTEM TABLE | | | |
| | | PERSON | TABLE | | | |
| | | PERSON01 | TABLE | | | |
+------------+--------------+-------------+---------------+----------+------------+--------------------+
0: jdbc:phoenix:elasticsearch> upsert into person01 values (1,'zhangsan' ,18);
1 row affected (0.052 seconds)
0: jdbc:phoenix:elasticsearch> select * from person01;
+-----+-----------+------+
| ID | NAME | AGE |
+-----+-----------+------+
| 1 | zhangsan | 18 |
+-----+-----------+------+
1 row selected (0.041 seconds)
0: jdbc:phoenix:elasticsearch>

##############################################################################################
Install OpenTSDB

## Single-node OpenTSDB deployment (here, on the master node)

Install gnuplot, the command-line interactive plotting tool that OpenTSDB depends on
yum install gnuplot

Install automake, needed to build from source
yum install automake

wget https://github.com/OpenTSDB/opentsdb/archive/v2.4.0RC2.tar.gz
tar zxvf v2.4.0RC2.tar.gz
cd opentsdb-2.4.0RC2/

Build and install
./build.sh

Set COMPRESSION=NONE in create_table.sh to disable compression. To use compression instead, the Hadoop configuration must be changed to support LZO.

Create the HBase tables OpenTSDB needs
env COMPRESSION=NONE HBASE_HOME=/root/libin/hbase/hbase-1.3.1 ./src/create_table.sh

Edit opentsdb.conf; copy the one under src as a template and modify it
cp src/opentsdb.conf ../
vim opentsdb.conf
tsd.network.port = 4242
tsd.http.staticroot = /root/libin/hbase/opentsdb-2.4.0RC2/build/staticroot
tsd.http.cachedir = /root/libin/hbase/opentsdb-2.4.0RC2/build/cachedir
tsd.storage.hbase.zk_quorum = elasticsearch:2181,hbase-test001:2181,hbase-test002:2181

Start
./build/tsdb tsd
#./build/tsdb tsd --config=./opentsdb.conf
#./build/tsdb tsd --port=4242 --staticroot=staticroot --cachedir=/tmp/tsdtmp --auto-metric &
#nohup ./build/tsdb tsd --config=/etc/opentsdb.conf 2>&1 > /dev/null &

# You can point --config at a configuration file path; without it, OpenTSDB searches these locations:
#./opentsdb.conf
#/etc/opentsdb.conf
#/etc/opentsdb/opentsdb.conf
#/opt/opentsdb/opentsdb.conf


Verify
Open the web UI in a browser
http://114.67.233.239:4242/
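
The HTTP API gives a deeper check. A minimal sketch that writes one data point and reads it back; sys.cpu.user is an arbitrary example metric, and api/put only accepts it if tsd.core.auto_create_metrics = true is set (or pre-create it with ./build/tsdb mkmetric sys.cpu.user):

# write one data point
curl -X POST http://114.67.233.239:4242/api/put -H 'Content-Type: application/json' \
  -d '{"metric":"sys.cpu.user","timestamp":'$(date +%s)',"value":42,"tags":{"host":"elasticsearch"}}'

# read it back
curl 'http://114.67.233.239:4242/api/query?start=1h-ago&m=sum:sys.cpu.user'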