HDFS HA Test Environment Setup and Failover Testing
I. HDFS HA Test Environment Setup
1. Cluster Planning
HDFS HA (High Availability) Cluster

Hostname | IP | Installed Software | Running Processes
--- | --- | --- | ---
bd-dev01 | 10.176.158.41 | JDK, ZooKeeper | QuorumPeerMain
bd-dev02 | 10.176.158.42 | JDK, ZooKeeper | QuorumPeerMain
bd-dev03 | 10.176.158.43 | JDK, ZooKeeper | QuorumPeerMain
bd-dev05 | 10.176.158.45 | JDK, Hadoop | NameNode, DFSZKFailoverController (zkfc)
bd-dev06 | 10.176.158.46 | JDK, Hadoop | NameNode, DFSZKFailoverController (zkfc)
bd-dev07 | 10.176.158.47 | JDK, Hadoop | DataNode, JournalNode
bd-dev08 | 10.176.158.48 | JDK, Hadoop | DataNode, JournalNode
bd-dev09 | 10.176.158.49 | JDK, Hadoop | DataNode, JournalNode
2. Basic Host Configuration
On every node: configure /etc/hosts, disable the firewall, disable SELinux, and set up passwordless SSH login, as sketched below.
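A minimal sketch of these preparation steps, assuming CentOS 7 with firewalld (commands differ on other distributions) and a root login; run on every node:

# Map hostnames to IPs (values from the cluster planning table above)
cat >> /etc/hosts << 'EOF'
10.176.158.41 bd-dev01
10.176.158.42 bd-dev02
10.176.158.43 bd-dev03
10.176.158.45 bd-dev05
10.176.158.46 bd-dev06
10.176.158.47 bd-dev07
10.176.158.48 bd-dev08
10.176.158.49 bd-dev09
EOF
# Disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux (setenforce applies immediately; the config edit survives reboot)
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Passwordless SSH: generate a key and copy it to every node
ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
for h in bd-dev01 bd-dev02 bd-dev03 bd-dev05 bd-dev06 bd-dev07 bd-dev08 bd-dev09; do
  ssh-copy-id root@$h
done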
3. Install the JDK
# Extract
tar -xvf jdk-8u131-linux-x64.tar.gz
mv jdk1.8.0_131 /usr/local/jdk1.8
# Set environment variables: append the following to /etc/profile
vim /etc/profile
JAVA_HOME=/usr/local/jdk1.8/
JAVA_BIN=/usr/local/jdk1.8/bin
JRE_HOME=/usr/local/jdk1.8/jre
PATH=$PATH:/usr/local/jdk1.8/bin:/usr/local/jdk1.8/jre/bin
CLASSPATH=/usr/local/jdk1.8/jre/lib:/usr/local/jdk1.8/lib:/usr/local/jdk1.8/jre/lib/charsets.jar
# export added so child processes inherit the variables
export JAVA_HOME JAVA_BIN JRE_HOME PATH CLASSPATH
# Reload so the variables take effect in the current shell
source /etc/profile
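A quick check that the JDK is installed and on the PATH:

# Verify the installation
java -version
echo $JAVA_HOME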
4. Install ZooKeeper
# Download
[root@sl-opencron src]# wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz
[root@sl-opencron src]# tar -xvf zookeeper-3.4.14.tar.gz
# Move the extracted directory to /usr/local/
[root@sl-opencron src]# mv zookeeper-3.4.14 /usr/local/zookeeper
# Configure ZooKeeper
cd /usr/local/zookeeper/conf/
# Rename zoo_sample.cfg (the sample configuration file) to zoo.cfg
mv zoo_sample.cfg zoo.cfg
# Edit the configuration file
[root@sl-opencron conf]# vim zoo.cfg
# ... (other default settings unchanged) ...
# The path can be customized
dataDir=/data/zookeeper
server.1=10.176.158.41:2888:3888
server.2=10.176.158.42:2888:3888
server.3=10.176.158.43:2888:3888
# Create the myid file
mkdir /data/zookeeper
cd /data/zookeeper
touch myid
echo "1" >> myid
Note: myid is 1 on bd-dev01, 2 on bd-dev02, and 3 on bd-dev03.
# Start the ZooKeeper cluster (run on bd-dev01, bd-dev02, and bd-dev03)
cd /usr/local/zookeeper/bin
./zkServer.sh start
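Once all three instances are started, each node can be checked; one node should report "leader" and the other two "follower":

cd /usr/local/zookeeper/bin
./zkServer.sh status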
5. Install HDFS
# Download
wget http://www-eu.apache.org/dist/hadoop/common/hadoop-2.9.2/hadoop-2.9.2.tar.gz
# Extract
tar -xvf hadoop-2.9.2.tar.gz
# Move the extracted directory to /usr/local/
mv hadoop-2.9.2 /usr/local/hadoop
# Create the data directories on all Hadoop nodes (-p creates missing parent directories)
[root@hadooop-master hadoop]# mkdir -p /sda1/hdfs/tmp /sda1/hdfs/data /sda1/hdfs/name /sda1/hdfs/journal
# Configure Hadoop
vim core-site.xml
core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/sda1/hdfs/tmp</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>10.176.158.41:2181,10.176.158.42:2181,10.176.158.43:2181</value>
  </property>
</configuration>
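As a sanity check after editing the file, the effective values can be read back with getconf, assuming /usr/local/hadoop/bin is on the PATH and the configuration directory is the one Hadoop loads:

hdfs getconf -confKey fs.defaultFS
hdfs getconf -confKey ha.zookeeper.quorum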
vim hdfs-site.xml
hdfs-site.xml
<configuration>
<!-- Set the HDFS nameservice to ns1; it must match the value in core-site.xml -->
<property>
<name>dfs.nameservices</name>
<value>ns1</value>
</property>
<!-- Nameservice ns1 has two NameNodes: nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.ns1</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn1</name>
<value>bd-dev05:9000</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.ns1.nn1</name>
<value>bd-dev05:50070</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.ns1.nn2</name>
<value>bd-dev06:9000</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.ns1.nn2</name>
<value>bd-dev06:50070</value>
</property>
<!-- Where the NameNode's shared edit log (metadata) is stored on the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://bd-dev07:8485;bd-dev08:8485;bd-dev09:8485/ns1</value>
</property>
<!-- Local directory where each JournalNode stores its data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/sda1/hdfs/journal</value>
</property>
<!-- Enable automatic failover when a NameNode fails -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Failover proxy provider used by clients to locate the active NameNode -->
<property>
<name>dfs.client.failover.proxy.provider.ns1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing methods; multiple methods are separated by newlines, one method per line -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<!-- The sshfence method requires passwordless SSH -->
<property