Hadoop HA configuration procedure:

1. Add to hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.8
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_ZKFC_USER=root
export HDFS_JOURNALNODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root
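
The *_USER exports tell the Hadoop 3 start scripts which account each daemon runs as; without them, start-dfs.sh aborts when run as root. A quick sanity check before continuing, assuming the JDK really sits at the path exported above:

ls /usr/local/jdk1.8/bin/java          # should list the java binary
/usr/local/jdk1.8/bin/java -version    # should print a 1.8 version string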

2. Add to core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://bdp</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/bdp/hadoop/ha</value>
</property>
<property>
<name>hadoop.http.staticuser.user</name>
<value>root</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
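
fs.defaultFS points at the nameservice ID (bdp) rather than a single host, and ha.zookeeper.quorum lists the ZooKeeper ensemble that ZKFC uses for leader election. A minimal check that the quorum is actually up (assumes ZooKeeper's bin directory is on PATH on each node):

zkServer.sh status                 # run on node1/2/3; expect one leader and two followers
zkCli.sh -server node1:2181 ls /   # confirms a client connection to the ensemble succeeds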

3. Add to hdfs-site.xml
<property>
<name>dfs.nameservices</name>
<value>bdp</value>
</property>
<property>
<name>dfs.ha.namenodes.bdp</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bdp.nn1</name>
<value>node1:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bdp.nn2</name>
<value>node2:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bdp.nn1</name>
<value>node1:9870</value>
</property>
<property>
<name>dfs.namenode.http-address.bdp.nn2</name>
<value>node2:9870</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/bdp</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/var/bdp/hadoop/ha/qjm</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.bdp</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence
shell(true)</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
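
sshfence requires passwordless SSH from each NameNode host to the other using the key in dfs.ha.fencing.ssh.private-key-files; shell(true) is the fallback so failover is not blocked when the failed node is unreachable over SSH. Worth verifying the key works before enabling automatic failover (a sketch using this guide's hostnames):

ssh -i /root/.ssh/id_rsa root@node2 hostname   # from node1; should print node2 with no password prompt
ssh -i /root/.ssh/id_rsa root@node1 hostname   # from node2; should print node1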

4. Startup steps (must be executed in this order; the format and bootstrap steps fail if their prerequisites are not up)
zkServer.sh start                    (node1, node2, node3)
hdfs --daemon start journalnode      (node1, node2, node3)
hdfs namenode -format                (node1 only, first time only)
hdfs --daemon start namenode         (node1)
hdfs namenode -bootstrapStandby      (node2)
hdfs zkfc -formatZK                  (node1 only, first time only)
start-dfs.sh                         (node1)
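
Once start-dfs.sh finishes, one NameNode should be active and the other standby. A rough verification and failover drill (nn1/nn2 are the IDs from dfs.ha.namenodes.bdp):

jps                                  # per node: NameNode / DataNode / JournalNode / DFSZKFailoverController as applicable
hdfs haadmin -getServiceState nn1    # expect active or standby
hdfs haadmin -getServiceState nn2    # expect the opposite state
hdfs --daemon stop namenode          # on the currently active node, to exercise automatic failover
hdfs haadmin -getServiceState nn2    # check the surviving ID; it should report active within a few seconds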