Hadoop完全分布式搭建
单独启动或关闭某一个 Hadoop 守护进程（daemon）
# Start or stop a single HDFS daemon (Hadoop 2.x uses the hadoop-daemon.sh script).
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
hadoop-daemon.sh start secondarynamenode
hadoop-daemon.sh stop datanode
start-all.sh    # start HDFS (NameNode, DataNode, SecondaryNameNode) and YARN (ResourceManager, NodeManager)
stop-all.sh     # stop both the HDFS and YARN components
start-dfs.sh    # start the HDFS components (NameNode, DataNode, SecondaryNameNode)
stop-dfs.sh     # stop the HDFS components
start-yarn.sh   # start the YARN components
stop-yarn.sh    # stop the YARN components
# Start or stop a single YARN daemon.
yarn-daemon.sh start resourcemanager
yarn-daemon.sh start nodemanager
vim /etc/hostname   # check each host's hostname; every node must have a unique name
vim /etc/hosts      # verify the IP-to-hostname mappings are identical on every host
ssh-keygen          # generate the public/private key pair
ssh-copy-id hdp02   # copy the public key to the other hosts so mutual access needs no password
ssh hdp02           # verify that passwordless login works
vim /opt/soft/hadoop277/etc/hadoop/slaves   # list the hostname of every DataNode host
以下为 hdfs-site.xml 中的配置项，用于设置 HDFS 上文件块的副本数
<!-- Number of replicas kept for each file block stored on HDFS. -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
# Distribute the updated configuration to the remaining hosts with scp.
scp /opt/soft/hadoop277/etc/hadoop/hdfs-site.xml hdp02:/opt/soft/hadoop277/etc/hadoop
# Wipe the old NameNode/DataNode metadata before reformatting.
# Guard the cd so 'rm -rf *' can never run in the wrong directory if the path is missing.
cd /opt/soft/hadoop277/tmp || exit 1
rm -rf -- *
# Format the NameNode ('hdfs namenode -format' is the Hadoop 2.x command;
# 'hadoop namenode -format' is deprecated).
hdfs namenode -format
# Start the cluster daemons.
start-dfs.sh
start-yarn.sh