Hadoop Cluster Setup: A Complete Walkthrough

10.96.21.169  namenode

10.96.21.170  datanode

10.96.21.171  datanode

1: Install the JDK

Download the JDK installation package: jdk-7u21-linux-x64.gz

# Use whichever command matches the package format you downloaded:
tar -zxvf jdk-7u21-linux-x64.gz    # tarball
rpm -ivh jdk-7u21-linux-x64.rpm    # RPM package
vi /etc/profile

HOSTNAME=`/bin/hostname`
HISTSIZE=1000

JAVA_HOME=/usr/java/jdk1.7.0_21
CLASSPATH=$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar:$CLASSPATH
PATH=$JAVA_HOME/bin:$PATH

export CLASSPATH JAVA_HOME PATH
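
A quick check that the JDK is picked up (reload the profile first):

source /etc/profile
java -version    # should report java version "1.7.0_21"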

2: Passwordless SSH Login

1: Prerequisite: enable public-key authentication
vi /etc/ssh/sshd_config
RSAAuthentication yes # enable RSA authentication
PubkeyAuthentication yes # enable public-key authentication
AuthorizedKeysFile  %h/.ssh/authorized_keys # path to the authorized-keys file

service sshd restart

2: Passwordless login on the local machine
useradd hadoop
passwd -d hadoop        # remove the password for the hadoop user

su - hadoop             # switch to the hadoop user
ssh-keygen -t dsa       # accept the defaults and leave the passphrase empty

cd ~/.ssh
cat id_dsa.pub >> authorized_keys    # authorize our own public key
cd ..
chmod 700 .ssh
chmod 600 .ssh/authorized_keys       # sshd ignores keys with loose permissions
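
To verify, the following should print the hostname without asking for a password:

ssh localhost hostname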

3: Deployment Preparation

1: Passwordless login from the namenode to the datanodes

On 169:
scp -P 22 id_dsa.pub root@10.96.21.170:/home/hadoop/.ssh/21169
scp -P 22 id_dsa.pub root@10.96.21.171:/home/hadoop/.ssh/21169

On 170 and 171:
cd /home/hadoop/.ssh
cat 21169 >> authorized_keys
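
From 169 (as the hadoop user), each datanode should now accept the key without a password prompt:

ssh hadoop@10.96.21.170 hostname    # should print qd21-170
ssh hadoop@10.96.21.171 hostname    # should print qd21-171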

2: Hosts file (keep the entries identical on all three nodes)
vi /etc/hosts

127.0.0.1    localhost

10.96.21.169 qd21-169
10.96.21.170 qd21-170
10.96.21.171 qd21-171
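
A quick sanity check that the names resolve on each node:

ping -c 1 qd21-169
ping -c 1 qd21-170
ping -c 1 qd21-171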


4: Deployment

1: Download hadoop-2.0.0-cdh4.6.0.tar.gz
2: tar -zxvf hadoop-2.0.0-cdh4.6.0.tar.gz (the extracted tree is assumed to live at /soft/hadoop below)
3: Edit the configuration files
vi /soft/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.7.0_21

vi /soft/hadoop/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://qd21-169</value>  <!-- no port given, so the default 8020 is used -->
  </property>
  <property>
    <name>fs.trash.interval</name>
    <value>10080</value>  <!-- minutes a deleted file stays in the trash; 10080 = 7 days -->
  </property>
  <property>
    <name>fs.trash.checkpoint.interval</name>
    <value>10080</value>  <!-- minutes between trash checkpoints -->
  </property>
</configuration>

vi /soft/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
  <property>
    <name>dfs.replication</name>
    <value>3</value>  <!-- note: with only two datanodes, blocks will stay under-replicated at 3 -->
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/hio/hadoop/tmp</value>  <!-- base directory for HDFS data -->
  </property>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>qd21-169:50070</value>
  </property>
  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>qd21-169:50090</value>
  </property>
  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>  <!-- expose the WebHDFS REST API -->
  </property>
</configuration>
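
With dfs.webhdfs.enabled on, the HDFS REST API can be exercised once the cluster is running (step 5), for example:

curl -i "http://qd21-169:50070/webhdfs/v1/?op=LISTSTATUS&user.name=hadoop"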

vi /soft/hadoop/etc/hadoop/mapred-site.xml
<configuration>
  <property>
   <name>mapreduce.framework.name</name>
   <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>qd21-169:10020</value>
  </property>
  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>qd21-169:19888</value>
  </property>
</configuration>
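
These notes do not show a yarn-site.xml, but with mapreduce.framework.name set to yarn, the NodeManagers need at least the shuffle auxiliary service and the ResourceManager addresses. A minimal sketch for CDH4 (property names differ in later Hadoop 2.x releases), assuming qd21-169 also runs the ResourceManager:

vi /soft/hadoop/etc/hadoop/yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.address</name>
    <value>qd21-169:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>qd21-169:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>qd21-169:8031</value>  <!-- where the nodemanagers register -->
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce.shuffle</value>  <!-- required for the MapReduce shuffle phase -->
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
</configuration>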

vi /soft/hadoop/etc/hadoop/masters    (names the secondary-namenode host)
qd21-169

vi /soft/hadoop/etc/hadoop/slaves     (lists the datanode/nodemanager hosts)
qd21-170
qd21-171

vi /etc/profile
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=./:$JAVA_HOME/lib:$JRE_HOME/lib:$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/soft/hadoop
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HDFS_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export YARN_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export PATH=$PATH:$HOME/bin:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
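
Reload the profile and confirm the Hadoop command resolves:

source /etc/profile
hadoop version    # should report 2.0.0-cdh4.6.0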

5: Startup

1: Create the required directories
mkdir -p /soft/hadoop/logs /hio/hadoop/local /hio/hadoop/logs /hio/hadoop/tmp /hio/hadoop/work
chmod 777 /soft/hadoop/logs /hio/hadoop/local /hio/hadoop/logs /hio/hadoop/tmp /hio/hadoop/work
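
The same directories (and the extracted /soft/hadoop tree itself, which these notes do not show being copied) are needed on the datanodes as well. One way to handle both remotely, assuming root SSH access to 170 and 171:

for h in 10.96.21.170 10.96.21.171; do
  ssh root@$h "mkdir -p /soft /hio/hadoop/local /hio/hadoop/logs /hio/hadoop/tmp /hio/hadoop/work"
  scp -r /soft/hadoop root@$h:/soft/
  ssh root@$h "chmod 777 /soft/hadoop/logs /hio/hadoop/local /hio/hadoop/logs /hio/hadoop/tmp /hio/hadoop/work"
done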

2: Start the cluster
Format the namenode (first run only):
/soft/hadoop/bin/hadoop namenode -format

su - hadoop
/soft/hadoop/sbin/start-dfs.sh
/soft/hadoop/sbin/start-yarn.sh
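
To confirm the daemons came up, jps (shipped with the JDK) and the HDFS admin report are quick checks:

jps                                      # on 169: NameNode, SecondaryNameNode, ResourceManager
                                         # on 170/171: DataNode, NodeManager
/soft/hadoop/bin/hdfs dfsadmin -report   # should list both datanodes as live

The job history server configured in mapred-site.xml is started separately:

/soft/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver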

6: Monitoring

HDFS:
http://10.96.21.169:50070/dfshealth.jsp

YARN jobs:
http://10.96.21.169:8088/cluster
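
The same status is available from the command line, which helps when the web UIs are unreachable:

/soft/hadoop/bin/hdfs dfsadmin -report   # datanode status and capacity
/soft/hadoop/bin/yarn node -list         # registered nodemanagers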

posted @ 2014-06-06 14:33  李占卫