(Tested) Accessing Hadoop Remotely from Eclipse

1. Environment:

 hadoop 2.6.0

 jdk 1.7 x64

 centos7

 eclipse j2ee

 

2. Install Hadoop

  1. Disable the firewall

  On CentOS 7.0 and above, use these commands:

  systemctl stop firewalld.service #stop the firewall now

  systemctl disable firewalld.service #keep it from starting at boot

  On CentOS releases below 7.0, use these instead:

  service iptables stop #stop the firewall now

  chkconfig iptables off #keep it from starting at boot
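
  To confirm the firewall really is down before moving on (exact output wording varies by release):

  systemctl status firewalld.service #CentOS 7: should show "inactive (dead)"

  service iptables status #older releases: should report the firewall is not running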

  

  2. Set the hostname

  vi /etc/hosts

  Remove all other host entries and insert the line below:

10.0.1.35 zzm #ip hostname

  vi /etc/sysconfig/network

# Created by anaconda
NETWORKING=yes
HOSTNAME=zzm
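
  Note that on CentOS 7 the hostname is managed by systemd, so hostnamectl set-hostname zzm is the variant that takes effect immediately. Either way, a quick sanity check (adjust the IP/hostname to your own values):

  hostname #should print zzm

  ping -c 1 zzm #should answer from 10.0.1.35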

 

  3. Unpack JDK 1.7 and Hadoop 2.6.0

  mkdir -p /usr/local/java #tar -C requires the target directory to exist

  tar -zxvf jdk-7u80-linux-x64.gz -C /usr/local/java/ #extract into the target directory

  tar -zxvf hadoop-2.6.0.tar.gz -C /usr/local/ #extract into the target directory

  mv jdk1.7.0_80 jdk1.7 #rename the unpacked JDK directory (run inside /usr/local/java)

  mv hadoop-2.6.0 hadoop #rename the unpacked Hadoop directory (run inside /usr/local)

 

  4. Set environment variables

  vi /etc/profile

  

# set java environment
export JAVA_HOME=/usr/local/java/jdk1.7
export JRE_HOME=/usr/local/java/jdk1.7/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
# set hadoop environment
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

  source /etc/profile #apply the changes to the current shell
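
  A quick check that both are now on the PATH:

  java -version #should report java version "1.7.0_80"

  hadoop version #should report Hadoop 2.6.0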

 

  5. Edit the Hadoop configuration (all files below live in $HADOOP_HOME/etc/hadoop)

  hadoop-env.sh

  

# The java implementation to use.
export JAVA_HOME=/usr/local/java/jdk1.7

  core-site.xml

<configuration>
    <property>
         <name>fs.default.name</name>
         <value>hdfs://zzm:9000</value>
         <final>true</final>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop/hadoop_tmp</value>
    </property>
</configuration>
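
  Note: fs.default.name still works in 2.6.0 but is deprecated; the same setting can be written with the current key instead:

<property>
    <name>fs.defaultFS</name>
    <value>hdfs://zzm:9000</value>
</property>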

  yarn-site.xml

<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
</configuration>

  mapred-site.xml (2.6.0 ships only mapred-site.xml.template, so copy it first: cp mapred-site.xml.template mapred-site.xml)

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapred.system.dir</name>
        <value>/home/hadoop/mapred/system/</value>
        <final>true</final>
    </property>
    <property>
        <name>mapred.local.dir</name>
        <value>/home/hadoop/mapred/local</value>
        <final>true</final>
    </property>
</configuration>

  hdfs-site.xml

  mkdir -p /home/hadoop/dfs/name #NameNode metadata directory

  mkdir -p /home/hadoop/dfs/data #DataNode block directory

<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/home/hadoop/dfs/name</value>
        <final>true</final>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/home/hadoop/dfs/data</value>
        <description>Determines where on the local
          filesystem a DFS data node should store its blocks.
          If this is a comma-delimited list of directories,
          then data will be stored in all named
          directories, typically on different devices.
          Directories that do not exist are ignored.
        </description>
        <final>true</final>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
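
  Before the first start, format the NameNode once (this initializes dfs.namenode.name.dir; do not rerun it later or existing HDFS data is lost):

  hdfs namenode -format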

 

  6. Start Hadoop

  start-all.sh #deprecated in 2.x but still works; alternatively start the daemons one by one:

  hadoop-daemon.sh start namenode

  hadoop-daemon.sh start datanode

  yarn-daemon.sh start resourcemanager

  yarn-daemon.sh start nodemanager
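
  Check with jps that the daemons are up (pids will differ; start-all.sh also launches a SecondaryNameNode):

  jps #expect NameNode, DataNode, ResourceManager, NodeManager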

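  With the cluster up, remote access from Eclipse can be smoke-tested with a short Java client. This is a minimal sketch, not part of the original setup: it assumes the Hadoop 2.6.0 client jars are on the Eclipse project's build path, that the machine running Eclipse can also resolve zzm (add 10.0.1.35 zzm to its local hosts file), and the class name HdfsSmokeTest is just an illustration.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsSmokeTest {
    public static void main(String[] args) throws Exception {
        // Connect to the NameNode configured in core-site.xml above.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://zzm:9000"), conf);

        // Create a directory remotely (permitted because dfs.permissions is false),
        // then list the root to prove the round trip works.
        fs.mkdirs(new Path("/eclipse-test"));
        for (FileStatus status : fs.listStatus(new Path("/"))) {
            System.out.println(status.getPath() + "  " + status.getLen());
        }
        fs.close();
    }
}

  Both calls only talk to the NameNode on port 9000, which keeps the test independent of DataNode reachability; reading or writing file contents from outside the cluster would additionally require the DataNode ports to be open.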