Hadoop settings

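Generate a passphrase-less SSH key and authorize it, so the Hadoop start scripts can log in to each node (including localhost) without prompting for a password: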
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa

cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
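
If the key was installed correctly, logging in to localhost should not ask for a password (a quick check; type exit to leave the session):

ssh localhost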

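Append the following environment variables to ~/.bashrc (paths assume Hadoop lives in /usr/local/hadoop and Java is OpenJDK 8):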
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"   # the native libraries live under lib/native
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH

source ~/.bashrc
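
As a quick sanity check (assuming the PATH changes above took effect), the hadoop command should now resolve:

hadoop version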

sudo gedit /usr/local/hadoop/etc/hadoop/hadoop-env.sh

export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
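
Hadoop's daemons do not reliably inherit JAVA_HOME from the login environment when started through the cluster scripts, so it has to be set explicitly in hadoop-env.sh.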

sudo gedit /usr/local/hadoop/etc/hadoop/core-site.xml


<configuration>
<!-- fs.default.name is deprecated; fs.defaultFS is the current property name -->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
</property>
</configuration>

sudo gedit /usr/local/hadoop/etc/hadoop/yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->

<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <!-- the property key must embed the aux-service name declared above -->
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>

</configuration>

sudo cp /usr/local/hadoop/etc/hadoop/mapred-site.xml.template /usr/local/hadoop/etc/hadoop/mapred-site.xml

sudo gedit /usr/local/hadoop/etc/hadoop/mapred-site.xml

<configuration>
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>

</configuration>
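
Setting mapreduce.framework.name to yarn submits MapReduce jobs to YARN instead of running them with the local job runner (the default in Hadoop 2).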

sudo gedit /usr/local/hadoop/etc/hadoop/hdfs-site.xml


<configuration>
<property>
    <name>dfs.replication</name>
    <value>3</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/usr/local/hadoop/hadoop_data/hdfs/namenode</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/usr/local/hadoop/hadoop_data/hdfs/datanode</value>
</property>
</configuration>

sudo mkdir -p /usr/local/hadoop/hadoop_data/hdfs/namenode
sudo mkdir -p /usr/local/hadoop/hadoop_data/hdfs/datanode

sudo chown -R liqu /usr/local/hadoop
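
With the directories created and owned by the Hadoop user, the usual next steps (not in the original notes) are to format the NameNode once and start the daemons:

hdfs namenode -format
start-dfs.sh
start-yarn.sh
jps    # should list NameNode, DataNode, SecondaryNameNode, ResourceManager, NodeManager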

Problem: the NameNode fails to start, with the log reporting:

/usr/local/hadoop/hadoop-2.8.3/tmp/dfs/name is in an inconsistent state: storage directory does not exist or is not accessible.

The clue is in this path: it points at a temporary directory that gets deleted every time Hadoop restarts, so the NameNode cannot find its storage directory.

Fix:

Edit core-site.xml, changing:

<property>
    <name>hadoop.tmp.dir</name>
    <value>file:///usr/local/hadoop/tmp</value>
</property>

to the following (a plain local path, without the file:// scheme):

<property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop/tmp</value>
</property>

hadoop.tmp.dir expects a plain local path; with this fix in place the data no longer lives in a location that is cleared on every restart.
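
After changing hadoop.tmp.dir, re-format the NameNode once so the new storage directory is initialized (this erases HDFS metadata, so only do it on a fresh install):

hdfs namenode -format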

sudo gedit /etc/network/interfaces

Contents of /etc/network/interfaces:

# interfaces(5) file used by ifup(8) and ifdown(8)
auto lo
iface lo inet loopback

#NAT interface
auto eth0
iface eth0 inet dhcp

#host only interface
auto eth1
iface eth1 inet static
address 192.168.56.101
netmask 255.255.255.0
network 192.168.56.0
broadcast 192.168.56.255
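
For the static address on eth1 to take effect, bring the interface down and up again (on an ifupdown-based Ubuntu) or reboot the VM:

sudo ifdown eth1 && sudo ifup eth1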

sudo gedit /etc/hosts

127.0.0.1 localhost
127.0.1.1 liqu-VirtualBox

192.168.56.100 master
192.168.56.101 data1
192.168.56.102 data2
192.168.56.103 data3
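
With the other VMs running, the entries can be checked by pinging each node by name:

ping -c 1 master
ping -c 1 data1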

Add the following to yarn-site.xml on data1 (the other data nodes need the same entries):

<property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>master:8025</value>
</property>

<property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>master:8030</value>
</property>

<property>
    <name>yarn.resourcemanager.address</name>
    <value>master:8050</value>
</property>
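
These three addresses point each NodeManager at the ResourceManager running on master. Once the cluster is up, registration can be verified from master with:

yarn node -list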
