Spark--Environment Setup--6. Spark 1.3.0 Cluster Setup

1. Install Spark
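The steps below assume the Spark tarball is already under /usr/local; if not, it can be fetched from the Apache archive first (URL is an assumption, check against the mirror list):

$ wget -P /usr/local https://archive.apache.org/dist/spark/spark-1.3.0/spark-1.3.0-bin-hadoop2.4.tgz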

$ cd /usr/local

$ tar -zxvf spark-1.3.0-bin-hadoop2.4.tgz

$ mv spark-1.3.0-bin-hadoop2.4 spark

$ vi ~/.bashrc

export SPARK_HOME=/usr/local/spark/
export PATH=$PATH:$SPARK_HOME/bin
export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib

$ source ~/.bashrc
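A quick check that the new variables took effect (spark-submit --version should report 1.3.0 if the PATH entry works):

$ echo $SPARK_HOME

$ spark-submit --version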

$ cd spark/conf/

$ mv spark-env.sh.template spark-env.sh

$ vi spark-env.sh

export JAVA_HOME=/usr/java/latest/
export SCALA_HOME=/usr/local/scala/
# IP address of the Spark cluster's master node
export SPARK_MASTER_IP=192.168.2.100
# Maximum total memory a worker node may allocate to executors
export SPARK_WORKER_MEMORY=1g
# Configuration directory of the Hadoop cluster
export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
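Other standalone-mode knobs can go in the same file if needed; the values below are only an illustrative sketch, not required for this setup:

# Cores a worker offers to executors (defaults to all available cores)
export SPARK_WORKER_CORES=2
# Master RPC port (default 7077) and master web UI port (default 8080)
export SPARK_MASTER_PORT=7077
export SPARK_MASTER_WEBUI_PORT=8080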

$ mv slaves.template slaves

$ vi slaves

Replace the default localhost entry with the worker hostnames:
spark2
spark3
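start-all.sh starts each worker over SSH, so the master must be able to log in to every host listed in slaves without a password. If that is not set up yet, a minimal sketch (using root, to match the scp commands below):

$ ssh-keygen -t rsa

$ ssh-copy-id root@spark2

$ ssh-copy-id root@spark3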

$ cd /usr/local

$ scp -r spark root@spark2:/usr/local/

$ scp -r spark root@spark3:/usr/local/

$ scp ~/.bashrc root@spark2:~/.bashrc

$ scp ~/.bashrc root@spark3:~/.bashrc

# Run on spark2 and spark3 respectively

$ source ~/.bashrc
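A quick sanity check from the master that both workers received the files (hostnames as above):

$ for h in spark2 spark3; do ssh root@$h ls /usr/local/spark/bin/spark-submit; done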

2. Start Spark

$ cd spark/sbin/

$ ./start-all.sh

$ jps
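jps should list a Master process on this node and a Worker on each slave. To check the slaves without logging in one by one (assumes jps is on the remote PATH):

$ for h in spark2 spark3; do ssh root@$h jps; done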

# Open http://spark1:8080 in a browser (the master web UI)
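Without a browser, the master UI can be probed from the shell; a 200 status code means it is up:

$ curl -s -o /dev/null -w "%{http_code}\n" http://spark1:8080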

$ cd ../../

$ spark-shell

scala> exit
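Note that a bare spark-shell starts with a local master; to attach it to the cluster, pass --master spark://192.168.2.100:7077. As a fuller smoke test, the bundled SparkPi example can be submitted to the cluster (the examples jar name varies by distribution, hence the glob):

$ spark-submit --master spark://192.168.2.100:7077 --class org.apache.spark.examples.SparkPi /usr/local/spark/lib/spark-examples-*.jar 10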
