Sqoop & Linux: An Automated Installation Script for a Big-Data VM

I. Notes

How to check whether Sqoop installed successfully: https://www.cnblogs.com/xibuhaohao/p/11775973.html#_label3_0
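
As a quick command-line check (assuming Sqoop is already on the PATH), the client can also report its own build info:

sqoop version # prints the Sqoop 1.4.x build string if the install is healthy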

How to operate MySQL from a shell script: mysql -e can run insert/delete/update/select statements; details: https://blog.csdn.net/feeltouch/article/details/46643065

mysql -h localhost -P3306 -proot -uroot -e "show databases;" # mysql -h host -u username -p password (if one is set) -P port
sed -i '22a\#test' /etc/my.cnf # insert after line 22 (the new text becomes line 23; later lines shift down)
sed -i '23c #testchange' /etc/my.cnf # replace the content of line 23
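
To confirm a sed edit landed on the intended lines, printing the neighborhood is a cheap check (the line range below is illustrative):

sed -n '21,24p' /etc/my.cnf # show lines 21-24 after the edit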

II. Automation Script (commands for reference only)

Applies to: a single-node big-data machine

Usage:

1. Set the global var of every component you want installed to true.

2. ./setup.sh 192.168.56.111 (append your own host IP when launching, because the script references it as $1)

#!/bin/bash
echo 'full system install beginning...'

# global vars
ssh=false
jdk=false
mysql=false
hadoop=false
sqoop=false
hive=false
zookeeper=true
hbase=true
spark=false
flume=false
kafka=false
flink=false

# passwordless SSH login
if [ "$ssh" = true ];then
  echo 'install ssh'
  cd /root
  ssh-keygen -t rsa -P ''
  cd .ssh/
  cat id_rsa.pub >> authorized_keys
  chmod 600 authorized_keys
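  # hedged sanity check (assumes sshd is running on this host): should print
  # the hostname without asking for a password
  ssh -o StrictHostKeyChecking=no localhost 'hostname'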
fi

# setup jdk 1.8.0_111
if [ "$jdk" = true ];then
  echo 'install jdk 1.8'
  cd /opt/install
  tar -zxf /opt/install/jdk-8u111-linux-x64.tar.gz
  mv /opt/install/jdk1.8.0_111 /opt/soft/jdk180
  echo 'export JAVA_HOME=/opt/soft/jdk180' >> /etc/profile
  echo 'export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
  echo 'export PATH=$PATH:$JAVA_HOME/bin' >> /etc/profile
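  # hedged sanity check: /etc/profile is only read by new login shells,
  # so reload it here before querying the JDK version
  source /etc/profile
  java -version # expect: java version "1.8.0_111"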
fi

# mysql 5.7
if [ "$mysql" = true ];then
  echo 'install mysql 5.7'
  cd /opt/install
  wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
  rpm -ivh mysql-community-release-el7-5.noarch.rpm
  yum install mysql-server -y
  chown -R root:root /var/lib/mysql
  chown root /var/lib/mysql/
  service mysqld restart
  mysql -h localhost -P3306 -uroot -e "use mysql;update user set password=password('root') where user='root';grant all on *.* to root@'%' identified by 'root';flush privileges;"
  sed -i '22a\character-set-server=utf8' /etc/my.cnf
  service mysqld restart
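  # hedged sanity check (assumes the root/root credentials set above took effect)
  mysql -uroot -proot -e "show variables like 'character_set_server';" # expect utf8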
fi

# hadoop
if [ "$hadoop" = true ];then
  echo 'install hadoop cdh 5.14.2 -hadoop 2.6'
  cd /opt/install
  tar -zxf /opt/install/hadoop-2.6.0-cdh5.14.2.tar.gz
  mv /opt/install/hadoop-2.6.0-cdh5.14.2 /opt/soft/hadoop260
  sed -i '25c export JAVA_HOME=/opt/soft/jdk180' /opt/soft/hadoop260/etc/hadoop/hadoop-env.sh
  sed -i '19a\<property><name>hadoop.proxyuser.root.groups</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i '19a\<property><name>hadoop.proxyuser.root.hosts</name><value>*</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i '19a\<property><name>hadoop.tmp.dir</name><value>/opt/soft/hadoop260/tmp</value></property>' /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i "19a\<property><name>fs.defaultFS</name><value>hdfs://$1:9000</value></property>" /opt/soft/hadoop260/etc/hadoop/core-site.xml
  sed -i '19a\<property><name>dfs.permissions</name><value>false</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
  sed -i '19a\<property><name>dfs.replication</name><value>1</value></property>' /opt/soft/hadoop260/etc/hadoop/hdfs-site.xml
  cp /opt/soft/hadoop260/etc/hadoop/mapred-site.xml.template /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
  sed -i '19a\<property><name>mapreduce.framework.name</name><value>yarn</value></property>' /opt/soft/hadoop260/etc/hadoop/mapred-site.xml
  sed -i '16a\<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
  sed -i '16a\<property><name>yarn.resourcemanager.hostname</name><value>localhost</value></property>' /opt/soft/hadoop260/etc/hadoop/yarn-site.xml
  
  echo '#hadoop' >> /etc/profile
  echo 'export HADOOP_HOME=/opt/soft/hadoop260' >> /etc/profile
  echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export YARN_HOME=$HADOOP_HOME' >> /etc/profile
  echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> /etc/profile
  echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin' >> /etc/profile
  echo 'export HADOOP_INSTALL=$HADOOP_HOME' >> /etc/profile
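  # first-install-only step (assumption: dfs.name.dir defaults to
  # ${hadoop.tmp.dir}/dfs/name, i.e. /opt/soft/hadoop260/tmp/dfs/name):
  # format the NameNode before the first start-all.sh; the guard skips the
  # destructive format when metadata already exists
  if [ ! -d /opt/soft/hadoop260/tmp/dfs/name/current ]; then
    source /etc/profile
    hdfs namenode -format
  fi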
fi

#install sqoop
if [ "$sqoop" = true ];then
  echo 'install sqoop'
  cd /opt/install
  tar -zxf sqoop-1.4.6-cdh5.14.2.tar.gz
  mv /opt/install/sqoop-1.4.6-cdh5.14.2 /opt/soft/sqoop146
  cp /opt/soft/sqoop146/conf/sqoop-env-template.sh /opt/soft/sqoop146/conf/sqoop-env.sh
  echo 'export HADOOP_COMMON_HOME=/opt/soft/hadoop260' >> /opt/soft/sqoop146/conf/sqoop-env.sh
  echo 'export HADOOP_MAPRED_HOME=/opt/soft/hadoop260' >> /opt/soft/sqoop146/conf/sqoop-env.sh
  echo 'export HIVE_HOME=/opt/soft/hive110' >> /opt/soft/sqoop146/conf/sqoop-env.sh
  echo 'export HBASE_HOME=/opt/soft/hbase120' >> /opt/soft/sqoop146/conf/sqoop-env.sh
  echo 'export ZOOCFGDIR=/opt/soft/zk345/conf' >> /opt/soft/sqoop146/conf/sqoop-env.sh
  cp /opt/install/mysql-connector-java-5.1.38.jar /opt/soft/sqoop146/lib
  cp /opt/install/java-json.jar /opt/soft/sqoop146/lib
  cp /opt/soft/hive110/lib/hive-common-1.1.0-cdh5.14.2.jar /opt/soft/sqoop146/lib
  cp /opt/soft/hive110/lib/hive-jdbc-1.1.0-cdh5.14.2-standalone.jar /opt/soft/sqoop146/lib
  echo '#sqoop' >> /etc/profile
  echo 'export SQOOP_HOME=/opt/soft/sqoop146' >> /etc/profile
  echo 'export PATH=$PATH:$SQOOP_HOME/bin' >> /etc/profile
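  # hedged smoke test (assumes the local MySQL root/root account from the mysql
  # section; a plain JDBC round-trip, no running Hadoop services needed)
  source /etc/profile
  sqoop list-databases --connect jdbc:mysql://localhost:3306 --username root --password root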
fi

# hive
if [ "$hive" = true ];then
  echo 'install hive110'
  cd /opt/install
  tar -zxf hive-1.1.0-cdh5.14.2.tar.gz
  mv hive-1.1.0-cdh5.14.2 /opt/soft/hive110
  cp /opt/install/mysql-connector-java-5.1.38.jar /opt/soft/hive110/lib
  #cp /opt/soft/hive110/conf/hive-env.sh.template /opt/soft/hive110/conf/hive-env.sh
  touch /opt/soft/hive110/conf/hive-site.xml
  echo '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<configuration>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>hive.metastore.warehouse.dir</name><value>/opt/soft/hive110</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>hive.metastore.local</name><value>true</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo "<property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://$1:3306/hive110?createDatabaseIfNotExist=true</value></property>" >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.jdbc.Driver</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>javax.jdo.option.ConnectionUserName</name><value>root</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>javax.jdo.option.ConnectionPassword</name><value>root</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>hive.server2.thrift.client</name><value>NONE</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>hive.server2.thrift.client.user</name><value>root</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '<property><name>hive.server2.thrift.client.password</name><value>root</value></property>' >> '/opt/soft/hive110/conf/hive-site.xml'
  echo '</configuration>' >> '/opt/soft/hive110/conf/hive-site.xml'
  
  echo '#hive' >> /etc/profile
  echo 'export HIVE_HOME=/opt/soft/hive110' >> /etc/profile
  echo 'export PATH=$PATH:$HIVE_HOME/bin' >> /etc/profile
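  # hedged first-time step: create the metastore schema explicitly with schematool
  # (otherwise Hive falls back on createDatabaseIfNotExist at first use)
  source /etc/profile
  schematool -dbType mysql -initSchema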
fi

# zookeeper
if [ "$zookeeper" = true ];then
  echo 'install zookeeper'
  cd /opt/install
  tar -zxf zookeeper-3.4.5-cdh5.14.2.tar.gz
  mv /opt/install/zookeeper-3.4.5-cdh5.14.2 /opt/soft/zk345
  cp /opt/soft/zk345/conf/zoo_sample.cfg /opt/soft/zk345/conf/zoo.cfg
  mkdir -p /opt/soft/zk345/datas
  sed -i '12c dataDir=/opt/soft/zk345/datas' /opt/soft/zk345/conf/zoo.cfg
  echo "server.0=$1:2287:3387" >> /opt/soft/zk345/conf/zoo.cfg
  echo '#zookeeper' >> /etc/profile
  echo 'export ZOOKEEPER_HOME=/opt/soft/zk345' >> /etc/profile
  echo 'export PATH=$PATH:$ZOOKEEPER_HOME/bin' >> /etc/profile
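  # hedged sanity check: with a single server.0 entry ZooKeeper runs standalone
  source /etc/profile
  zkServer.sh start
  zkServer.sh status # expect "Mode: standalone"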
fi

# hbase
if [ "$hbase" = true ];then
  echo 'setup hbase'
  cd /opt/install
  tar -zxf /opt/install/hbase-1.2.0-cdh5.14.2.tar.gz
  mv /opt/install/hbase-1.2.0-cdh5.14.2 /opt/soft/hbase120
  echo 'export HBASE_MANAGES_ZK=false' >> /opt/soft/hbase120/conf/hbase-env.sh # do not let HBase manage its own bundled ZooKeeper
  echo 'export JAVA_HOME=/opt/soft/jdk180' >> /opt/soft/hbase120/conf/hbase-env.sh
  sed -i '23a\<property><name>hbase.zookeeper.property.clientPort</name><value>2181</value></property>'  /opt/soft/hbase120/conf/hbase-site.xml
  sed -i '23a\<property><name>hbase.zookeeper.property.dataDir</name><value>/opt/soft/zk345</value></property>'  /opt/soft/hbase120/conf/hbase-site.xml
  sed -i '23a\<property><name>hbase.cluster.distributed</name><value>true</value></property>'  /opt/soft/hbase120/conf/hbase-site.xml
  sed -i "23a\<property><name>hbase.rootdir</name><value>hdfs://$1:9000/hbase</value></property>"  /opt/soft/hbase120/conf/hbase-site.xml
  
  echo '#hbase' >> /etc/profile
  echo 'export HBASE_HOME=/opt/soft/hbase120' >> /etc/profile
  echo 'export PATH=$PATH:$HBASE_HOME/bin' >> /etc/profile
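  # hedged smoke test, left commented out because HDFS and ZooKeeper are not
  # running yet at install time -- run manually once they are up:
  # start-hbase.sh
  # echo "status" | hbase shell   # expect 1 active master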
fi

III. How to Start and Stop the VM?

1. Startup order

zookeeper: zkServer.sh start
hadoop: (/opt/bigdata/hadoop/hadoop260/sbin/)
    start-all.sh
hbase:   (/opt/bigdata/hadoop/hbase120/bin)

    start-hbase.sh [requires ZooKeeper and Hadoop]

    => then the hbase shell can be opened directly

mysql:nohup bin/mysqld_safe --defaults-file=/etc/my3307.cnf --user=mysql >/dev/null 2>&1 &

hive: [requires Hadoop and MySQL; may also require ZooKeeper]
  1) Using the third-party Beeline client

  #### Recommended: start the metastore server in the foreground first, then HiveServer2
    hive --service metastore
    nohup hiveserver2 >/dev/null 2>&1 & 

    •   If you get: Error: Could not open client transport with JDBC Uri: jdbc:hive2://192.168.56.111:10000: java.net.Co
    •   try the foreground command: hive --service hiveserver2

  ### Command
    beeline -u jdbc:hive2://192.168.56.111:10000

  2) Using the hive shell directly

    hive --service metastore
    hive 

zeppelin: cd /opt/bigdata/hadoop/zeppelin082/bin [requires Hadoop]

    ./zeppelin-daemon.sh start

    http://192.168.56.111:8001

>>> As a script:

zkServer.sh start
start-all.sh
nohup hive --service metastore &
nohup hive --service hiveserver2 &
start-hbase.sh
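
Once this script finishes, jps should show roughly the processes below (RunJar appears twice, once for the metastore and once for HiveServer2; the exact set assumes all the components above are enabled):

jps
# expected: QuorumPeerMain, NameNode, DataNode, SecondaryNameNode,
# ResourceManager, NodeManager, RunJar (x2), HMaster, HRegionServer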

2. Shutdown order

  stop hbase: stop-hbase.sh
  stop zookeeper: zkServer.sh stop
  stop hive: kill -9 XXX
  stop hadoop: stop-all.sh

When jps shows nothing but Jps itself:
shutdown now      # safely power off the VM

>>> As a script:

stop-hbase.sh
zkServer.sh stop
res=`ps -ef|grep RunJar|grep Hive|awk '{print $2}'`
for i in $res 
do 
    kill -9 $i
done 
res2=`jps | grep RunJar | awk '{print $1}'`
[ -n "$res2" ] && kill -9 $res2 # guard against an empty PID list
stop-all.sh

 
