Environment variables, appended to /etc/profile:
export JAVA_HOME=/usr/local/jdk1.8.0_171
export JRE_HOME=$JAVA_HOME/jre
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib:$CLASSPATH
export PATH=$JAVA_HOME/bin:$PATH
export HADOOP_HOME=/data/hadoop
export HIVE_HOME=/data/hive-3.1.0
export ZOOKEEPER_HOME=/data/zookeeper-3.4.13
export PATH=$PATH:$HADOOP_HOME/bin:$ZOOKEEPER_HOME/bin:$HIVE_HOME/bin
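A quick way to confirm the variables took effect (this assumes the JDK, Hadoop, and Hive paths above actually exist on the host):

source /etc/profile
echo $JAVA_HOME
hive --version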
1. MySQL is the database that stores Hive's metadata, so the MySQL connector JAR must be placed in, or symlinked into, $HIVE_HOME/lib:
yum install mysql-connector-java
ln -s /usr/share/java/mysql-connector-java.jar /data/hive-3.1.0/lib/mysql-connector-java.jar
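A quick check that the symlink resolves to the real JAR (ls -lL follows the link):

ls -lL /data/hive-3.1.0/lib/mysql-connector-java.jar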
2. Modify hive-site.xml
cd /data/hive-3.1.0/conf
cp hive-env.sh.template hive-env.sh
mkdir /data/hive-3.1.0/tmp
Embedded mode: the Hive service and the metastore service run in the same process, and the Derby database also runs inside that process.
This mode needs no special configuration.
Local mode: the Hive service and the metastore service run in the same process, while MySQL runs as a separate process, on the same machine or on a remote one.
This mode only requires pointing ConnectionURL in hive-site.xml at MySQL and setting the driver class name and the database credentials:
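A minimal local-mode sketch follows (hive.metastore.uris is left unset; the host, database name, and credentials below are placeholders, not values from this deployment):

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive</value>
  </property>
</configuration>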
Remote mode: the Hive service and the metastore run in separate processes, possibly on different machines. The hive-site.xml used for this deployment:
<configuration>
  <!-- Use MySQL as the metastore database -->
  <property>
    <name>hive.metastore.db.type</name>
    <value>mysql</value>
    <description>Expects one of [derby, oracle, mysql, mssql, postgres]. Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it.</description>
  </property>
  <!-- Hive warehouse location in HDFS -->
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/hive/warehouse</value>
  </property>
  <!-- Empty means embedded or local mode; non-empty means remote mode -->
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://namenode1:9083</value>
    <description>Thrift uri for the remote metastore. Used by metastore client to connect to remote metastore.</description>
  </property>
  <!-- MySQL connection URL -->
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://172.16.99.40:3306/hive?createDatabaseIfNotExist=true</value>
  </property>
  <!-- Driver class -->
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <!-- Username -->
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
  </property>
  <!-- Password -->
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive</value>
  </property>
  <!-- Scratch directories -->
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/data/hive-3.1.0/tmp</value>
    <description>Local scratch space for Hive jobs</description>
  </property>
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/data/hive-3.1.0/tmp</value>
    <description>Temporary local directory for added resources in the remote file system.</description>
  </property>
</configuration>
Note: with MySQL 8, the driver class name is com.mysql.cj.jdbc.Driver.
Client configuration:
<configuration>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/hive/warehouse</value>
  </property>
  <property>
    <name>hive.metastore.local</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://namenode2:9083</value>
  </property>
</configuration>
3. Initialize the metastore database
/data/hive-3.1.0/bin/schematool -dbType mysql -initSchema
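To confirm the initialization succeeded, schematool can report the schema version it now finds in MySQL (an optional sanity check):

/data/hive-3.1.0/bin/schematool -dbType mysql -info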
4. Resolve the logging JAR conflict
Keep /data/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar and delete the conflicting binding that ships with Hive:
rm /data/hive-3.1.0/lib/log4j-slf4j-impl-2.10.0.jar
5. Check the environment
hadoop version
hadoop version | awk '{if (NR == 1) {print $2;}}'
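The awk filter keeps just the second field of the first line, i.e. the bare version number. Hypothetical output on a Hadoop 3.1.x install:

3.1.1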
6. Start the server (metastore service)
$HIVE_HOME/bin/hive --service metastore &
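Before pointing clients at it, confirm the metastore's Thrift port (9083, per hive.metastore.uris) is listening; a quick check with ss (use netstat -lnt where ss is unavailable):

ss -lnt | grep 9083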
7. Pick a non-Hadoop machine, then deploy and start the client
scp -r -P 21860 /data/hadoop root@mysql1:/data/
scp -r -P 21860 /data/hive-3.1.0 root@mysql1:/data/
source /etc/profile
hive
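A quick smoke test from the new client, assuming its hive-site.xml is the client configuration shown earlier:

hive -e 'show databases;'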
8. Start hiveserver2 on the server (see https://blog.csdn.net/zhanglh046/article/details/78572926)
./hiveserver2 start

On the client, use beeline:

# beeline
Beeline version 1.2.1 by Apache Hive
beeline> !connect jdbc:hive2://namenode2:10000
Connecting to jdbc:hive2://namenode2:10000
Enter username for jdbc:hive2://namenode2:10000: hive_user
Enter password for jdbc:hive2://namenode2:10000: ******
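Once connected, queries run at the beeline prompt as usual (10000 is hiveserver2's default Thrift port), for example:

0: jdbc:hive2://namenode2:10000> show databases;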
----------------------------------------------------------------------------
Legacy configuration (Hive 2.0.0)
----------------------------------------------------------------------------
http://apache.fayea.com/hive/hive-2.0.0/
wget http://apache.fayea.com/hive/hive-2.0.0/apache-hive-2.0.0-bin.tar.gz
tar zxvf apache-hive-2.0.0-bin.tar.gz
mv apache-hive-2.0.0-bin /home/hadoop/hive
cd /home/hadoop/hive
cd conf
cp hive-default.xml.template hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/home/hadoop/hive-warehouse</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://hadoop-master:3306/hive?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hive</value>
  </property>
  <property>
    <name>hive.metastore.local</name>
    <value>false</value>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://hadoop-master:9083</value>
  </property>
  <property>
    <name>datanucleus.readOnlyDatastore</name>
    <value>false</value>
  </property>
  <property>
    <name>datanucleus.fixedDatastore</name>
    <value>false</value>
  </property>
  <property>
    <name>datanucleus.autoCreateSchema</name>
    <value>true</value>
  </property>
  <property>
    <name>datanucleus.autoCreateTables</name>
    <value>true</value>
  </property>
  <property>
    <name>datanucleus.autoCreateColumns</name>
    <value>true</value>
  </property>
</configuration>
hive-env.sh
# Hive Configuration Directory can be controlled by:
export HIVE_CONF_DIR=/home/hadoop/hive/conf

# Folder containing extra libraries required for hive compilation/execution can be controlled by:
export HIVE_AUX_JARS_PATH=/home/hadoop/hive/lib
Download the MySQL JDBC driver:
http://dev.mysql.com/downloads/file/?id=460362
Put mysql-connector-java-5.1.38-bin.jar into /home/hadoop/hive/lib.
Start the Hive server process:
hive --service metastore
On the client, just run the hive command:
hive
To exit:
exit;
To kill a running MapReduce job if needed:
hadoop job -kill <job_id>
Create a test table:
create table test1 (col_1 string, col_2 string, col_3 string, col_4 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE;
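The '|' field delimiter above means input files must be pipe-separated; a hypothetical sample matching the LOAD below:

printf '1|aa|bb|cc\n2|dd|ee|ff\n' > /home/hadoop/a.txt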
Import from a local file:
LOAD DATA LOCAL INPATH '/home/hadoop/a.txt' OVERWRITE INTO TABLE test1;
dfs -ls /home/hadoop/hive-warehouse/test1
/home/hadoop/hive-warehouse is the warehouse path configured in hive-site.xml, not a real local filesystem path; dfs -ls (run inside the hive CLI) lists it in HDFS.
Import from a file in HDFS:
hadoop fs -put /home/hadoop/b.txt /data/input
hadoop fs -ls /data/input
LOAD DATA INPATH '/data/input/b.txt' OVERWRITE INTO TABLE test1;
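To verify the load, query the table from the hive CLI (note that OVERWRITE replaces any rows loaded earlier):

select * from test1 limit 5;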
Import from another table:
create table test2 (col_1 string, col_2 string, col_3 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' STORED AS TEXTFILE;
insert into table test2 select col_1, col_2, col_3 from test1;
create table test3 as select * from test1;
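A quick check that both copies hold the expected rows, run in the hive CLI:

select count(*) from test2;
select count(*) from test3;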