1. Requirements: a Java environment, a working local Hadoop installation, and a remotely accessible MySQL server
2. Copy the Hive distribution (hive-2.2.1) onto the machine
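For example, assuming the distribution was downloaded as a standard Apache binary tarball (the archive name apache-hive-2.2.1-bin.tar.gz is an assumption here), unpacking it into the directory used as HIVE_HOME in step 3 could look like:
tar -xzf apache-hive-2.2.1-bin.tar.gz
mv apache-hive-2.2.1-bin /data/spark/bin/hive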
3. Set environment variables
export HIVE_HOME=/data/spark/bin/hive
export PATH=$PATH:$HIVE_HOME/bin
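These exports only last for the current shell session. One way to make them permanent (assuming a bash login shell; any shell profile works) is to append them to ~/.bashrc and reload it:
echo 'export HIVE_HOME=/data/spark/bin/hive' >> ~/.bashrc
echo 'export PATH=$PATH:$HIVE_HOME/bin' >> ~/.bashrc
source ~/.bashrc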
4. Install MySQL and the MySQL JDBC connector for Java
yum install -y mysql-server mysql-devel mysql-connector-java
cp /usr/share/java/mysql-connector-java.jar $HIVE_HOME/lib/
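Before step 5, the MySQL service must be running and the metastore database and user referenced below as {db}, {user} and {password} must exist. A minimal sketch, assuming the service is named mysqld and root access to MySQL is available:
service mysqld start
mysql -u root -p
mysql> create database {db};
mysql> grant all privileges on {db}.* to '{user}'@'%' identified by '{password}';
mysql> flush privileges;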
5. Initialize the metastore schema
cd $HIVE_HOME/scripts/metastore/upgrade/mysql
mysql -h {host} -P {port} -u {user} -p{password} {db} < $HIVE_HOME/scripts/metastore/upgrade/mysql/hive-schema-2.1.0.mysql.sql
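As an alternative to running the SQL script by hand, Hive 2.x also ships a schematool that reads the JDBC settings from hive-site.xml (configured in step 6) and creates the same schema. A sketch, assuming step 6 has already been completed:
$HIVE_HOME/bin/schematool -dbType mysql -initSchema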
6. Configure Hive to connect to MySQL
vi $HIVE_HOME/conf/hive-site.xml

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://{host}:{port}/{db}?createDatabaseIfNotExist=true</value>
    <description>metadata is stored in a MySQL server</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>MySQL JDBC driver class</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>{user}</value>
    <description>user name for connecting to mysql server</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>{password}</value>
    <description>password for connecting to mysql server</description>
  </property>
</configuration>
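To confirm that Hive can actually reach the metastore with these settings before creating any tables, schematool can print the connection URL, driver, user and schema version it resolves from hive-site.xml (assuming the schema from step 5 is in place):
$HIVE_HOME/bin/schematool -dbType mysql -info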
7. Test
hive
hive> create table saurzcode(id int, name string);
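A couple of follow-up statements in the same Hive CLI session confirm that the table is visible:
hive> show tables;
hive> describe saurzcode;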
8. Verify
mysql> select * from TBLS;
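This query is run against the metastore database ({db}) configured in step 6. If the new table shows up in TBLS, the standard metastore tables (DBS, TBLS, COLUMNS_V2) can be queried more selectively, for example:
mysql> select TBL_ID, DB_ID, TBL_NAME, TBL_TYPE from TBLS;
mysql> select * from COLUMNS_V2;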