Big Data

To use Hive, HDFS and YARN need to be running first; the commands for starting everything are below.
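A minimal sketch for bringing HDFS and YARN up first (assuming a standard Hadoop installation whose sbin scripts are on the PATH; adjust to your own cluster layout):

start-dfs.sh     # starts the NameNode and DataNodes
start-yarn.sh    # starts the ResourceManager and NodeManagers
jps              # check that the daemons are running

With HDFS and YARN up, start the Hive services: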

cd /kkb/install/hive

bin/hive --service metastore

bin/hive --service hiveserver2

bin/beeline -u jdbc:hive2://node01:10000 -n hadoop
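Once beeline is connected, a quick sanity check with standard HiveQL:

show databases;
show tables;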

Then copy the CSV file onto the Linux system.
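One way to do the copy, as a sketch (scp from a local machine; the hadoop user and the node01 host come from the beeline command above, and the target directory matches the load path used below):

scp AA.csv hadoop@node01:/kkb/install/hive/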

Create the table in Hive:

create table lingshou_csv (
    ID String,
    name String,
    tongxunaddress String,
    zhuceaddress String,
    faren String,
    xingzhengcode String,
    hangyeweidu String,
    hangyecode String,
    hangyedalei String,
    hangye2 String,
    hangyecode1 String,
    dengjizhuce String,
    jishulingyu String,
    qiyeweidu String,
    gaoxinjishuweidu String,
    gaoxin String,
    gaoxindalei String,
    gongyeall String,
    yingyeshouru String,
    nianmoall String,
    yearall String,
    congyerenyuan String,
    zhuanyejishu String,
    kejihuodongrenyuan String,
    kejihuodongjingfei String,
    zhuanlishenqingnum String,
    zhuanlishouquan String,
    diyuweidu String,
    datayear String
)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
STORED AS TEXTFILE;
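If the CSV file has a header row, Hive can be told to skip it on read; a hedged sketch using the standard skip.header.line.count table property (whether this particular file actually has a header is an assumption):

alter table lingshou_csv set tblproperties ('skip.header.line.count'='1');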

Import the CSV file into the table: load data local inpath '/kkb/install/hive/AA.csv' into table lingshou_csv;
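To confirm the data landed, a couple of standard HiveQL checks run from beeline:

select count(*) from lingshou_csv;
select * from lingshou_csv limit 5;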
