Basic Hive operations and usage
1. Start Hadoop
start-all.sh
2. Create a directory on HDFS
hdfs dfs -mkdir wcinput
hdfs dfs -ls /user/hadoop
3. Upload the file to HDFS
hdfs dfs -put ./509.txt wcinput
hdfs dfs -ls /user/hadoop/wcinput
4. Start Hive
hive
5. Create the raw document table
create table docs(line string);
6. Load the file contents into table docs and view them
load data inpath '/user/hadoop/wcinput/509.txt' overwrite into table docs;
select * from docs;  -- view the table contents
7. Do the word count with HQL, storing the results in the wordcount table
Use one table to hold the file data: each line of the file becomes the value of a single column in one row, so the newline character is used as the field terminator, and the table is partitioned by file name.
drop table file_data;
create table file_data(context string)
partitioned by (file_name string)
row format delimited fields terminated by '\n'
stored as textfile;
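A quick sanity check on the layout just created is describe; the partition column file_name should be reported alongside the data column context:
describe file_data;
-- expected to list context (string) plus file_name as the partition column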
Load the file data into file_data (from the local filesystem, since the command below uses load data local inpath)
cat /home/hadoop/demo.txt    # check the local file before loading it
load data local inpath '/home/hadoop/demo.txt' overwrite into table file_data PARTITION(file_name='/home/hadoop/demo.txt');
Query file_data
select * from file_data;
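Because the table is partitioned by file name, the partition created by the load above can also be listed (standard Hive syntax; the slashes in the partition value may appear URL-escaped in the output):
show partitions file_data;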
Split each line into words and record each word as a separate row in the result table
select explode(split(context,' ')) from file_data where file_name='/home/hadoop/demo.txt';
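A rough illustration of what this query does (demo.txt's contents are not reproduced in these notes, so the sample line is hypothetical): split turns each line into an array of words, and explode then emits one row per array element.
-- Hypothetical row in file_data:  context = 'hello world hello'
-- split(context, ' ')          -> ["hello","world","hello"]   (one array per input row)
-- explode(split(context, ' ')) -> hello
--                                 world
--                                 hello                        (one output row per word)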
drop table wordcount;
create table wordcount(context string)
partitioned by (file_name string)
row format delimited fields terminated by ' '
stored as textfile;
insert overwrite table wordcount partition(file_name='/home/hadoop/demo.txt')
select explode(split(context, ' ')) from file_data
where file_name='/home/hadoop/demo.txt';
Query the word counts with HQL
select context, count(context) from wordcount where file_name='/home/hadoop/demo.txt' group by context;
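For comparison, a more compact sketch that computes the same counts straight from the docs table loaded in step 6, skipping the intermediate file_data and wordcount tables. It assumes the words in 509.txt are separated by single spaces; word_count is just an illustrative table name.
create table word_count as
select word, count(1) as cnt
from (select explode(split(line, ' ')) as word from docs) w
group by word
order by word;
select * from word_count;
The partitioned approach above has the advantage that results for different input files stay separated by the file_name partition.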