Hive Basic Usage


Hive is a data warehouse tool built on top of Hadoop. It maps structured data files to database tables and provides a simple SQL query capability, translating SQL statements into MapReduce jobs for execution. Its main advantage is a low learning curve: simple MapReduce-style statistics can be expressed in SQL-like statements without writing a dedicated MapReduce application, which makes it a good fit for statistical analysis of data.
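For example, a minimal sketch of such an aggregation (the table page_views and its user_id column here are hypothetical), which Hive compiles into a MapReduce job behind the scenes:

	hive> SELECT user_id, COUNT(1) FROM page_views GROUP BY user_id;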

My notes and summary follow:

Hive Basic Syntax

Create a table 
	hive> CREATE TABLE pokes (foo INT, bar STRING); 
Create a table partitioned by the column ds 
	hive> CREATE TABLE invites (foo INT, bar STRING) PARTITIONED BY (ds STRING); 
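Each distinct value of ds becomes its own subdirectory under the table's storage directory, and the existing partitions can be listed with:
	hive> SHOW PARTITIONS invites; 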
List all tables 
	hive> SHOW TABLES; 
List tables whose names match a regular expression 
	hive> SHOW TABLES '.*s'; 
Add a column to a table 
	hive> ALTER TABLE pokes ADD COLUMNS (new_col INT); 
Add a column with a comment on the field 
	hive> ALTER TABLE invites ADD COLUMNS (new_col2 INT COMMENT 'a comment'); 
Rename a table 
	hive> ALTER TABLE events RENAME TO 3koobecaf; 
Drop a table 
	hive> DROP TABLE pokes; 
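Note that DROP TABLE removes the entire table. To remove a single column instead, the full column list can be rewritten with REPLACE COLUMNS; a sketch that drops the new_col column added above (this changes only the table metadata, not the underlying files):
	hive> ALTER TABLE pokes REPLACE COLUMNS (foo INT, bar STRING); 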
DML Operations 
Load data from a file into a table 
	hive> LOAD DATA LOCAL INPATH './examples/files/kv1.txt' OVERWRITE INTO TABLE pokes; 
Load local data, specifying partition information 
	hive> LOAD DATA LOCAL INPATH './examples/files/kv2.txt' OVERWRITE INTO TABLE invites PARTITION (ds='2008-08-15'); 
Load HDFS data, specifying partition information 
	hive> LOAD DATA INPATH '/user/myname/kv2.txt' OVERWRITE INTO TABLE invites PARTITION (ds='2008-08-15'); 
	The above command will load data from an HDFS file/directory to the table. Note that loading data from HDFS will result in moving the file/directory. As a result, the operation is almost instantaneous. 
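If moving the files is undesirable, an external table can reference the data in place instead; a minimal sketch, where the table name invites_ext and the HDFS path are hypothetical:
	hive> CREATE EXTERNAL TABLE invites_ext (foo INT, bar STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LOCATION '/user/myname/invites_data'; 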
SQL Operations 
Query with a condition 
	hive> SELECT a.foo FROM invites a WHERE a.ds='<DATE>'; 
Write query output to an HDFS directory 
	hive> INSERT OVERWRITE DIRECTORY '/tmp/hdfs_out' SELECT a.* FROM invites a WHERE a.ds='<DATE>'; 
Write query results to a local directory 
	hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/local_out' SELECT a.* FROM pokes a; 
More INSERT examples, writing into tables, HDFS directories, and local directories 
	hive> INSERT OVERWRITE TABLE events SELECT a.* FROM profiles a; 
	hive> INSERT OVERWRITE TABLE events SELECT a.* FROM profiles a WHERE a.key < 100; 
	hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/reg_3' SELECT a.* FROM events a; 
	hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_4' select a.invites, a.pokes FROM profiles a; 
	hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_5' SELECT COUNT(1) FROM invites a WHERE a.ds='<DATE>'; 
	hive> INSERT OVERWRITE DIRECTORY '/tmp/reg_5' SELECT a.foo, a.bar FROM invites a; 
	hive> INSERT OVERWRITE LOCAL DIRECTORY '/tmp/sum' SELECT SUM(a.pc) FROM pc1 a; 
Insert aggregated results from one table into another (two equivalent forms) 
	hive> FROM invites a INSERT OVERWRITE TABLE events SELECT a.bar, count(1) WHERE a.foo > 0 GROUP BY a.bar; 
	hive> INSERT OVERWRITE TABLE events SELECT a.bar, count(1) FROM invites a WHERE a.foo > 0 GROUP BY a.bar; 
JOIN 
	hive> FROM pokes t1 JOIN invites t2 ON (t1.bar = t2.bar) INSERT OVERWRITE TABLE events SELECT t1.bar, t1.foo, t2.foo; 
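Hive also supports outer joins; for example, keeping every row of pokes even when no invites row matches:
	hive> FROM pokes t1 LEFT OUTER JOIN invites t2 ON (t1.bar = t2.bar) INSERT OVERWRITE TABLE events SELECT t1.bar, t1.foo, t2.foo; 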
Multi-table insert: scan one source table once and write into several destinations 
FROM src 
	INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100 
	INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
	INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300 
	INSERT OVERWRITE LOCAL DIRECTORY '/tmp/dest4.out' SELECT src.value WHERE src.key >= 300; 
Stream data through an external script (map-side TRANSFORM) 
	hive> FROM invites a INSERT OVERWRITE TABLE events SELECT TRANSFORM(a.foo, a.bar) AS (oof, rab) USING '/bin/cat' WHERE a.ds > '2008-08-09'; 
	This streams the data in the map phase through the script /bin/cat (like hadoop streaming). Similarly - streaming can be used on the reduce side (please see the Hive Tutorial or examples) 
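A sketch of the reduce-side form, following the pattern in the Hive Tutorial: the map-side TRANSFORM output is clustered by key so that a second TRANSFORM runs in the reduce phase (/bin/cat is used here as a trivial stand-in for real map and reduce scripts):
	hive> FROM (FROM invites a SELECT TRANSFORM(a.foo, a.bar) AS (oof, rab) USING '/bin/cat' CLUSTER BY oof) map_output INSERT OVERWRITE TABLE events SELECT TRANSFORM(map_output.oof, map_output.rab) AS (oof, rab) USING '/bin/cat'; 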
A Complete Example 
Create a table 
	CREATE TABLE u_data ( 
	userid INT, 
	movieid INT, 
	rating INT, 
	unixtime STRING) 
	ROW FORMAT DELIMITED 
	FIELDS TERMINATED BY '\t' 
	STORED AS TEXTFILE; 
Download the sample data file and unpack it 
	wget http://www.grouplens.org/system/files/ml-data.tar__0.gz 
	tar xvzf ml-data.tar__0.gz 
Load the data into the table 
	LOAD DATA LOCAL INPATH 'ml-data/u.data' 
	OVERWRITE INTO TABLE u_data; 
Count the total number of rows 
	SELECT COUNT(1) FROM u_data; 
Now for some more complex analysis 
Create a file weekday_mapper.py that converts each record's timestamp to a day of the week 
	import sys
	import datetime
	
	for line in sys.stdin:
	    line = line.strip()
	    userid, movieid, rating, unixtime = line.split('\t')
	    # derive the day of the week (1-7) from the unix timestamp
	    weekday = datetime.datetime.fromtimestamp(float(unixtime)).isoweekday()
	    print('\t'.join([userid, movieid, rating, str(weekday)]))
Use the mapper script 
Create the target table, splitting each row into fields on the tab delimiter 
	CREATE TABLE u_data_new ( 
	userid INT, 
	movieid INT, 
	rating INT, 
	weekday INT) 
	ROW FORMAT DELIMITED 
	FIELDS TERMINATED BY '\t'; 
Register the Python script with Hive 
	add FILE weekday_mapper.py; 
Transform the data, computing the weekday for each record 
	INSERT OVERWRITE TABLE u_data_new 
	SELECT 
	TRANSFORM (userid, movieid, rating, unixtime) 
	USING 'python weekday_mapper.py' 
	AS (userid, movieid, rating, weekday) 
	FROM u_data; 
Count the ratings per weekday 
	SELECT weekday, COUNT(1) 
	FROM u_data_new 
	GROUP BY weekday; 
Processing Apache Weblog Data 
Parse each web log line with a regular expression (using the contrib RegexSerDe), extracting the needed fields into table columns 
	add jar ../build/contrib/hive_contrib.jar; 
	CREATE TABLE apachelog ( 
	host STRING, 
	identity STRING, 
	user STRING, 
	time STRING, 
	request STRING, 
	status STRING, 
	size STRING, 
	referer STRING, 
	agent STRING) 
	ROW FORMAT SERDE 'org.apache.hadoop.hive.contrib.serde2.RegexSerDe' 
	WITH SERDEPROPERTIES ( 
	"input.regex" = "([^ ]*) ([^ ]*) ([^ ]*) (-|\\[[^\\]]*\\]) ([^ \"]*|\"[^\"]*\") (-|[0-9]*) (-|[0-9]*)(?: ([^ \"]*|\"[^\"]*\") ([^ \"]*|\"[^\"]*\"))?", 
	"output.format.string" = "%1$s %2$s %3$s %4$s %5$s %6$s %7$s %8$s %9$s" 
	) 
	STORED AS TEXTFILE;
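Once the table exists, a log file can be loaded and queried like any other table; a sketch, where the local file name access_log is a hypothetical placeholder:
	hive> LOAD DATA LOCAL INPATH 'access_log' INTO TABLE apachelog; 
	hive> SELECT host, status, request FROM apachelog LIMIT 10; 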

 
