Posted to commits@hive.apache.org by kh...@apache.org on 2014/08/20 00:41:13 UTC

svn commit: r1619005 [9/9] - in /hive/trunk: ./ accumulo-handler/ accumulo-handler/src/ accumulo-handler/src/java/ accumulo-handler/src/java/org/ accumulo-handler/src/java/org/apache/ accumulo-handler/src/java/org/apache/hadoop/ accumulo-handler/src/ja...

Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_queries.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_queries.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_queries.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,909 @@
+PREHOOK: query: DROP TABLE accumulo_table_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_table_1(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_1
+POSTHOOK: query: CREATE TABLE accumulo_table_1(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_1
+PREHOOK: query: DESCRIBE EXTENDED accumulo_table_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@accumulo_table_1
+POSTHOOK: query: DESCRIBE EXTENDED accumulo_table_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@accumulo_table_1
+key                 	int                 	from deserializer   
+value               	string              	from deserializer   
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: select * from accumulo_table_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_table_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key % 2) = 0) (type: boolean)
+              Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat
+                      output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat
+                      serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe
+                      name: default.accumulo_table_1
+
+PREHOOK: query: FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_1
+POSTHOOK: query: FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_1
+PREHOOK: query: DROP TABLE accumulo_table_2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE EXTERNAL TABLE accumulo_table_2(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_2
+POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_table_2(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_2
+PREHOOK: query: EXPLAIN 
+SELECT Y.* 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT Y.* 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: accumulo_table_1
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: UDFToDouble(key) is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: UDFToDouble(_col0) (type: double)
+                  sort order: +
+                  Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          TableScan
+            alias: src
+            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: UDFToDouble(key) is not null (type: boolean)
+              Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: UDFToDouble(_col0) (type: double)
+                  sort order: +
+                  Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                  Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 
+            1 {VALUE._col0} {VALUE._col1}
+          outputColumnNames: _col2, _col3
+          Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: string), _col3 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col1 (type: string)
+              sort order: ++
+              Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 20
+            Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT Y.* 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT Y.* 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.* FROM src) Y
+ON (x.key = Y.key)
+ORDER BY key, value LIMIT 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+104	val_104
+104	val_104
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+PREHOOK: query: EXPLAIN 
+SELECT Y.*
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN 
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+SELECT Y.*
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN 
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: accumulo_table_1
+            filterExpr: (100 < key) (type: boolean)
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          TableScan
+            alias: accumulo_table_2
+            filterExpr: (key < 120) (type: boolean)
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 
+            1 {KEY.reducesinkkey0} {VALUE._col0}
+          outputColumnNames: _col2, _col3
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: int), _col3 (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int), _col1 (type: string)
+              sort order: ++
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT Y.*
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN 
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key,value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Input: default@accumulo_table_2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT Y.*
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x
+JOIN 
+(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y
+ON (x.key = Y.key)
+ORDER BY key,value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Input: default@accumulo_table_2
+#### A masked pattern was here ####
+12	val_12
+104	val_104
+114	val_114
+116	val_116
+118	val_118
+PREHOOK: query: DROP TABLE empty_accumulo_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE empty_accumulo_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE empty_accumulo_table(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@empty_accumulo_table
+POSTHOOK: query: CREATE TABLE empty_accumulo_table(key int, value string) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@empty_accumulo_table
+PREHOOK: query: DROP TABLE empty_normal_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE empty_normal_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE empty_normal_table(key int, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@empty_normal_table
+POSTHOOK: query: CREATE TABLE empty_normal_table(key int, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@empty_normal_table
+PREHOOK: query: select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@empty_accumulo_table
+PREHOOK: Input: default@empty_normal_table
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@empty_accumulo_table
+POSTHOOK: Input: default@empty_normal_table
+#### A masked pattern was here ####
+0
+0
+PREHOOK: query: select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Input: default@empty_normal_table
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Input: default@empty_normal_table
+#### A masked pattern was here ####
+0
+155
+PREHOOK: query: select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@empty_accumulo_table
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@empty_accumulo_table
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0
+500
+PREHOOK: query: select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+155
+500
+PREHOOK: query: CREATE TABLE accumulo_table_3(key int, value string, count int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:val,cf2:count"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_3
+POSTHOOK: query: CREATE TABLE accumulo_table_3(key int, value string, count int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:val,cf2:count"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_3
+PREHOOK: query: EXPLAIN 
+INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN 
+INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: UDFToDouble(key) is not null (type: boolean)
+              Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: key
+                Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(key)
+                  keys: key (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: bigint)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: accumulo_table_1
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: UDFToDouble(key) is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: UDFToDouble(_col0) (type: double)
+                  sort order: +
+                  Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: UDFToDouble(_col0) (type: double)
+              sort order: +
+              Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+              Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          condition expressions:
+            0 {VALUE._col0} {VALUE._col1}
+            1 {VALUE._col1}
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), UDFToInteger(_col3) (type: int)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat
+                  output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat
+                  serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe
+                  name: default.accumulo_table_3
+
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_3
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_3
+SELECT x.key, x.value, Y.count 
+FROM 
+(SELECT accumulo_table_1.* FROM accumulo_table_1) x
+JOIN 
+(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y
+ON (x.key = Y.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_3
+PREHOOK: query: select count(1) from accumulo_table_3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from accumulo_table_3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+155
+PREHOOK: query: select * from accumulo_table_3 order by key, value limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from accumulo_table_3 order by key, value limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+0	val_0	3
+2	val_2	1
+4	val_4	1
+8	val_8	1
+10	val_10	1
+PREHOOK: query: select key, count from accumulo_table_3 order by key, count desc limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, count from accumulo_table_3 order by key, count desc limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_3
+#### A masked pattern was here ####
+0	3
+2	1
+4	1
+8	1
+10	1
+PREHOOK: query: DROP TABLE accumulo_table_4
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_4
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_4
+POSTHOOK: query: CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_4
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2 
+FROM src WHERE key=98 OR key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_4
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2 
+FROM src WHERE key=98 OR key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_4
+PREHOOK: query: SELECT * FROM accumulo_table_4 ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_4
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM accumulo_table_4 ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_4
+#### A masked pattern was here ####
+98	val_98	99	100
+100	val_100	101	102
+PREHOOK: query: DROP TABLE accumulo_table_5
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_5
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE EXTERNAL TABLE accumulo_table_5(key int, value map<string,string>) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_4")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_5
+POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_table_5(key int, value map<string,string>) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*")
+TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_4")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_5
+PREHOOK: query: SELECT * FROM accumulo_table_5 ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_5
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM accumulo_table_5 ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_5
+#### A masked pattern was here ####
+98	{"b":"val_98","c":"99"}
+100	{"b":"val_100","c":"101"}
+PREHOOK: query: DROP TABLE accumulo_table_6
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_6
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_table_6(key int, value map<string,string>) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:*"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_6
+POSTHOOK: query: CREATE TABLE accumulo_table_6(key int, value map<string,string>) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,cf:*"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_6
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src
+WHERE key=98 OR key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_6
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src
+WHERE key=98 OR key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_6
+PREHOOK: query: SELECT * FROM accumulo_table_6 ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_6
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM accumulo_table_6 ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_6
+#### A masked pattern was here ####
+98	{"val_98":"98"}
+100	{"val_100":"100"}
+PREHOOK: query: DROP TABLE accumulo_table_7
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_7
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_table_7(value map<string,string>, key int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = "cf:*,:rowID"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_7
+POSTHOOK: query: CREATE TABLE accumulo_table_7(value map<string,string>, key int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = "cf:*,:rowID"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_7
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_7 
+SELECT map(value, key, upper(value), key+1), key FROM src
+WHERE key=98 OR key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_7
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_7 
+SELECT map(value, key, upper(value), key+1), key FROM src
+WHERE key=98 OR key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_7
+PREHOOK: query: SELECT * FROM accumulo_table_7 ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_7
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM accumulo_table_7 ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_7
+#### A masked pattern was here ####
+{"VAL_98":"99.0","val_98":"98"}	98
+{"VAL_100":"101.0","val_100":"100"}	100
+PREHOOK: query: DROP TABLE accumulo_table_8
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE accumulo_table_8
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@accumulo_table_8
+POSTHOOK: query: CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int) 
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES (
+"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@accumulo_table_8
+PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2 
+FROM src WHERE key=98 OR key=100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@accumulo_table_8
+POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2 
+FROM src WHERE key=98 OR key=100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@accumulo_table_8
+PREHOOK: query: SELECT * FROM accumulo_table_8 ORDER BY key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@accumulo_table_8
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM accumulo_table_8 ORDER BY key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@accumulo_table_8
+#### A masked pattern was here ####
+98	val_98	99	100
+100	val_100	101	102
+PREHOOK: query: DROP TABLE accumulo_table_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_1
+PREHOOK: Output: default@accumulo_table_1
+POSTHOOK: query: DROP TABLE accumulo_table_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_1
+POSTHOOK: Output: default@accumulo_table_1
+PREHOOK: query: DROP TABLE accumulo_table_2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_2
+PREHOOK: Output: default@accumulo_table_2
+POSTHOOK: query: DROP TABLE accumulo_table_2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_2
+POSTHOOK: Output: default@accumulo_table_2
+PREHOOK: query: DROP TABLE accumulo_table_3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_3
+PREHOOK: Output: default@accumulo_table_3
+POSTHOOK: query: DROP TABLE accumulo_table_3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_3
+POSTHOOK: Output: default@accumulo_table_3
+PREHOOK: query: DROP TABLE accumulo_table_4
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_4
+PREHOOK: Output: default@accumulo_table_4
+POSTHOOK: query: DROP TABLE accumulo_table_4
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_4
+POSTHOOK: Output: default@accumulo_table_4
+PREHOOK: query: DROP TABLE accumulo_table_5
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_5
+PREHOOK: Output: default@accumulo_table_5
+POSTHOOK: query: DROP TABLE accumulo_table_5
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_5
+POSTHOOK: Output: default@accumulo_table_5
+PREHOOK: query: DROP TABLE accumulo_table_6
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_6
+PREHOOK: Output: default@accumulo_table_6
+POSTHOOK: query: DROP TABLE accumulo_table_6
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_6
+POSTHOOK: Output: default@accumulo_table_6
+PREHOOK: query: DROP TABLE accumulo_table_7
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_7
+PREHOOK: Output: default@accumulo_table_7
+POSTHOOK: query: DROP TABLE accumulo_table_7
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_7
+POSTHOOK: Output: default@accumulo_table_7
+PREHOOK: query: DROP TABLE accumulo_table_8
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@accumulo_table_8
+PREHOOK: Output: default@accumulo_table_8
+POSTHOOK: query: DROP TABLE accumulo_table_8
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@accumulo_table_8
+POSTHOOK: Output: default@accumulo_table_8
+PREHOOK: query: DROP TABLE empty_accumulo_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@empty_accumulo_table
+PREHOOK: Output: default@empty_accumulo_table
+POSTHOOK: query: DROP TABLE empty_accumulo_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@empty_accumulo_table
+POSTHOOK: Output: default@empty_accumulo_table
+PREHOOK: query: DROP TABLE empty_normal_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@empty_normal_table
+PREHOOK: Output: default@empty_normal_table
+POSTHOOK: query: DROP TABLE empty_normal_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@empty_normal_table
+POSTHOOK: Output: default@empty_normal_table
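
The queries above exercise the "accumulo.columns.mapping" convention: ":rowID" binds a Hive column to the Accumulo row ID, "cf:qual" binds a column to a single column family/qualifier pair, and "cf:*" binds a Hive map<string,string> column to an entire column family. A minimal sketch of that pattern (table and column names hypothetical, not part of this commit):

  CREATE TABLE notes(id int, body string, props map<string,string>)
  STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
  WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:body,meta:*");

  -- id becomes the Accumulo row ID, body is written to cf:body, and each
  -- entry of props becomes one cell in the meta column family.
  INSERT OVERWRITE TABLE notes SELECT key, value, map('origin', value) FROM src;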

Added: hive/trunk/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out (added)
+++ hive/trunk/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out Tue Aug 19 22:41:10 2014
@@ -0,0 +1,255 @@
+PREHOOK: query: -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE
+CREATE TABLE src_x1(key string, value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_x1
+POSTHOOK: query: -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE
+CREATE TABLE src_x1(key string, value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_x1
+PREHOOK: query: CREATE TABLE src_x2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_x2
+POSTHOOK: query: CREATE TABLE src_x2(key string, value string)
+STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_x2
+PREHOOK: query: explain
+from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > 0) and (key < 50)) (type: boolean)
+              Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), '' (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.src_x1
+            Filter Operator
+              predicate: ((key > 50) and (key < 100)) (type: boolean)
+              Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string), '' (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat
+                      output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat
+                      serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe
+                      name: default.src_x2
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_x1
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.src_x1
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.src_x1
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_x1
+PREHOOK: Output: default@src_x2
+POSTHOOK: query: from src a
+insert overwrite table src_x1
+select key,"" where a.key > 0 AND a.key < 50
+insert overwrite table src_x2
+select value,"" where a.key > 50 AND a.key < 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_x1
+POSTHOOK: Output: default@src_x2
+POSTHOOK: Lineage: src_x1.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_x1.value SIMPLE []
+PREHOOK: query: select * from src_x1 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_x1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_x1 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_x1
+#### A masked pattern was here ####
+10	
+11	
+12	
+12	
+15	
+15	
+17	
+18	
+18	
+19	
+2	
+20	
+24	
+24	
+26	
+26	
+27	
+28	
+30	
+33	
+34	
+35	
+35	
+35	
+37	
+37	
+4	
+41	
+42	
+42	
+43	
+44	
+47	
+5	
+5	
+5	
+8	
+9	
+PREHOOK: query: select * from src_x2 order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_x2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_x2 order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_x2
+#### A masked pattern was here ####
+val_51	
+val_53	
+val_54	
+val_57	
+val_58	
+val_64	
+val_65	
+val_66	
+val_67	
+val_69	
+val_70	
+val_72	
+val_74	
+val_76	
+val_77	
+val_78	
+val_80	
+val_82	
+val_83	
+val_84	
+val_85	
+val_86	
+val_87	
+val_90	
+val_92	
+val_95	
+val_96	
+val_97	
+val_98	
+PREHOOK: query: DROP TABLE src_x1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_x1
+PREHOOK: Output: default@src_x1
+POSTHOOK: query: DROP TABLE src_x1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_x1
+POSTHOOK: Output: default@src_x1
+PREHOOK: query: DROP TABLE src_x2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@src_x2
+PREHOOK: Output: default@src_x2
+POSTHOOK: query: DROP TABLE src_x2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@src_x2
+POSTHOOK: Output: default@src_x2
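
The scenario above is the HIVE-4375 regression: a single-source multi-insert that feeds one scan into both a native table and a storage-handler table used to throw an NPE. In outline (predicates arbitrary), the shape being tested is:

  FROM src a
  INSERT OVERWRITE TABLE src_x1    -- native: staged to HDFS, then committed by a Move Operator
    SELECT key, '' WHERE a.key > 0 AND a.key < 50
  INSERT OVERWRITE TABLE src_x2    -- Accumulo-backed: written directly by the File Output Operator
    SELECT value, '' WHERE a.key > 50 AND a.key < 100;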

Added: hive/trunk/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm
URL: http://svn.apache.org/viewvc/hive/trunk/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm?rev=1619005&view=auto
==============================================================================
--- hive/trunk/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm (added)
+++ hive/trunk/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm Tue Aug 19 22:41:10 2014
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.cli;
+
+import junit.framework.Test;
+import junit.framework.TestCase;
+import junit.framework.TestSuite;
+
+import java.io.*;
+import java.util.*;
+
+import org.apache.hadoop.hive.accumulo.AccumuloQTestUtil;
+import org.apache.hadoop.hive.accumulo.AccumuloTestSetup;
+import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+public class $className extends TestCase {
+
+  private static final String HIVE_ROOT = AccumuloQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
+  private AccumuloQTestUtil qt;
+  private AccumuloTestSetup setup;
+
+  public $className(String name, AccumuloTestSetup setup) {
+    super(name);
+    qt = null;
+    this.setup = setup;
+  }
+
+  @Override
+  protected void setUp() {
+
+    MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
+    String initScript = "$initScript";
+    String cleanupScript = "$cleanupScript";
+
+    try {
+      qt = new AccumuloQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
+          setup, initScript, cleanupScript);
+    } catch (Exception e) {
+      System.err.println("Exception: " + e.getMessage());
+      e.printStackTrace();
+      System.err.flush();
+      fail("Unexpected exception in setup: " + e);
+    }
+  }
+
+  @Override
+  protected void tearDown() {
+    try {
+      qt.shutdown();
+    }
+    catch (Exception e) {
+      System.err.println("Exception: " + e.getMessage());
+      e.printStackTrace();
+      System.err.flush();
+      fail("Unexpected exception in tearDown");
+    }
+  }
+
+  public static Test suite() {
+    Set<String> qFilesToExecute = new HashSet<String>();
+    String qFiles = System.getProperty("qfile", "").trim();
+    if(!qFiles.isEmpty()) {
+      for(String qFile : qFiles.split(",")) {
+        qFile = qFile.trim();
+        if(!qFile.isEmpty()) {
+          qFilesToExecute.add(qFile);
+        }
+      }
+    }
+    TestSuite suite = new TestSuite();
+    AccumuloTestSetup setup = new AccumuloTestSetup(suite);
+#foreach ($qf in $qfiles)
+  #set ($fname = $qf.getName())
+  #set ($eidx = $fname.indexOf('.'))
+  #set ($tname = $fname.substring(0, $eidx))
+    if(qFilesToExecute.isEmpty() || qFilesToExecute.contains("$fname")) {
+      suite.addTest(new $className("testCliDriver_$tname", setup));
+    }
+#end
+    return setup;
+  }
+
+#foreach ($qf in $qfiles)
+  #set ($fname = $qf.getName())
+  #set ($eidx = $fname.indexOf('.'))
+  #set ($tname = $fname.substring(0, $eidx))
+  #set ($fpath = $qfilesMap.get($fname))
+  public void testCliDriver_$tname() throws Exception {
+    runTest("$tname", "$fname", (HIVE_ROOT + "$fpath"));
+  }
+
+#end
+
+  private void runTest(String tname, String fname, String fpath) throws Exception {
+    long startTime = System.currentTimeMillis();
+    try {
+      System.err.println("Begin query: " + fname);
+
+      qt.addFile(fpath);
+
+      if (qt.shouldBeSkipped(fname)) {
+        System.err.println("Test " + fname + " skipped");
+        return;
+      }
+
+      qt.cliInit(fname);
+      qt.clearTestSideEffects();
+      int ecode = qt.executeClient(fname);
+      if (ecode != 0) {
+        qt.failed(ecode, fname, null);
+      }
+
+      ecode = qt.checkCliDriverResults(fname);
+      if (ecode != 0) {
+        qt.failedDiff(ecode, fname, null);
+      }
+      qt.clearPostTestEffects();
+
+    } catch (Throwable e) {
+      qt.failed(e, fname, null);
+    }
+
+    long elapsedTime = System.currentTimeMillis() - startTime;
+    System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s");
+    assertTrue("Test passed", true);
+  }
+}
+

Modified: hive/trunk/itests/qtest/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/itests/qtest/pom.xml?rev=1619005&r1=1619004&r2=1619005&view=diff
==============================================================================
--- hive/trunk/itests/qtest/pom.xml (original)
+++ hive/trunk/itests/qtest/pom.xml Tue Aug 19 22:41:10 2014
@@ -42,6 +42,11 @@
    <!-- dependencies are always listed in sorted order by groupId, artifactId -->
     <!-- test intra-project -->
     <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-ant</artifactId>
       <version>${project.version}</version>
@@ -402,6 +407,7 @@
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/positive/" />
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/negative/" />
                 <mkdir dir="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/" />
+                <mkdir dir="${project.build.directory}/qfile-results/accumulo-handler/positive/" />
 
                 <mkdir dir="${project.build.directory}/qfile-results/contribpositive"/>
                 <mkdir dir="${project.build.directory}/qfile-results/contribnegative"/>
@@ -590,6 +596,20 @@
                   initScript="q_test_init.sql"
                   cleanupScript="q_test_cleanup.sql"/>
 
+                <!-- Accumulo Positive -->
+                <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
+                  outputDirectory="${project.build.directory}/generated-test-sources/java/org/apache/hadoop/hive/cli/"
+                  templatePath="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/templates/" template="TestAccumuloCliDriver.vm"
+                  queryDirectory="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/queries/positive/"
+                  queryFile="${qfile}"
+                  runDisabled="${run_disabled}"
+                  clusterMode="${clustermode}"
+                  resultsDirectory="${basedir}/${hive.path.to.root}/accumulo-handler/src/test/results/positive/" className="TestAccumuloCliDriver"
+                  logFile="${project.build.directory}/testaccumuloclidrivergen.log"
+                  logDirectory="${project.build.directory}/qfile-results/accumulo-handler/positive/"
+                  initScript="q_test_init.sql"
+                  cleanupScript="q_test_cleanup.sql"/>
+
 
                 <!-- Beeline -->
                 <if>

Modified: hive/trunk/itests/util/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/pom.xml?rev=1619005&r1=1619004&r2=1619005&view=diff
==============================================================================
--- hive/trunk/itests/util/pom.xml (original)
+++ hive/trunk/itests/util/pom.xml Tue Aug 19 22:41:10 2014
@@ -35,6 +35,21 @@
    <!-- dependencies are always listed in sorted order by groupId, artifactId -->
     <!-- test intra-project -->
     <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-common</artifactId>
       <version>${project.version}</version>

Added: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java?rev=1619005&view=auto
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java (added)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java Tue Aug 19 22:41:10 2014
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.accumulo;
+
+import org.apache.hadoop.hive.ql.QTestUtil;
+
+/**
+ * AccumuloQTestUtil initializes Accumulo-specific test fixtures.
+ */
+public class AccumuloQTestUtil extends QTestUtil {
+  public AccumuloQTestUtil(String outDir, String logDir, MiniClusterType miniMr,
+      AccumuloTestSetup setup, String initScript, String cleanupScript) throws Exception {
+
+    super(outDir, logDir, miniMr, null, initScript, cleanupScript);
+    setup.setupWithHiveConf(conf);
+    super.init();
+  }
+
+  @Override
+  public void init() throws Exception {
+    // Intentionally a no-op: the constructor already ran super.init() after the Accumulo setup.
+  }
+}
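
A hedged sketch, not part of this commit: the qtestgen template
(TestAccumuloCliDriver.vm, configured in itests above) generates a JUnit 3
driver that wires AccumuloQTestUtil together with the AccumuloTestSetup class
added just below, roughly as follows. The class name, directory paths, and
the MiniClusterType.none value here are illustrative assumptions, not taken
from the template.

package org.apache.hadoop.hive.cli;

import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;

import org.apache.hadoop.hive.accumulo.AccumuloQTestUtil;
import org.apache.hadoop.hive.accumulo.AccumuloTestSetup;
import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;

// Illustrative stand-in for the generated TestAccumuloCliDriver.
public class AccumuloCliDriverSketch extends TestCase {

  // Shared decorator: stops the MiniAccumuloCluster in tearDown(); the
  // cluster itself is started lazily by AccumuloTestSetup.setupWithHiveConf.
  private static AccumuloTestSetup setup;

  private AccumuloQTestUtil qt;

  public static Test suite() {
    TestSuite suite = new TestSuite(AccumuloCliDriverSketch.class);
    setup = new AccumuloTestSetup(suite);
    return setup;
  }

  @Override
  protected void setUp() throws Exception {
    // The AccumuloQTestUtil constructor pushes the Accumulo connection
    // settings into the HiveConf and only then runs QTestUtil's
    // initialization. The "target/..." paths are assumptions.
    qt = new AccumuloQTestUtil("target/qfile-results", "target/qfile-logs",
        MiniClusterType.none, setup, "q_test_init.sql", "q_test_cleanup.sql");
  }

  public void testPlaceholder() {
    // One test method per .q file is emitted by the Velocity template; each
    // would run its query through qt and diff against results/positive.
    assertNotNull(qt);
  }
}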

Added: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java?rev=1619005&view=auto
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java (added)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java Tue Aug 19 22:41:10 2014
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.accumulo;
+
+import java.io.File;
+import java.sql.Date;
+import java.sql.Timestamp;
+
+import junit.extensions.TestSetup;
+import junit.framework.Test;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+/**
+ * Starts and stops a MiniAccumuloCluster for testing purposes and seeds a test table
+ */
+public class AccumuloTestSetup extends TestSetup {
+  public static final String PASSWORD = "password";
+  public static final String TABLE_NAME = "accumuloHiveTable";
+
+  protected MiniAccumuloCluster miniCluster;
+
+  public AccumuloTestSetup(Test test) {
+    super(test);
+  }
+
+  protected void setupWithHiveConf(HiveConf conf) throws Exception {
+    if (null == miniCluster) {
+      String testTmpDir = System.getProperty("test.tmp.dir");
+      File tmpDir = new File(testTmpDir, "accumulo");
+
+      MiniAccumuloConfig cfg = new MiniAccumuloConfig(tmpDir, PASSWORD);
+      cfg.setNumTservers(1);
+
+      miniCluster = new MiniAccumuloCluster(cfg);
+
+      miniCluster.start();
+
+      createAccumuloTable(miniCluster.getConnector("root", PASSWORD));
+    }
+
+    // Set up the connection information so Hive can reach the mini cluster
+    conf.set(AccumuloConnectionParameters.USER_NAME, "root");
+    conf.set(AccumuloConnectionParameters.USER_PASS, PASSWORD);
+    conf.set(AccumuloConnectionParameters.ZOOKEEPERS, miniCluster.getZooKeepers());
+    conf.set(AccumuloConnectionParameters.INSTANCE_NAME, miniCluster.getInstanceName());
+  }
+
+  protected void createAccumuloTable(Connector conn) throws TableExistsException,
+      TableNotFoundException, AccumuloException, AccumuloSecurityException {
+    TableOperations tops = conn.tableOperations();
+    if (tops.exists(TABLE_NAME)) {
+      tops.delete(TABLE_NAME);
+    }
+
+    tops.create(TABLE_NAME);
+
+    boolean[] booleans = new boolean[] { true, false, true };
+    byte[] bytes = new byte[] { Byte.MIN_VALUE, -1, Byte.MAX_VALUE };
+    short[] shorts = new short[] { Short.MIN_VALUE, -1, Short.MAX_VALUE };
+    int[] ints = new int[] { Integer.MIN_VALUE, -1, Integer.MAX_VALUE };
+    long[] longs = new long[] { Long.MIN_VALUE, -1, Long.MAX_VALUE };
+    String[] strings = new String[] { "Hadoop, Accumulo", "Hive", "Test Strings" };
+    float[] floats = new float[] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE };
+    double[] doubles = new double[] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE };
+    HiveDecimal[] decimals = new HiveDecimal[] { HiveDecimal.create("3.14159"), HiveDecimal.create("2.71828"), HiveDecimal.create("0.57721") };
+    Date[] dates = new Date[] { Date.valueOf("2014-01-01"), Date.valueOf("2014-03-01"), Date.valueOf("2014-05-01") };
+    Timestamp[] timestamps = new Timestamp[] { new Timestamp(50), new Timestamp(100), new Timestamp(150) };
+
+    BatchWriter bw = conn.createBatchWriter(TABLE_NAME, new BatchWriterConfig());
+    final String cf = "cf";
+    try {
+      for (int i = 0; i < 3; i++) {
+        Mutation m = new Mutation("key-" + i);
+        m.put(cf, "cq-boolean", Boolean.toString(booleans[i]));
+        m.put(cf.getBytes(), "cq-byte".getBytes(), new byte[] {bytes[i]});
+        m.put(cf, "cq-short", Short.toString(shorts[i]));
+        m.put(cf, "cq-int", Integer.toString(ints[i]));
+        m.put(cf, "cq-long", Long.toString(longs[i]));
+        m.put(cf, "cq-string", strings[i]);
+        m.put(cf, "cq-float", Float.toString(floats[i]));
+        m.put(cf, "cq-double", Double.toString(doubles[i]));
+        m.put(cf, "cq-decimal", decimals[i].toString());
+        m.put(cf, "cq-date", dates[i].toString());
+        m.put(cf, "cq-timestamp", timestamps[i].toString());
+
+        bw.addMutation(m);
+      }
+    } finally {
+      bw.close();
+    }
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    if (null != miniCluster) {
+      miniCluster.stop();
+      miniCluster = null;
+    }
+  }
+}
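
For debugging a failing q-file run, a minimal sketch (again, not part of this
commit) of reading back the rows written by createAccumuloTable(). It assumes
a Connector obtained as in setupWithHiveConf, e.g.
miniCluster.getConnector("root", PASSWORD); the class and method names are
hypothetical.

package org.apache.hadoop.hive.accumulo;

import java.util.Map;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

// Hypothetical helper, for illustration only.
public final class AccumuloTableDumpSketch {

  private AccumuloTableDumpSketch() {}

  // Prints every cell in the seeded table: rows key-0..key-2 with
  // qualifiers cq-boolean, cq-byte, ..., cq-timestamp under family "cf".
  public static void dump(Connector conn) throws TableNotFoundException {
    Scanner scanner =
        conn.createScanner(AccumuloTestSetup.TABLE_NAME, Authorizations.EMPTY);
    for (Map.Entry<Key, Value> entry : scanner) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}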

Modified: hive/trunk/packaging/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/packaging/pom.xml?rev=1619005&r1=1619004&r2=1619005&view=diff
==============================================================================
--- hive/trunk/packaging/pom.xml (original)
+++ hive/trunk/packaging/pom.xml Tue Aug 19 22:41:10 2014
@@ -182,6 +182,11 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-accumulo-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
       <groupId>org.apache.hive.hcatalog</groupId>
       <artifactId>hive-hcatalog-streaming</artifactId>
       <version>${project.version}</version>

Modified: hive/trunk/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/pom.xml?rev=1619005&r1=1619004&r2=1619005&view=diff
==============================================================================
--- hive/trunk/pom.xml (original)
+++ hive/trunk/pom.xml Tue Aug 19 22:41:10 2014
@@ -31,6 +31,7 @@
   </prerequisites>
 
   <modules>
+    <module>accumulo-handler</module>
     <module>ant</module>
     <module>beeline</module>
     <module>cli</module>
@@ -87,6 +88,7 @@
     <maven.build-helper.plugin.version>1.8</maven.build-helper.plugin.version>
 
     <!-- Library Dependency Versions -->
+    <accumulo.version>1.6.0</accumulo.version>
     <activemq.version>5.5.0</activemq.version>
     <ant.version>1.9.1</ant.version>
     <antlr.version>3.4</antlr.version>
@@ -365,6 +367,31 @@
         <version>${commons-exec.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-core</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-fate</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-minicluster</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-start</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-trace</artifactId>
+        <version>${accumulo.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.activemq</groupId>
         <artifactId>activemq-core</artifactId>
         <version>${activemq.version}</version>