Posted to commits@hive.apache.org by pr...@apache.org on 2015/05/12 03:23:47 UTC

[01/39] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)
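
For context, the golden file added below records the expected EXPLAIN plans and query results for vectorized left semi map joins. A rough HiveQL sketch of the pattern these tests exercise follows (the t1/t2 tables are the test's own; the two session settings are assumptions typical of vectorization q-tests, not taken from this commit):

    -- assumed session settings for a vectorized map-side join
    set hive.vectorized.execution.enabled=true;
    set hive.auto.convert.join=true;

    -- left semi join: emit each row of t1 whose key matches at least one
    -- row in t2; t2 contributes no output columns, and duplicate keys in
    -- t2 do not multiply the output rows
    select * from t1 a left semi join t2 b on a.key = b.key
    sort by a.key, a.value;

In the plans that follow, the build side's Group By Operator (mode: hash) deduplicates the right-hand keys before the HashTable Sink, which is what gives the semi join its at-most-once match semantics.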

Repository: hive
Updated Branches:
  refs/heads/llap dc7ceb4e2 -> e6b1556e3


http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
new file mode 100644
index 0000000..23a8adb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
@@ -0,0 +1,13572 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from t1 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: select * from t2 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+20	val_10
+4	val_2
+8	val_4
+PREHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t3
+POSTHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t3
+PREHOOK: query: select * from t3 sort by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 sort by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+2	val_2
+20	val_10
+4	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_4
+8	val_8
+9	val_9
+PREHOOK: query: create table t4 (key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t4
+POSTHOOK: query: create table t4 (key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t4
+PREHOOK: query: select * from t4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+4	val_4
+8	val_8
+PREHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key < 15) (type: boolean)
+              Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int), key (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col1 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col1 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_2
+val_4
+val_5
+val_5
+val_5
+val_8
+val_9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((value < 'val_10') and key is not null) (type: boolean)
+              Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int), value (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+PREHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t3 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t3 
+          TableScan
+            alias: t3
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > 5) (type: boolean)
+              Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 651 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 7 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_10
+val_8
+val_9
+PREHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t2 
+          TableScan
+            alias: t2
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string)
+          outputColumnNames: _col0
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:t1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:t1 
+          TableScan
+            alias: t1
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > 2) (type: boolean)
+              Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (2 * key) is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 (2 * _col0) (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 (2 * _col0) (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+8	val_8
+PREHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              HashTable Sink Operator
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 key (type: int)
+                      2 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                     Left Semi Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col5, _col6
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int), _col1 (type: string)
+                    sort order: ++
+                    Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col2 (type: int), _col3 (type: string)
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+10	val_10	10	val_5
+10	val_10	10	val_5
+10	val_10	10	val_5
+4	val_4	4	val_2
+8	val_8	8	val_4
+PREHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key is not null and value is not null) (type: boolean)
+              Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int), value (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int), value (type: string)
+                      1 _col0 (type: int), _col1 (type: string)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key is not null and value is not null) (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int), value (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-1 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+                      2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+                      2 _col0 (type: int)
+
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Left Semi Join 0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 24 Data size: 2250 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+                2 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 key (type: int)
+                    2 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+                   Left Semi Join 1 to 2
+              keys:
+                0 key (type: int)
+                1 key (type: int)
+                2 _col0 (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+               Left Semi Join 1 to 2
+          keys:
+            0 key (type: int)
+            1 key (type: int)
+            2 _col0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int)
+              sort order: +
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col0 (type: int)
+                    2 key (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+                   Left Outer Join0 to 2
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-6 is a root stage
+  Stage-2 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-6
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        a 
+          Fetch Operator
+            limit: -1
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 key (type: int)
+                    1 _col0 (type: int)
+                    2 key (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+                   Right Outer Join0 to 2
+              keys:
+                0 key (type: int)
+                1 _col0 (type: int)
+                2 key (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: int)
+                sort order: +
+                Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              key expressions: key (type: int)
+              sort order: +
+              Map-reduce partition columns: key (type: int)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Semi Join 0 to 1
+               Outer Join 0 to 2
+          keys:
+            0 key (type: int)
+            1 _col0 (type: int)
+            2 key (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int)
+              sort order: +
+              Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 48 Data size: 4501 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-8 is a root stage
+  Stage-3 depends on stages: Stage-8
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-8
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+        c 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+        c 
+          TableScan
+            alias: c
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              keys:
+                0 _col1 (type: string)
+                1 value (type: string)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12 Data size: 1125 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Left Outer Join0 to 1
+                  keys:
+                    0 _col1 (type: string)
+                    1 value (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 13 Data size: 1237 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $hdt$_1:b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $hdt$_1:b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: value is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 _col1 (type: string)
+                      1 _col0 (type: string)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 22 Data size: 2046 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > 100) and value is not null) (type: boolean)
+              Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                Map Join Operator
+                  condition map:
+                       Left Semi Join 0 to 1
+                  keys:
+                    0 _col1 (type: string)
+                    1 _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b 
+          TableScan
+            alias: b
+            Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int)
+                outputColumnNames: key
+                Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: key (type: int)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: int)
+                      1 _col0 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      

<TRUNCATED>

[09/39] hive git commit: HIVE-10592: ORC file dump in JSON format (Prasanth Jayachandran reviewed by Gopal V)

Posted by pr...@apache.org.
HIVE-10592: ORC file dump in JSON format (Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/80fb8913
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/80fb8913
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/80fb8913

Branch: refs/heads/llap
Commit: 80fb8913196eef8e4125544c3138b0c73be267b7
Parents: 93995c8
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Wed May 6 18:52:17 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Wed May 6 18:52:17 2015 -0700

----------------------------------------------------------------------
 bin/ext/orcfiledump.sh                          |    9 +-
 .../hive/ql/io/orc/ColumnStatisticsImpl.java    |   16 +-
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |   91 +-
 .../hadoop/hive/ql/io/orc/JsonFileDump.java     |  365 +++++
 .../hadoop/hive/ql/io/orc/TestJsonFileDump.java |  138 ++
 ql/src/test/resources/orc-file-dump.json        | 1354 ++++++++++++++++++
 6 files changed, 1929 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/bin/ext/orcfiledump.sh
----------------------------------------------------------------------
diff --git a/bin/ext/orcfiledump.sh b/bin/ext/orcfiledump.sh
index 752e437..6139de2 100644
--- a/bin/ext/orcfiledump.sh
+++ b/bin/ext/orcfiledump.sh
@@ -23,5 +23,12 @@ orcfiledump () {
 }
 
 orcfiledump_help () {
-  echo "usage ./hive orcfiledump [-d] [--rowindex <col_ids>] <path_to_file>"
+  echo "usage ./hive orcfiledump [-h] [-j] [-p] [-t] [-d] [-r <col_ids>] <path_to_file>"
+  echo ""
+  echo "  --json (-j)                 Print metadata in JSON format"
+  echo "  --pretty (-p)               Pretty print json metadata output"
+  echo "  --timezone (-t)             Print writer's time zone"
+  echo "  --data (-d)                 Should the data be printed"
+  echo "  --rowindex (-r) <_col_ids_> Comma separated list of column ids for which row index should be printed"
+  echo "  --help (-h)                 Print help message"
 } 
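
The new flags can also be exercised programmatically. A minimal sketch,
assuming a readable ORC file at the hypothetical path /tmp/example.orc
(FileDump.main is public and takes the same arguments as the wrapper, per
the FileDump.java diff below):

    import org.apache.hadoop.hive.ql.io.orc.FileDump;

    public class OrcJsonDumpDemo {
      public static void main(String[] args) throws Exception {
        // equivalent to the shell wrapper: ./hive orcfiledump -j -p /tmp/example.orc
        FileDump.main(new String[] {"-j", "-p", "/tmp/example.orc"});
      }
    }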

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
index 7cfbd81..ffba3c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java
@@ -699,12 +699,18 @@ class ColumnStatisticsImpl implements ColumnStatistics {
 
     @Override
     public Date getMinimum() {
+      if (minimum == null) {
+        return null;
+      }
       minDate.set(minimum);
       return minDate.get();
     }
 
     @Override
     public Date getMaximum() {
+      if (maximum == null) {
+        return null;
+      }
       maxDate.set(maximum);
       return maxDate.get();
     }
@@ -793,14 +799,12 @@ class ColumnStatisticsImpl implements ColumnStatistics {
 
     @Override
     public Timestamp getMinimum() {
-      Timestamp minTimestamp = new Timestamp(minimum);
-      return minTimestamp;
+      return minimum == null ? null : new Timestamp(minimum);
     }
 
     @Override
     public Timestamp getMaximum() {
-      Timestamp maxTimestamp = new Timestamp(maximum);
-      return maxTimestamp;
+      return maximum == null ? null : new Timestamp(maximum);
     }
 
     @Override
@@ -808,9 +812,9 @@ class ColumnStatisticsImpl implements ColumnStatistics {
       StringBuilder buf = new StringBuilder(super.toString());
       if (getNumberOfValues() != 0) {
         buf.append(" min: ");
-        buf.append(minimum);
+        buf.append(getMinimum());
         buf.append(" max: ");
-        buf.append(maximum);
+        buf.append(getMaximum());
       }
       return buf.toString();
     }
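
The guards above matter because the boxed minimum/maximum fields stay null
for a column with no non-null values, and the old code unboxed them blindly.
A minimal standalone sketch of the fixed pattern (names and values here are
illustrative, not the actual class):

    import java.sql.Timestamp;

    class NullSafeMinimum {
      // mirrors the guard added above: a column with no values has minimum == null;
      // the previous "new Timestamp(minimum)" threw NullPointerException on unboxing
      static Timestamp getMinimum(Long minimum) {
        return minimum == null ? null : new Timestamp(minimum);
      }
    }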

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
index cd4db75..33c4cd8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -50,10 +50,11 @@ import org.codehaus.jettison.json.JSONWriter;
  * A tool for printing out the file structure of ORC files.
  */
 public final class FileDump {
-  private static final String UNKNOWN = "UNKNOWN";
+  public static final String UNKNOWN = "UNKNOWN";
 
   // not used
-  private FileDump() {}
+  private FileDump() {
+  }
 
   public static void main(String[] args) throws Exception {
     Configuration conf = new Configuration();
@@ -69,21 +70,28 @@ public final class FileDump {
     }
 
     boolean dumpData = cli.hasOption('d');
-    if (cli.hasOption("rowindex")) {
-      String[] colStrs = cli.getOptionValue("rowindex").split(",");
+    if (cli.hasOption("r")) {
+      String[] colStrs = cli.getOptionValue("r").split(",");
       rowIndexCols = new ArrayList<Integer>(colStrs.length);
       for (String colStr : colStrs) {
         rowIndexCols.add(Integer.parseInt(colStr));
       }
     }
 
-    boolean printTimeZone = false;
-    if (cli.hasOption('t')) {
-      printTimeZone = true;
-    }
+    boolean printTimeZone = cli.hasOption('t');
+    boolean jsonFormat = cli.hasOption('j');
     String[] files = cli.getArgs();
-    if (dumpData) printData(Arrays.asList(files), conf);
-    else printMetaData(Arrays.asList(files), conf, rowIndexCols, printTimeZone);
+    if (dumpData) {
+      printData(Arrays.asList(files), conf);
+    } else {
+      if (jsonFormat) {
+        boolean prettyPrint = cli.hasOption('p');
+        JsonFileDump.printJsonMetaData(Arrays.asList(files), conf, rowIndexCols, prettyPrint,
+            printTimeZone);
+      } else {
+        printMetaData(Arrays.asList(files), conf, rowIndexCols, printTimeZone);
+      }
+    }
   }
 
   private static void printData(List<String> files, Configuration conf) throws IOException,
@@ -100,7 +108,7 @@ public final class FileDump {
       Path path = new Path(filename);
       Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
       System.out.println("File Version: " + reader.getFileVersion().getName() +
-                         " with " + reader.getWriterVersion());
+          " with " + reader.getWriterVersion());
       RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
       System.out.println("Rows: " + reader.getNumberOfRows());
       System.out.println("Compression: " + reader.getCompression());
@@ -121,7 +129,7 @@ public final class FileDump {
       ColumnStatistics[] stats = reader.getStatistics();
       int colCount = stats.length;
       System.out.println("\nFile Statistics:");
-      for(int i=0; i < stats.length; ++i) {
+      for (int i = 0; i < stats.length; ++i) {
         System.out.println("  Column " + i + ": " + stats[i].toString());
       }
       System.out.println("\nStripes:");
@@ -140,7 +148,7 @@ public final class FileDump {
           System.out.println("  Stripe: " + stripe.toString());
         }
         long sectionStart = stripeStart;
-        for(OrcProto.Stream section: footer.getStreamsList()) {
+        for (OrcProto.Stream section : footer.getStreamsList()) {
           String kind = section.hasKind() ? section.getKind().name() : UNKNOWN;
           System.out.println("    Stream: column " + section.getColumn() +
               " section " + kind + " start: " + sectionStart +
@@ -270,7 +278,7 @@ public final class FileDump {
     return buf.toString();
   }
 
-  private static long getTotalPaddingSize(Reader reader) throws IOException {
+  public static long getTotalPaddingSize(Reader reader) throws IOException {
     long paddedBytes = 0;
     List<org.apache.hadoop.hive.ql.io.orc.StripeInformation> stripes = reader.getStripes();
     for (int i = 1; i < stripes.size(); i++) {
@@ -307,21 +315,30 @@ public final class FileDump {
         .withArgName("comma separated list of column ids for which row index should be printed")
         .withDescription("Dump stats for column number(s)")
         .hasArg()
-        .create());
+        .create('r'));
+
+    result.addOption(OptionBuilder
+        .withLongOpt("json")
+        .withDescription("Print metadata in JSON format")
+        .create('j'));
 
+    result.addOption(OptionBuilder
+            .withLongOpt("pretty")
+            .withDescription("Pretty print json metadata output")
+            .create('p'));
 
     return result;
   }
 
   private static void printMap(JSONWriter writer,
-                               Map<Object, Object> obj,
-                               List<OrcProto.Type> types,
-                               OrcProto.Type type
+      Map<Object, Object> obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     writer.array();
     int keyType = type.getSubtypes(0);
     int valueType = type.getSubtypes(1);
-    for(Map.Entry<Object,Object> item: obj.entrySet()) {
+    for (Map.Entry<Object, Object> item : obj.entrySet()) {
       writer.object();
       writer.key("_key");
       printObject(writer, item.getKey(), types, keyType);
@@ -333,34 +350,34 @@ public final class FileDump {
   }
 
   private static void printList(JSONWriter writer,
-                                List<Object> obj,
-                                List<OrcProto.Type> types,
-                                OrcProto.Type type
+      List<Object> obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     int subtype = type.getSubtypes(0);
     writer.array();
-    for(Object item: obj) {
+    for (Object item : obj) {
       printObject(writer, item, types, subtype);
     }
     writer.endArray();
   }
 
   private static void printUnion(JSONWriter writer,
-                                 OrcUnion obj,
-                                 List<OrcProto.Type> types,
-                                 OrcProto.Type type
+      OrcUnion obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type
   ) throws IOException, JSONException {
     int subtype = type.getSubtypes(obj.getTag());
     printObject(writer, obj.getObject(), types, subtype);
   }
 
   static void printStruct(JSONWriter writer,
-                          OrcStruct obj,
-                          List<OrcProto.Type> types,
-                          OrcProto.Type type) throws IOException, JSONException {
+      OrcStruct obj,
+      List<OrcProto.Type> types,
+      OrcProto.Type type) throws IOException, JSONException {
     writer.object();
     List<Integer> fieldTypes = type.getSubtypesList();
-    for(int i=0; i < fieldTypes.size(); ++i) {
+    for (int i = 0; i < fieldTypes.size(); ++i) {
       writer.key(type.getFieldNames(i));
       printObject(writer, obj.getFieldValue(i), types, fieldTypes.get(i));
     }
@@ -368,9 +385,9 @@ public final class FileDump {
   }
 
   static void printObject(JSONWriter writer,
-                          Object obj,
-                          List<OrcProto.Type> types,
-                          int typeId) throws IOException, JSONException {
+      Object obj,
+      List<OrcProto.Type> types,
+      int typeId) throws IOException, JSONException {
     OrcProto.Type type = types.get(typeId);
     if (obj == null) {
       writer.value(null);
@@ -417,7 +434,7 @@ public final class FileDump {
   }
 
   static void printJsonData(Configuration conf,
-                            String filename) throws IOException, JSONException {
+      String filename) throws IOException, JSONException {
     Path path = new Path(filename);
     Reader reader = OrcFile.createReader(path.getFileSystem(conf), path);
     OutputStreamWriter out = new OutputStreamWriter(System.out, "UTF-8");

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
new file mode 100644
index 0000000..c33004e
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.filters.BloomFilterIO;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONStringer;
+import org.codehaus.jettison.json.JSONWriter;
+
+/**
+ * File dump tool with json formatted output.
+ */
+public class JsonFileDump {
+
+  public static void printJsonMetaData(List<String> files, Configuration conf,
+      List<Integer> rowIndexCols, boolean prettyPrint, boolean printTimeZone) throws JSONException, IOException {
+    JSONStringer writer = new JSONStringer();
+    boolean multiFile = files.size() > 1;
+    if (multiFile) {
+      writer.array();
+    } else {
+      writer.object();
+    }
+    for (String filename : files) {
+      if (multiFile) {
+        writer.object();
+      }
+      writer.key("fileName").value(filename);
+      Path path = new Path(filename);
+      Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
+      writer.key("fileVersion").value(reader.getFileVersion().getName());
+      writer.key("writerVersion").value(reader.getWriterVersion());
+      RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
+      writer.key("numberOfRows").value(reader.getNumberOfRows());
+      writer.key("compression").value(reader.getCompression());
+      if (reader.getCompression() != CompressionKind.NONE) {
+        writer.key("compressionBufferSize").value(reader.getCompressionSize());
+      }
+      writer.key("schemaString").value(reader.getObjectInspector().getTypeName());
+      writer.key("schema").array();
+      writeSchema(writer, reader.getTypes());
+      writer.endArray();
+
+      writer.key("stripeStatistics").array();
+      Metadata metadata = reader.getMetadata();
+      for (int n = 0; n < metadata.getStripeStatistics().size(); n++) {
+        writer.object();
+        writer.key("stripeNumber").value(n + 1);
+        StripeStatistics ss = metadata.getStripeStatistics().get(n);
+        writer.key("columnStatistics").array();
+        for (int i = 0; i < ss.getColumnStatistics().length; i++) {
+          writer.object();
+          writer.key("columnId").value(i);
+          writeColumnStatistics(writer, ss.getColumnStatistics()[i]);
+          writer.endObject();
+        }
+        writer.endArray();
+        writer.endObject();
+      }
+      writer.endArray();
+
+      ColumnStatistics[] stats = reader.getStatistics();
+      int colCount = stats.length;
+      writer.key("fileStatistics").array();
+      for (int i = 0; i < stats.length; ++i) {
+        writer.object();
+        writer.key("columnId").value(i);
+        writeColumnStatistics(writer, stats[i]);
+        writer.endObject();
+      }
+      writer.endArray();
+
+      writer.key("stripes").array();
+      int stripeIx = -1;
+      for (StripeInformation stripe : reader.getStripes()) {
+        ++stripeIx;
+        long stripeStart = stripe.getOffset();
+        OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
+        writer.object(); // start of stripe information
+        writer.key("stripeNumber").value(stripeIx + 1);
+        writer.key("stripeInformation");
+        writeStripeInformation(writer, stripe);
+        if (printTimeZone) {
+          writer.key("writerTimezone").value(
+              footer.hasWriterTimezone() ? footer.getWriterTimezone() : FileDump.UNKNOWN);
+        }
+        long sectionStart = stripeStart;
+
+        writer.key("streams").array();
+        for (OrcProto.Stream section : footer.getStreamsList()) {
+          writer.object();
+          String kind = section.hasKind() ? section.getKind().name() : FileDump.UNKNOWN;
+          writer.key("columnId").value(section.getColumn());
+          writer.key("section").value(kind);
+          writer.key("startOffset").value(sectionStart);
+          writer.key("length").value(section.getLength());
+          sectionStart += section.getLength();
+          writer.endObject();
+        }
+        writer.endArray();
+
+        writer.key("encodings").array();
+        for (int i = 0; i < footer.getColumnsCount(); ++i) {
+          writer.object();
+          OrcProto.ColumnEncoding encoding = footer.getColumns(i);
+          writer.key("columnId").value(i);
+          writer.key("kind").value(encoding.getKind());
+          if (encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
+              encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
+            writer.key("dictionarySize").value(encoding.getDictionarySize());
+          }
+          writer.endObject();
+        }
+        writer.endArray();
+
+        if (rowIndexCols != null && !rowIndexCols.isEmpty()) {
+          // read row-group indexes only for the requested columns; bloom
+          // filters are likewise read only for columns flagged in sargColumns
+          boolean[] sargColumns = new boolean[colCount];
+          for (int colIdx : rowIndexCols) {
+            sargColumns[colIdx] = true;
+          }
+          RecordReaderImpl.Index indices = rows.readRowIndex(stripeIx, null, sargColumns);
+          writer.key("indexes").array();
+          for (int col : rowIndexCols) {
+            writer.object();
+            writer.key("columnId").value(col);
+            writeRowGroupIndexes(writer, col, indices.getRowGroupIndex());
+            writeBloomFilterIndexes(writer, col, indices.getBloomFilterIndex());
+            writer.endObject();
+          }
+          writer.endArray();
+        }
+        writer.endObject(); // end of stripe information
+      }
+      writer.endArray();
+
+      FileSystem fs = path.getFileSystem(conf);
+      long fileLen = fs.getContentSummary(path).getLength();
+      long paddedBytes = FileDump.getTotalPaddingSize(reader);
+      // an empty ORC file is ~45 bytes, so fileLen is assumed to always be > 0
+      double percentPadding = ((double) paddedBytes / (double) fileLen) * 100;
+      writer.key("fileLength").value(fileLen);
+      writer.key("paddingLength").value(paddedBytes);
+      writer.key("paddingRatio").value(percentPadding);
+      rows.close();
+
+      writer.endObject();
+    }
+    if (multiFile) {
+      writer.endArray();
+    }
+
+    if (prettyPrint) {
+      final String prettyJson;
+      if (multiFile) {
+        JSONArray jsonArray = new JSONArray(writer.toString());
+        prettyJson = jsonArray.toString(2);
+      } else {
+        JSONObject jsonObject = new JSONObject(writer.toString());
+        prettyJson = jsonObject.toString(2);
+      }
+      System.out.println(prettyJson);
+    } else {
+      System.out.println(writer.toString());
+    }
+  }
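+
+  // NOTE (annotation, not part of the committed file): for a single input
+  // file the emitted JSON is one object whose top-level keys, in write order,
+  // are fileName, fileVersion, writerVersion, numberOfRows, compression,
+  // compressionBufferSize (when compressed), schemaString, schema,
+  // stripeStatistics, fileStatistics, stripes, fileLength, paddingLength and
+  // paddingRatio; multiple files are wrapped in a JSON array of such objects.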
+
+  private static void writeSchema(JSONStringer writer, List<OrcProto.Type> types)
+      throws JSONException {
+    int i = 0;
+    for(OrcProto.Type type : types) {
+      writer.object();
+      writer.key("columnId").value(i++);
+      writer.key("columnType").value(type.getKind());
+      if (type.getFieldNamesCount() > 0) {
+        writer.key("childColumnNames").array();
+        for (String field : type.getFieldNamesList()) {
+          writer.value(field);
+        }
+        writer.endArray();
+        writer.key("childColumnIds").array();
+        for (Integer colId : type.getSubtypesList()) {
+          writer.value(colId);
+        }
+        writer.endArray();
+      }
+      if (type.hasPrecision()) {
+        writer.key("precision").value(type.getPrecision());
+      }
+
+      if (type.hasScale()) {
+        writer.key("scale").value(type.getScale());
+      }
+
+      if (type.hasMaximumLength()) {
+        writer.key("maxLength").value(type.getMaximumLength());
+      }
+      writer.endObject();
+    }
+  }
+
+  private static void writeStripeInformation(JSONWriter writer, StripeInformation stripe)
+      throws JSONException {
+    writer.object();
+    writer.key("offset").value(stripe.getOffset());
+    writer.key("indexLength").value(stripe.getIndexLength());
+    writer.key("dataLength").value(stripe.getDataLength());
+    writer.key("footerLength").value(stripe.getFooterLength());
+    writer.key("rowCount").value(stripe.getNumberOfRows());
+    writer.endObject();
+  }
+
+  private static void writeColumnStatistics(JSONWriter writer, ColumnStatistics cs)
+      throws JSONException {
+    if (cs != null) {
+      writer.key("count").value(cs.getNumberOfValues());
+      writer.key("hasNull").value(cs.hasNull());
+      if (cs instanceof BinaryColumnStatistics) {
+        writer.key("totalLength").value(((BinaryColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.BINARY);
+      } else if (cs instanceof BooleanColumnStatistics) {
+        writer.key("trueCount").value(((BooleanColumnStatistics) cs).getTrueCount());
+        writer.key("falseCount").value(((BooleanColumnStatistics) cs).getFalseCount());
+        writer.key("type").value(OrcProto.Type.Kind.BOOLEAN);
+      } else if (cs instanceof IntegerColumnStatistics) {
+        writer.key("min").value(((IntegerColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((IntegerColumnStatistics) cs).getMaximum());
+        if (((IntegerColumnStatistics) cs).isSumDefined()) {
+          writer.key("sum").value(((IntegerColumnStatistics) cs).getSum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.LONG);
+      } else if (cs instanceof DoubleColumnStatistics) {
+        writer.key("min").value(((DoubleColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((DoubleColumnStatistics) cs).getMaximum());
+        writer.key("sum").value(((DoubleColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.DOUBLE);
+      } else if (cs instanceof StringColumnStatistics) {
+        writer.key("min").value(((StringColumnStatistics) cs).getMinimum());
+        writer.key("max").value(((StringColumnStatistics) cs).getMaximum());
+        writer.key("totalLength").value(((StringColumnStatistics) cs).getSum());
+        writer.key("type").value(OrcProto.Type.Kind.STRING);
+      } else if (cs instanceof DateColumnStatistics) {
+        if (((DateColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((DateColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((DateColumnStatistics) cs).getMaximum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.DATE);
+      } else if (cs instanceof TimestampColumnStatistics) {
+        if (((TimestampColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((TimestampColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((TimestampColumnStatistics) cs).getMaximum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.TIMESTAMP);
+      } else if (cs instanceof DecimalColumnStatistics) {
+        if (((DecimalColumnStatistics) cs).getMaximum() != null) {
+          writer.key("min").value(((DecimalColumnStatistics) cs).getMinimum());
+          writer.key("max").value(((DecimalColumnStatistics) cs).getMaximum());
+          writer.key("sum").value(((DecimalColumnStatistics) cs).getSum());
+        }
+        writer.key("type").value(OrcProto.Type.Kind.DECIMAL);
+      }
+    }
+  }
+
+  private static void writeBloomFilterIndexes(JSONWriter writer, int col,
+      OrcProto.BloomFilterIndex[] bloomFilterIndex) throws JSONException {
+
+    BloomFilterIO stripeLevelBF = null;
+    if (bloomFilterIndex != null && bloomFilterIndex[col] != null) {
+      int entryIx = 0;
+      writer.key("bloomFilterIndexes").array();
+      for (OrcProto.BloomFilter bf : bloomFilterIndex[col].getBloomFilterList()) {
+        writer.object();
+        writer.key("entryId").value(entryIx++);
+        BloomFilterIO toMerge = new BloomFilterIO(bf);
+        writeBloomFilterStats(writer, toMerge);
+        if (stripeLevelBF == null) {
+          stripeLevelBF = toMerge;
+        } else {
+          stripeLevelBF.merge(toMerge);
+        }
+        writer.endObject();
+      }
+      writer.endArray();
+    }
+    if (stripeLevelBF != null) {
+      writer.key("stripeLevelBloomFilter");
+      writer.object();
+      writeBloomFilterStats(writer, stripeLevelBF);
+      writer.endObject();
+    }
+  }
+
+  private static void writeBloomFilterStats(JSONWriter writer, BloomFilterIO bf)
+      throws JSONException {
+    int bitCount = bf.getBitSize();
+    int popCount = 0;
+    for (long l : bf.getBitSet()) {
+      popCount += Long.bitCount(l);
+    }
+    int k = bf.getNumHashFunctions();
+    float loadFactor = (float) popCount / (float) bitCount;
+    float expectedFpp = (float) Math.pow(loadFactor, k);
+    writer.key("numHashFunctions").value(k);
+    writer.key("bitCount").value(bitCount);
+    writer.key("popCount").value(popCount);
+    writer.key("loadFactor").value(loadFactor);
+    writer.key("expectedFpp").value(expectedFpp);
+  }
+
+  private static void writeRowGroupIndexes(JSONWriter writer, int col,
+      OrcProto.RowIndex[] rowGroupIndex)
+      throws JSONException {
+
+    OrcProto.RowIndex index;
+    if (rowGroupIndex == null || (col >= rowGroupIndex.length) ||
+        ((index = rowGroupIndex[col]) == null)) {
+      return;
+    }
+
+    writer.key("rowGroupIndexes").array();
+    for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
+      OrcProto.RowIndexEntry entry = index.getEntry(entryIx);
+      if (entry == null) {
+        continue;
+      }
+      writer.object();
+      writer.key("entryId").value(entryIx);
+      OrcProto.ColumnStatistics colStats = entry.getStatistics();
+      writeColumnStatistics(writer, ColumnStatisticsImpl.deserialize(colStats));
+      writer.key("positions").array();
+      for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
+        writer.value(entry.getPositions(posIx));
+      }
+      writer.endArray();
+      writer.endObject();
+    }
+    writer.endArray();
+  }
+
+}
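
A note on the bloom filter statistics emitted by writeBloomFilterStats above: with k hash
functions and a load factor p = popCount / bitCount, the dump reports the expected
false-positive probability as p^k. A minimal standalone sketch of the same arithmetic
(plain Java, no Hive dependencies; the bit-set contents here are made up for illustration):

    public class BloomFilterStatsSketch {
      public static void main(String[] args) {
        long[] bitSet = new long[98];        // 98 * 64 = 6272 bits, matching the dumps below
        bitSet[0] = 0x0F0F0F0FL;             // hypothetical population, illustration only
        int k = 4;                           // numHashFunctions

        int bitCount = bitSet.length * Long.SIZE;
        int popCount = 0;
        for (long word : bitSet) {
          popCount += Long.bitCount(word);   // same popcount loop as the code above
        }
        double loadFactor = (double) popCount / (double) bitCount;
        double expectedFpp = Math.pow(loadFactor, k);
        System.out.println("loadFactor=" + loadFactor + " expectedFpp=" + expectedFpp);
      }
    }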

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
new file mode 100644
index 0000000..d17c528
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestJsonFileDump.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.orc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.PrintStream;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hive.common.util.HiveTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestJsonFileDump {
+
+  Path workDir = new Path(System.getProperty("test.tmp.dir"));
+  Configuration conf;
+  FileSystem fs;
+  Path testFilePath;
+
+  @Before
+  public void openFileSystem() throws Exception {
+    conf = new Configuration();
+    fs = FileSystem.getLocal(conf);
+    fs.setWorkingDirectory(workDir);
+    testFilePath = new Path("TestFileDump.testDump.orc");
+    fs.delete(testFilePath, false);
+  }
+
+  static class MyRecord {
+    int i;
+    long l;
+    String s;
+    MyRecord(int i, long l, String s) {
+      this.i = i;
+      this.l = l;
+      this.s = s;
+    }
+  }
+
+  static void checkOutput(String expected,
+                          String actual) throws Exception {
+    BufferedReader eStream =
+        new BufferedReader(new FileReader(HiveTestUtils.getFileFromClasspath(expected)));
+    BufferedReader aStream =
+        new BufferedReader(new FileReader(actual));
+    String expectedLine = eStream.readLine();
+    while (expectedLine != null) {
+      String actualLine = aStream.readLine();
+      System.out.println("actual:   " + actualLine);
+      System.out.println("expected: " + expectedLine);
+      assertEquals(expectedLine, actualLine);
+      expectedLine = eStream.readLine();
+    }
+    assertNull(eStream.readLine());
+    assertNull(aStream.readLine());
+  }
+
+  @Test
+  public void testJsonDump() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory.getReflectionObjectInspector
+          (MyRecord.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+    conf.set(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname, "COMPRESSION");
+    OrcFile.WriterOptions options = OrcFile.writerOptions(conf)
+        .fileSystem(fs)
+        .inspector(inspector)
+        .stripeSize(100000)
+        .compress(CompressionKind.ZLIB)
+        .bufferSize(10000)
+        .rowIndexStride(1000)
+        .bloomFilterColumns("s");
+    Writer writer = OrcFile.createWriter(testFilePath, options);
+    Random r1 = new Random(1);
+    String[] words = new String[]{"It", "was", "the", "best", "of", "times,",
+        "it", "was", "the", "worst", "of", "times,", "it", "was", "the", "age",
+        "of", "wisdom,", "it", "was", "the", "age", "of", "foolishness,", "it",
+        "was", "the", "epoch", "of", "belief,", "it", "was", "the", "epoch",
+        "of", "incredulity,", "it", "was", "the", "season", "of", "Light,",
+        "it", "was", "the", "season", "of", "Darkness,", "it", "was", "the",
+        "spring", "of", "hope,", "it", "was", "the", "winter", "of", "despair,",
+        "we", "had", "everything", "before", "us,", "we", "had", "nothing",
+        "before", "us,", "we", "were", "all", "going", "direct", "to",
+        "Heaven,", "we", "were", "all", "going", "direct", "the", "other",
+        "way"};
+    for(int i=0; i < 21000; ++i) {
+      if (i % 100 == 0) {
+        writer.addRow(new MyRecord(r1.nextInt(), r1.nextLong(), null));
+      } else {
+        writer.addRow(new MyRecord(r1.nextInt(), r1.nextLong(),
+            words[r1.nextInt(words.length)]));
+      }
+    }
+
+    writer.close();
+    PrintStream origOut = System.out;
+    String outputFilename = "orc-file-dump.json";
+    FileOutputStream myOut = new FileOutputStream(workDir + File.separator + outputFilename);
+
+    // replace stdout and run command
+    System.setOut(new PrintStream(myOut));
+    FileDump.main(new String[]{testFilePath.toString(), "-j", "-p", "--rowindex=3"});
+    System.out.flush();
+    System.setOut(origOut);
+
+
+    checkOutput(outputFilename, workDir + File.separator + outputFilename);
+  }
+}
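
The test above validates FileDump by temporarily pointing System.out at a file-backed
stream and then diffing the capture line-by-line against a checked-in golden file. The
redirect pattern in isolation, as a sketch (with a try/finally guard added, which the
test itself omits):

    import java.io.FileOutputStream;
    import java.io.PrintStream;

    public class StdoutCaptureSketch {
      public static void main(String[] args) throws Exception {
        PrintStream origOut = System.out;
        PrintStream capture = new PrintStream(new FileOutputStream("captured.txt"));
        try {
          System.setOut(capture);    // everything printed now lands in captured.txt
          System.out.println("output to be diffed against a golden file");
          System.out.flush();
        } finally {
          System.setOut(origOut);    // restore stdout even if the dump throws
          capture.close();
        }
      }
    }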

http://git-wip-us.apache.org/repos/asf/hive/blob/80fb8913/ql/src/test/resources/orc-file-dump.json
----------------------------------------------------------------------
diff --git a/ql/src/test/resources/orc-file-dump.json b/ql/src/test/resources/orc-file-dump.json
new file mode 100644
index 0000000..125a32e
--- /dev/null
+++ b/ql/src/test/resources/orc-file-dump.json
@@ -0,0 +1,1354 @@
+{
+  "fileName": "TestFileDump.testDump.orc",
+  "fileVersion": "0.12",
+  "writerVersion": "HIVE_8732",
+  "numberOfRows": 21000,
+  "compression": "ZLIB",
+  "compressionBufferSize": 10000,
+  "schemaString": "struct<i:int,l:bigint,s:string>",
+  "schema": [
+    {
+      "columnId": 0,
+      "columnType": "STRUCT",
+      "childColumnNames": [
+        "i",
+        "l",
+        "s"
+      ],
+      "childColumnIds": [
+        1,
+        2,
+        3
+      ]
+    },
+    {
+      "columnId": 1,
+      "columnType": "INT"
+    },
+    {
+      "columnId": 2,
+      "columnType": "LONG"
+    },
+    {
+      "columnId": 3,
+      "columnType": "STRING"
+    }
+  ],
+  "stripeStatistics": [
+    {
+      "stripeNumber": 1,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2147115959,
+          "max": 2145210552,
+          "sum": 50111854553,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9223180583305557329,
+          "max": 9221614132680747961,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19283,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 2,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2147390285,
+          "max": 2147224606,
+          "sum": -22290798217,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9219295160509160427,
+          "max": 9217571024994660020,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19397,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 3,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2146954065,
+          "max": 2146722468,
+          "sum": 20639652136,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9214076359988107846,
+          "max": 9222919052987871506,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19031,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 4,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 5000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 5000,
+          "hasNull": false,
+          "min": -2146969085,
+          "max": 2146025044,
+          "sum": -5156814387,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 5000,
+          "hasNull": false,
+          "min": -9222731174895935707,
+          "max": 9220625004936875965,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 4950,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 19459,
+          "type": "STRING"
+        }
+      ]
+    },
+    {
+      "stripeNumber": 5,
+      "columnStatistics": [
+        {
+          "columnId": 0,
+          "count": 1000,
+          "hasNull": false
+        },
+        {
+          "columnId": 1,
+          "count": 1000,
+          "hasNull": false,
+          "min": -2144303438,
+          "max": 2127599049,
+          "sum": 62841564778,
+          "type": "LONG"
+        },
+        {
+          "columnId": 2,
+          "count": 1000,
+          "hasNull": false,
+          "min": -9195133638801798919,
+          "max": 9218626063131504414,
+          "type": "LONG"
+        },
+        {
+          "columnId": 3,
+          "count": 990,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 3963,
+          "type": "STRING"
+        }
+      ]
+    }
+  ],
+  "fileStatistics": [
+    {
+      "columnId": 0,
+      "count": 21000,
+      "hasNull": false
+    },
+    {
+      "columnId": 1,
+      "count": 21000,
+      "hasNull": false,
+      "min": -2147390285,
+      "max": 2147224606,
+      "sum": 106145458863,
+      "type": "LONG"
+    },
+    {
+      "columnId": 2,
+      "count": 21000,
+      "hasNull": false,
+      "min": -9223180583305557329,
+      "max": 9222919052987871506,
+      "type": "LONG"
+    },
+    {
+      "columnId": 3,
+      "count": 20790,
+      "hasNull": true,
+      "min": "Darkness,",
+      "max": "worst",
+      "totalLength": 81133,
+      "type": "STRING"
+    }
+  ],
+  "stripes": [
+    {
+      "stripeNumber": 1,
+      "stripeInformation": {
+        "offset": 3,
+        "indexLength": 863,
+        "dataLength": 63749,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 3,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 20,
+          "length": 165
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 185,
+          "length": 174
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 359,
+          "length": 103
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 462,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 866,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 20895,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 60930,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 60947,
+          "length": 3510
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 64457,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 64482,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3873,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3861,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              736,
+              23
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3946,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1473,
+              43
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3774,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2067,
+              261
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3829,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2992,
+              35
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 2,
+      "stripeInformation": {
+        "offset": 64718,
+        "indexLength": 854,
+        "dataLength": 63742,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 64718,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 64735,
+          "length": 164
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 64899,
+          "length": 169
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 65068,
+          "length": 100
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 65168,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 65572,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 85601,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 125636,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 125653,
+          "length": 3503
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 129156,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 129181,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3946,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3836,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              746,
+              11
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3791,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1430,
+              95
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3904,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2239,
+              23
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3920,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2994,
+              17
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 3,
+      "stripeInformation": {
+        "offset": 129417,
+        "indexLength": 853,
+        "dataLength": 63749,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 129417,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 129434,
+          "length": 160
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 129594,
+          "length": 170
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 129764,
+          "length": 102
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 129866,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 130270,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 150299,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 190334,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 190351,
+          "length": 3510
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 193861,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 193886,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3829,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3853,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              698,
+              74
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3796,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1483,
+              39
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3736,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2148,
+              155
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3817,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              3018,
+              8
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 4,
+      "stripeInformation": {
+        "offset": 194122,
+        "indexLength": 866,
+        "dataLength": 63735,
+        "footerLength": 103,
+        "rowCount": 5000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 194122,
+          "length": 17
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 194139,
+          "length": 164
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 194303,
+          "length": 174
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 194477,
+          "length": 107
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 194584,
+          "length": 404
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 194988,
+          "length": 20029
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 215017,
+          "length": 40035
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 255052,
+          "length": 17
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 255069,
+          "length": 3496
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 258565,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 258590,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [
+          {
+            "entryId": 0,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3959,
+            "type": "STRING",
+            "positions": [
+              0,
+              0,
+              0,
+              0,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "entryId": 1,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3816,
+            "type": "STRING",
+            "positions": [
+              0,
+              38,
+              12,
+              0,
+              0,
+              495,
+              338
+            ]
+          },
+          {
+            "entryId": 2,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3883,
+            "type": "STRING",
+            "positions": [
+              0,
+              78,
+              12,
+              0,
+              0,
+              1449,
+              71
+            ]
+          },
+          {
+            "entryId": 3,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3938,
+            "type": "STRING",
+            "positions": [
+              0,
+              118,
+              12,
+              0,
+              0,
+              2207,
+              59
+            ]
+          },
+          {
+            "entryId": 4,
+            "count": 990,
+            "hasNull": true,
+            "min": "Darkness,",
+            "max": "worst",
+            "totalLength": 3863,
+            "type": "STRING",
+            "positions": [
+              0,
+              158,
+              12,
+              0,
+              0,
+              2838,
+              223
+            ]
+          }
+        ],
+        "bloomFilterIndexes": [
+          {
+            "entryId": 0,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 1,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 2,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 3,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          },
+          {
+            "entryId": 4,
+            "numHashFunctions": 4,
+            "bitCount": 6272,
+            "popCount": 138,
+            "loadFactor": 0.022002551704645157,
+            "expectedFpp": 2.3436470542037569E-7
+          }
+        ],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    },
+    {
+      "stripeNumber": 5,
+      "stripeInformation": {
+        "offset": 258826,
+        "indexLength": 433,
+        "dataLength": 12940,
+        "footerLength": 95,
+        "rowCount": 1000
+      },
+      "streams": [
+        {
+          "columnId": 0,
+          "section": "ROW_INDEX",
+          "startOffset": 258826,
+          "length": 12
+        },
+        {
+          "columnId": 1,
+          "section": "ROW_INDEX",
+          "startOffset": 258838,
+          "length": 38
+        },
+        {
+          "columnId": 2,
+          "section": "ROW_INDEX",
+          "startOffset": 258876,
+          "length": 41
+        },
+        {
+          "columnId": 3,
+          "section": "ROW_INDEX",
+          "startOffset": 258917,
+          "length": 41
+        },
+        {
+          "columnId": 3,
+          "section": "BLOOM_FILTER",
+          "startOffset": 258958,
+          "length": 301
+        },
+        {
+          "columnId": 1,
+          "section": "DATA",
+          "startOffset": 259259,
+          "length": 4007
+        },
+        {
+          "columnId": 2,
+          "section": "DATA",
+          "startOffset": 263266,
+          "length": 8007
+        },
+        {
+          "columnId": 3,
+          "section": "PRESENT",
+          "startOffset": 271273,
+          "length": 16
+        },
+        {
+          "columnId": 3,
+          "section": "DATA",
+          "startOffset": 271289,
+          "length": 752
+        },
+        {
+          "columnId": 3,
+          "section": "LENGTH",
+          "startOffset": 272041,
+          "length": 25
+        },
+        {
+          "columnId": 3,
+          "section": "DICTIONARY_DATA",
+          "startOffset": 272066,
+          "length": 133
+        }
+      ],
+      "encodings": [
+        {
+          "columnId": 0,
+          "kind": "DIRECT"
+        },
+        {
+          "columnId": 1,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 2,
+          "kind": "DIRECT_V2"
+        },
+        {
+          "columnId": 3,
+          "kind": "DICTIONARY_V2",
+          "dictionarySize": 35
+        }
+      ],
+      "indexes": [{
+        "columnId": 3,
+        "rowGroupIndexes": [{
+          "entryId": 0,
+          "count": 990,
+          "hasNull": true,
+          "min": "Darkness,",
+          "max": "worst",
+          "totalLength": 3963,
+          "type": "STRING",
+          "positions": [
+            0,
+            0,
+            0,
+            0,
+            0,
+            0,
+            0
+          ]
+        }],
+        "bloomFilterIndexes": [{
+          "entryId": 0,
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }],
+        "stripeLevelBloomFilter": {
+          "numHashFunctions": 4,
+          "bitCount": 6272,
+          "popCount": 138,
+          "loadFactor": 0.022002551704645157,
+          "expectedFpp": 2.3436470542037569E-7
+        }
+      }]
+    }
+  ],
+  "fileLength": 272842,
+  "paddingLength": 0,
+  "paddingRatio": 0
+}
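
A golden file like the one above is produced by the '-j' (JSON) and '-p' (pretty print)
options that TestJsonFileDump passes to FileDump.main; programmatically, the invocation
looks like the sketch below (the ORC path is a placeholder):

    import org.apache.hadoop.hive.ql.io.orc.FileDump;

    public class JsonDumpExample {
      public static void main(String[] args) throws Exception {
        // '-j' selects JSON output, '-p' pretty-prints it, and '--rowindex=3'
        // includes row-group index and bloom filter details for column id 3,
        // exactly as the test above invokes it.
        FileDump.main(new String[]{"/path/to/file.orc", "-j", "-p", "--rowindex=3"});
      }
    }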


[20/39] hive git commit: HIVE-10620 : ZooKeeperHiveLock overrides equals() method but not hashCode() (Chaoyu Tang, reviewed by Ashutosh)

Posted by pr...@apache.org.
HIVE-10620 : ZooKeeperHiveLock overrides equals() method but not hashCode() (Chaoyu Tang, reviewed by Ashutosh)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/61763335
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/61763335
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/61763335

Branch: refs/heads/llap
Commit: 61763335b6a184e4a0ce6dde8a6e3a5d93af4857
Parents: 7149ab1
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:18:13 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:18:13 2015 -0700

----------------------------------------------------------------------
 .../ql/lockmgr/zookeeper/ZooKeeperHiveLock.java | 22 ++++++++++++++++++++
 1 file changed, 22 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/61763335/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
index 8e35007..463a339 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/zookeeper/ZooKeeperHiveLock.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.lockmgr.zookeeper;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
 import org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
@@ -71,4 +72,25 @@ public class ZooKeeperHiveLock extends HiveLock {
       obj.equals(zLock.getHiveLockObject()) &&
       mode == zLock.getHiveLockMode();
   }
+
+  @Override
+  public int hashCode() {
+    HashCodeBuilder builder = new HashCodeBuilder();
+    boolean pathPresent = path != null;
+    builder.append(pathPresent);
+    if (pathPresent) {
+      builder.append(path.toCharArray());
+    }
+    boolean lockObjectPresent = obj != null;
+    builder.append(lockObjectPresent);
+    if (lockObjectPresent) {
+      builder.append(obj.hashCode());
+    }
+    boolean modePresent = mode != null;
+    builder.append(modePresent);
+    if (modePresent) {
+      builder.append(mode);
+    }
+    return builder.toHashCode();
+  }
 }
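
The motivation for this fix is the java.lang.Object contract: objects that compare equal
via equals() must return the same hashCode(), or hash-based collections quietly lose
track of them. A minimal sketch of the failure mode (Point is a made-up class, not Hive
code):

    import java.util.HashSet;
    import java.util.Set;

    public class EqualsWithoutHashCode {
      static final class Point {
        final int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }
        @Override public boolean equals(Object o) {
          return o instanceof Point && ((Point) o).x == x && ((Point) o).y == y;
        }
        // hashCode() deliberately not overridden: equal Points usually hash to
        // different buckets, so the lookup below usually fails.
      }

      public static void main(String[] args) {
        Set<Point> locks = new HashSet<>();
        locks.add(new Point(1, 2));
        System.out.println(locks.contains(new Point(1, 2))); // usually false
      }
    }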


[35/39] hive git commit: HIVE-10651: ORC file footer cache should be bounded (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by pr...@apache.org.
HIVE-10651: ORC file footer cache should be bounded (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9e4803cd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9e4803cd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9e4803cd

Branch: refs/heads/llap
Commit: 9e4803cd373ccf4ba0df484bc4eefad8c0685514
Parents: 8141a4e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon May 11 13:07:16 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon May 11 13:07:16 2015 -0700

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java    | 2 +-
 .../org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java     | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9e4803cd/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 115a5d0..eff4d30 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1022,7 +1022,7 @@ public class HiveConf extends Configuration {
         "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
         "data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
     HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
-        "Cache size for keeping meta info about orc splits cached in the client."),
+        "Max cache size for keeping meta info about orc splits cached in the client."),
     HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
         "How many threads orc should use to create splits in parallel."),
     HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/9e4803cd/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 3ae4688..5d6c9da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -412,8 +412,12 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
         }
 
         if (footerCache == null && cacheStripeDetails) {
-          footerCache = CacheBuilder.newBuilder().concurrencyLevel(numThreads)
-              .initialCapacity(cacheStripeDetailsSize).softValues().build();
+          footerCache = CacheBuilder.newBuilder()
+              .concurrencyLevel(numThreads)
+              .initialCapacity(cacheStripeDetailsSize)
+              .maximumSize(cacheStripeDetailsSize)
+              .softValues()
+              .build();
         }
       }
       String value = conf.get(ValidTxnList.VALID_TXNS_KEY,
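
The substance of the change above is that initialCapacity() only pre-sizes the backing
table, while maximumSize() is what actually bounds the cache; softValues() alone sheds
entries only under GC pressure. A standalone sketch of the bounded configuration, using
just the Guava calls shown in the diff (keys and values here are placeholders):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class BoundedFooterCacheSketch {
      public static void main(String[] args) {
        Cache<String, String> footers = CacheBuilder.newBuilder()
            .concurrencyLevel(10)      // expected concurrent writers
            .initialCapacity(10000)    // pre-sizing only; not a limit
            .maximumSize(10000)        // the real bound: evicts entries beyond this size
            .softValues()              // and lets the GC reclaim values under pressure
            .build();
        footers.put("hdfs://path/file.orc", "footer metadata");
        System.out.println(footers.getIfPresent("hdfs://path/file.orc"));
      }
    }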


[22/39] hive git commit: schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair) (addendum fix)

Posted by pr...@apache.org.
schemaTool upgrade from 0.14.0 to 1.3.0 causes failure (Hari Subramaniyan via Sushanth Sowmyan, Thejas Nair) (addendum fix)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0af1d6e7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0af1d6e7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0af1d6e7

Branch: refs/heads/llap
Commit: 0af1d6e7d36fbd8071a35692f51d72eb86b0e9da
Parents: 42f88ca
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 12:07:17 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 12:07:17 2015 -0700

----------------------------------------------------------------------
 metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql | 2 +-
 metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0af1d6e7/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
index 07dce8f..a205ac3 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.2.0.mysql.sql
@@ -216,7 +216,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`)
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
   CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)

http://git-wip-us.apache.org/repos/asf/hive/blob/0af1d6e7/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
index 19ae264..bc63f0d 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-1.3.0.mysql.sql
@@ -216,7 +216,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
   KEY `PARTITIONS_N49` (`TBL_ID`),
   KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`)
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
   CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)


[02/39] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
new file mode 100644
index 0000000..23d3f32
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out
@@ -0,0 +1,568 @@
+PREHOOK: query: drop table if exists TJOIN1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists TJOIN2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1
+POSTHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1
+PREHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2
+POSTHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2
+PREHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1STAGE
+POSTHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1STAGE
+PREHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2STAGE
+POSTHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2STAGE
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin1stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin1stage
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin2stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin2stage
+PREHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1stage
+PREHOOK: Output: default@tjoin1
+POSTHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1stage
+POSTHOOK: Output: default@tjoin1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 EXPRESSION [(tjoin1stage)tjoin1stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin2stage
+PREHOOK: Output: default@tjoin2
+POSTHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin2stage
+POSTHOOK: Output: default@tjoin2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-4
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        tjoin2 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        tjoin2 
+          TableScan
+            alias: tjoin2
+            Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+            HashTable Sink Operator
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: tjoin1
+            Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              filter predicates:
+                0 {(c2 > 15)}
+                1 
+              keys:
+                0 c1 (type: int)
+                1 c1 (type: int)
+              outputColumnNames: _col0, _col1, _col2, _col8
+              Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Local Work:
+        Map Reduce Local Work
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
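
The output above is worth a second look: every tjoin1 row survives with NULL in c2j2, even row 0, whose c2 = 15 fails the ON-clause predicate. In a left outer join, a predicate on the preserved side that lives in the ON clause only disqualifies matches; it never removes left rows, which is why the plans carry it as a "filter predicates" entry on the Map Join Operator rather than as a Filter above the TableScan. Below is a minimal Java sketch of that semantics, not Hive's implementation; the left rows are copied from the query output, while the tjoin2 rows are invented, since tjoin2.txt is not reproduced in this file.

import java.util.List;

public class LeftOuterJoinSketch {
  // Left rows come from the query output above; the right-side rows are
  // hypothetical, since the contents of tjoin2.txt are not shown here.
  record Left(int rnum, Integer c1, Integer c2) {}
  record Right(Integer c1, String c2) {}

  static String nul(Object o) { return o == null ? "NULL" : o.toString(); }

  public static void main(String[] args) {
    List<Left> tjoin1 = List.of(
        new Left(0, 10, 15), new Left(1, 20, 25), new Left(2, null, 50));
    List<Right> tjoin2 = List.of(new Right(10, "aa"), new Right(15, "bb"));

    for (Left l : tjoin1) {
      // The ON-clause predicate on the preserved side, (c2 > 15), only gates
      // matching; it never drops a left row. The plans above show it as
      // "filter predicates: 0 {(c2 > 15)}" on the Map Join Operator.
      boolean leftQualifies = l.c2() != null && l.c2() > 15;
      boolean matched = false;
      for (Right r : tjoin2) {
        if (leftQualifies && l.c1() != null && l.c1().equals(r.c1())) {
          System.out.println(l.rnum() + "\t" + nul(l.c1()) + "\t"
              + nul(l.c2()) + "\t" + r.c2());
          matched = true;
        }
      }
      if (!matched) { // preserved row with no qualifying match: NULL-extend
        System.out.println(l.rnum() + "\t" + nul(l.c1()) + "\t"
            + nul(l.c2()) + "\tNULL");
      }
    }
  }
}

Run against the three tjoin1 rows, this prints exactly the three NULL-extended rows shown above, regardless of what the right side contains.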


[19/39] hive git commit: HIVE-10608 : Fix useless 'if' statement in RetryingMetaStoreClient (135) (Alexander Pivovarov via Szehon)

Posted by pr...@apache.org.
HIVE-10608 : Fix useless 'if' statement in RetryingMetaStoreClient (135) (Alexander Pivovarov via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7149ab15
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7149ab15
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7149ab15

Branch: refs/heads/llap
Commit: 7149ab15787a5f26954ece2283944ab78e8694ec
Parents: 48a243e
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:13:59 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:13:59 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/metastore/RetryingMetaStoreClient.java | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7149ab15/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index fb44484..e282981 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -173,8 +173,11 @@ public class RetryingMetaStoreClient implements InvocationHandler {
           throw e.getCause();
         }
       } catch (MetaException e) {
-        if (e.getMessage().matches("(?s).*(IO|TTransport)Exception.*"));
-        caughtException = e;
+        if (e.getMessage().matches("(?s).*(IO|TTransport)Exception.*")) {
+          caughtException = e;
+        } else {
+          throw e;
+        }
       }
 
       if (retriesMade >=  retryLimit) {
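
The hunk above is a textbook dangling-semicolon fix: in the old code, the ';' after the 'if' condition is an empty statement, so 'caughtException = e' ran unconditionally and every MetaException was retried, not just the transport-related ones the regex was meant to select. The patch braces the assignment and rethrows everything else. A standalone sketch of the pitfall follows; the class and variable names are illustrative only, not taken from the Hive code base.

public class DanglingSemicolon {
    public static void main(String[] args) {
        String message = "unrelated failure";
        boolean retriable;

        // Buggy shape: the stray ';' terminates the 'if' as an empty
        // statement, so the assignment below it always executes.
        retriable = false;
        if (message.matches("(?s).*(IO|TTransport)Exception.*"));
        retriable = true;
        System.out.println("buggy: " + retriable);   // prints true

        // Fixed shape: braces bind the assignment to the condition.
        retriable = false;
        if (message.matches("(?s).*(IO|TTransport)Exception.*")) {
            retriable = true;
        }
        System.out.println("fixed: " + retriable);   // prints false
    }
}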


[25/39] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count in to account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by pr...@apache.org.
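
Per the title, the change makes HiveCost's epsilon comparison consider row count: when two candidate plans' costs land within epsilon of each other, row count should break the tie instead of the comparison declaring the plans equal and letting an arbitrary, possibly wider, plan win. Below is a rough sketch of that idea with hypothetical class and field names, not the real HiveCost API.

public class CostSketch {
    static final double EPSILON = 1e-5;  // hypothetical tolerance

    final double rowCount;
    final double cpu;
    final double io;

    CostSketch(double rowCount, double cpu, double io) {
        this.rowCount = rowCount;
        this.cpu = cpu;
        this.io = io;
    }

    boolean isLessOrEqual(CostSketch other) {
        double self = cpu + io;
        double that = other.cpu + other.io;
        if (Math.abs(self - that) > EPSILON) {
            return self < that;             // costs clearly differ
        }
        return rowCount <= other.rowCount;  // near-equal costs: prefer fewer rows
    }

    public static void main(String[] args) {
        CostSketch a = new CostSketch(100, 1.0, 2.0);
        CostSketch b = new CostSketch(500, 1.0, 2.0 + 1e-7);
        System.out.println(a.isLessOrEqual(b));  // true: tie broken on row count
        System.out.println(b.isLessOrEqual(a));  // false
    }
}

The explainuser_2.q.out changes below are the plan updates that follow from the tightened comparison.
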
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index f7026a8..f84524b 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -53,11 +53,11 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@ss
 POSTHOOK: Lineage: ss.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.k2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.k3 EXPRESSION [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: ss.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.v2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: ss.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ss.v3 EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: INSERT OVERWRITE TABLE sr
 SELECT x.key,x.value,y.key,y.value,z.key,z.value
 FROM src1 x 
@@ -81,11 +81,11 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Output: default@sr
 POSTHOOK: Lineage: sr.k1 SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.k2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.k3 SIMPLE [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k2 SIMPLE [(src)y.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.k3 EXPRESSION [(srcpart)z.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: sr.v1 SIMPLE [(src1)x.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.v2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: sr.v3 SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: sr.v3 EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: INSERT OVERWRITE TABLE cs
 SELECT x.key,x.value,y.key,y.value,z.key,z.value
 FROM src1 x 
@@ -195,7 +195,7 @@ Stage-0
                Merge Join Operator [MERGEJOIN_29]
                |  condition map:[{"":"Inner Join 0 to 1"}]
                |  keys:{"1":"_col3 (type: string)","0":"_col0 (type: string)"}
-               |  outputColumnNames:["_col0","_col4","_col5"]
+               |  outputColumnNames:["_col1","_col2","_col5"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [SIMPLE_EDGE]
                |  Reduce Output Operator [RS_14]
@@ -203,14 +203,15 @@ Stage-0
                |     Map-reduce partition columns:_col0 (type: string)
                |     sort order:+
                |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |     Select Operator [SEL_2]
-               |        outputColumnNames:["_col0"]
+               |     value expressions:_col1 (type: string)
+               |     Select Operator [SEL_1]
+               |        outputColumnNames:["_col0","_col1"]
                |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |        Filter Operator [FIL_25]
-               |           predicate:value is not null (type: boolean)
+               |           predicate:key is not null (type: boolean)
                |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |           TableScan [TS_0]
-               |              alias:z
+               |              alias:y
                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Reducer 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_16]
@@ -218,11 +219,11 @@ Stage-0
                      Map-reduce partition columns:_col3 (type: string)
                      sort order:+
                      Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                     value expressions:_col1 (type: string), _col2 (type: string)
+                     value expressions:_col0 (type: string)
                      Merge Join Operator [MERGEJOIN_28]
                      |  condition map:[{"":"Inner Join 0 to 1"}]
-                     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                     |  outputColumnNames:["_col1","_col2","_col3"]
+                     |  keys:{"1":"_col1 (type: string)","0":"_col0 (type: string)"}
+                     |  outputColumnNames:["_col0","_col3"]
                      |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                      |<-Map 3 [SIMPLE_EDGE]
                      |  Reduce Output Operator [RS_8]
@@ -230,28 +231,27 @@ Stage-0
                      |     Map-reduce partition columns:_col0 (type: string)
                      |     sort order:+
                      |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                     |     value expressions:_col1 (type: string)
                      |     Select Operator [SEL_4]
-                     |        outputColumnNames:["_col0","_col1"]
+                     |        outputColumnNames:["_col0"]
                      |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      |        Filter Operator [FIL_26]
-                     |           predicate:key is not null (type: boolean)
+                     |           predicate:value is not null (type: boolean)
                      |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                     |           TableScan [TS_3]
-                     |              alias:y
+                     |           TableScan [TS_2]
+                     |              alias:z
                      |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      |<-Map 5 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_10]
-                           key expressions:_col0 (type: string)
-                           Map-reduce partition columns:_col0 (type: string)
+                           key expressions:_col1 (type: string)
+                           Map-reduce partition columns:_col1 (type: string)
                            sort order:+
                            Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                           value expressions:_col1 (type: string)
+                           value expressions:_col0 (type: string)
                            Select Operator [SEL_6]
                               outputColumnNames:["_col0","_col1"]
                               Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                               Filter Operator [FIL_27]
-                                 predicate:(key is not null and value is not null) (type: boolean)
+                                 predicate:(value is not null and key is not null) (type: boolean)
                                  Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                                  TableScan [TS_5]
                                     alias:x
@@ -315,21 +315,21 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
+Reducer 13 <- Map 12 (SIMPLE_EDGE), Map 14 (SIMPLE_EDGE)
 Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
-Reducer 14 <- Map 13 (SIMPLE_EDGE), Reducer 16 (SIMPLE_EDGE)
-Reducer 12 <- Map 11 (SIMPLE_EDGE), Reducer 14 (SIMPLE_EDGE)
+Reducer 11 <- Map 10 (SIMPLE_EDGE), Reducer 13 (SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
-Reducer 4 <- Reducer 12 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
-Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
-Reducer 9 <- Map 10 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
+Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+Reducer 9 <- Reducer 16 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE)
+Reducer 8 <- Map 7 (SIMPLE_EDGE), Reducer 11 (SIMPLE_EDGE)
 Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 17 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:100
       Stage-1
-         Reducer 6
+         Reducer 5
          File Output Operator [FS_71]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -339,236 +339,236 @@ Stage-0
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
                Select Operator [SEL_69]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-               |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 5 [SIMPLE_EDGE]
+               |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+               |<-Reducer 4 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_68]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
-                     Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
+                     Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                      Group By Operator [GBY_66]
                      |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
                      |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                      |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-                     |<-Reducer 4 [SIMPLE_EDGE]
+                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                     |<-Reducer 3 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_65]
                            key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            sort order:+++
-                           Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                            value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                            Group By Operator [GBY_64]
                               aggregations:["count(_col3)","count(_col4)","count(_col5)"]
                               keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                               outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                              Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                               Select Operator [SEL_62]
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                 Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 Merge Join Operator [MERGEJOIN_111]
+                                 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 Merge Join Operator [MERGEJOIN_113]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"1":"_col8 (type: string), _col10 (type: string)","0":"_col8 (type: string), _col10 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col8","_col9","_col20","_col21"]
-                                 |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 12 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_60]
-                                 |     key expressions:_col8 (type: string), _col10 (type: string)
-                                 |     Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |  keys:{"1":"_col15 (type: string), _col17 (type: string)","0":"_col1 (type: string), _col3 (type: string)"}
+                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Reducer 2 [SIMPLE_EDGE]
+                                 |  Reduce Output Operator [RS_58]
+                                 |     key expressions:_col1 (type: string), _col3 (type: string)
+                                 |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
                                  |     sort order:++
-                                 |     Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col6 (type: string), _col7 (type: string)
-                                 |     Select Operator [SEL_46]
-                                 |        outputColumnNames:["_col10","_col6","_col7","_col8"]
-                                 |        Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        Merge Join Operator [MERGEJOIN_109]
-                                 |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"1":"_col5 (type: string)","0":"_col1 (type: string)"}
-                                 |        |  outputColumnNames:["_col6","_col7","_col8","_col10"]
-                                 |        |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Map 11 [SIMPLE_EDGE]
-                                 |        |  Reduce Output Operator [RS_42]
-                                 |        |     key expressions:_col1 (type: string)
-                                 |        |     Map-reduce partition columns:_col1 (type: string)
-                                 |        |     sort order:+
-                                 |        |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     Select Operator [SEL_19]
-                                 |        |        outputColumnNames:["_col1"]
-                                 |        |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |        Filter Operator [FIL_101]
-                                 |        |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                 |        |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           TableScan [TS_17]
-                                 |        |              alias:src1
-                                 |        |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Reducer 14 [SIMPLE_EDGE]
-                                 |           Reduce Output Operator [RS_44]
-                                 |              key expressions:_col5 (type: string)
-                                 |              Map-reduce partition columns:_col5 (type: string)
-                                 |              sort order:+
-                                 |              Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |              value expressions:_col4 (type: string), _col6 (type: string), _col8 (type: string)
-                                 |              Merge Join Operator [MERGEJOIN_108]
-                                 |              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |              |  keys:{"1":"_col2 (type: string)","0":"_col0 (type: string)"}
-                                 |              |  outputColumnNames:["_col4","_col5","_col6","_col8"]
-                                 |              |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Map 13 [SIMPLE_EDGE]
-                                 |              |  Reduce Output Operator [RS_36]
-                                 |              |     key expressions:_col0 (type: string)
-                                 |              |     Map-reduce partition columns:_col0 (type: string)
-                                 |              |     sort order:+
-                                 |              |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |     Select Operator [SEL_22]
-                                 |              |        outputColumnNames:["_col0"]
-                                 |              |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |        Filter Operator [FIL_102]
-                                 |              |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                 |              |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              |           TableScan [TS_20]
-                                 |              |              alias:d1
-                                 |              |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Reducer 16 [SIMPLE_EDGE]
-                                 |                 Reduce Output Operator [RS_38]
-                                 |                    key expressions:_col2 (type: string)
-                                 |                    Map-reduce partition columns:_col2 (type: string)
-                                 |                    sort order:+
-                                 |                    Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |                    value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
-                                 |                    Merge Join Operator [MERGEJOIN_107]
-                                 |                    |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |                    |  keys:{"1":"_col3 (type: string)","0":"_col1 (type: string)"}
-                                 |                    |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                 |                    |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |<-Map 15 [SIMPLE_EDGE]
-                                 |                    |  Reduce Output Operator [RS_30]
-                                 |                    |     key expressions:_col1 (type: string)
-                                 |                    |     Map-reduce partition columns:_col1 (type: string)
-                                 |                    |     sort order:+
-                                 |                    |     Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |     Select Operator [SEL_25]
-                                 |                    |        outputColumnNames:["_col1"]
-                                 |                    |        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |        Filter Operator [FIL_103]
-                                 |                    |           predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
-                                 |                    |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |           TableScan [TS_23]
-                                 |                    |              alias:srcpart
-                                 |                    |              Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                                 |                    |<-Map 17 [SIMPLE_EDGE]
-                                 |                       Reduce Output Operator [RS_32]
-                                 |                          key expressions:_col3 (type: string)
-                                 |                          Map-reduce partition columns:_col3 (type: string)
-                                 |                          sort order:+
-                                 |                          Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                          value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                 |                          Select Operator [SEL_28]
-                                 |                             outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                 |                             Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                             Filter Operator [FIL_104]
-                                 |                                predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                 |                                Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |                                TableScan [TS_26]
-                                 |                                   alias:ss
-                                 |                                   Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 3 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_58]
-                                       key expressions:_col8 (type: string), _col10 (type: string)
-                                       Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     value expressions:_col2 (type: string)
+                                 |     Merge Join Operator [MERGEJOIN_107]
+                                 |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                 |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                 |     |  outputColumnNames:["_col1","_col2","_col3"]
+                                 |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 1 [SIMPLE_EDGE]
+                                 |     |  Reduce Output Operator [RS_53]
+                                 |     |     key expressions:_col0 (type: string)
+                                 |     |     Map-reduce partition columns:_col0 (type: string)
+                                 |     |     sort order:+
+                                 |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                 |     |     Select Operator [SEL_1]
+                                 |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                 |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |        Filter Operator [FIL_99]
+                                 |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                 |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |           TableScan [TS_0]
+                                 |     |              alias:cs
+                                 |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 6 [SIMPLE_EDGE]
+                                 |        Reduce Output Operator [RS_55]
+                                 |           key expressions:_col0 (type: string)
+                                 |           Map-reduce partition columns:_col0 (type: string)
+                                 |           sort order:+
+                                 |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |           Select Operator [SEL_4]
+                                 |              outputColumnNames:["_col0"]
+                                 |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |              Filter Operator [FIL_100]
+                                 |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                 |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |                 TableScan [TS_2]
+                                 |                    alias:d1
+                                 |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Reducer 9 [SIMPLE_EDGE]
+                                    Reduce Output Operator [RS_60]
+                                       key expressions:_col15 (type: string), _col17 (type: string)
+                                       Map-reduce partition columns:_col15 (type: string), _col17 (type: string)
                                        sort order:++
-                                       Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                       value expressions:_col2 (type: string), _col3 (type: string), _col9 (type: string)
-                                       Merge Join Operator [MERGEJOIN_110]
-                                       |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"1":"_col3 (type: string), _col5 (type: string)","0":"_col1 (type: string), _col3 (type: string)"}
-                                       |  outputColumnNames:["_col2","_col3","_col8","_col9","_col10"]
-                                       |  Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Reducer 2 [SIMPLE_EDGE]
-                                       |  Reduce Output Operator [RS_53]
-                                       |     key expressions:_col1 (type: string), _col3 (type: string)
-                                       |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
-                                       |     sort order:++
-                                       |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     value expressions:_col2 (type: string)
-                                       |     Merge Join Operator [MERGEJOIN_105]
-                                       |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                                       |     |  outputColumnNames:["_col1","_col2","_col3"]
-                                       |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Map 1 [SIMPLE_EDGE]
-                                       |     |  Reduce Output Operator [RS_48]
-                                       |     |     key expressions:_col0 (type: string)
-                                       |     |     Map-reduce partition columns:_col0 (type: string)
-                                       |     |     sort order:+
-                                       |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                       |     |     Select Operator [SEL_1]
-                                       |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                       |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |        Filter Operator [FIL_97]
-                                       |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                       |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     |           TableScan [TS_0]
-                                       |     |              alias:cs
-                                       |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Map 7 [SIMPLE_EDGE]
-                                       |        Reduce Output Operator [RS_50]
-                                       |           key expressions:_col0 (type: string)
-                                       |           Map-reduce partition columns:_col0 (type: string)
-                                       |           sort order:+
-                                       |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |           Select Operator [SEL_4]
-                                       |              outputColumnNames:["_col0"]
-                                       |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |              Filter Operator [FIL_98]
-                                       |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                       |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |                 TableScan [TS_2]
-                                       |                    alias:d1
-                                       |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Reducer 9 [SIMPLE_EDGE]
-                                          Reduce Output Operator [RS_55]
-                                             key expressions:_col3 (type: string), _col5 (type: string)
-                                             Map-reduce partition columns:_col3 (type: string), _col5 (type: string)
-                                             sort order:++
-                                             Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                             value expressions:_col2 (type: string), _col4 (type: string)
-                                             Merge Join Operator [MERGEJOIN_106]
-                                             |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
-                                             |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                             |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 10 [SIMPLE_EDGE]
-                                             |  Reduce Output Operator [RS_14]
-                                             |     key expressions:_col0 (type: string)
-                                             |     Map-reduce partition columns:_col0 (type: string)
-                                             |     sort order:+
-                                             |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |     Select Operator [SEL_10]
-                                             |        outputColumnNames:["_col0"]
-                                             |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |        Filter Operator [FIL_100]
-                                             |           predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                             |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_8]
-                                             |              alias:d1
-                                             |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 8 [SIMPLE_EDGE]
-                                                Reduce Output Operator [RS_12]
-                                                   key expressions:_col0 (type: string)
-                                                   Map-reduce partition columns:_col0 (type: string)
-                                                   sort order:+
-                                                   Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                   value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                                   Select Operator [SEL_7]
-                                                      outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                                      Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                      Filter Operator [FIL_99]
-                                                         predicate:((((((v1 = 'srv1') and k1 is not null) and v2 is not null) and v3 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                                         Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                         TableScan [TS_5]
-                                                            alias:sr
-                                                            Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                       Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string)
+                                       Select Operator [SEL_51]
+                                          outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                          Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          Merge Join Operator [MERGEJOIN_112]
+                                          |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |  keys:{"1":"_col2 (type: string), _col4 (type: string)","0":"_col8 (type: string), _col10 (type: string)"}
+                                          |  outputColumnNames:["_col6","_col7","_col14","_col15","_col17"]
+                                          |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Reducer 16 [SIMPLE_EDGE]
+                                          |  Reduce Output Operator [RS_49]
+                                          |     key expressions:_col2 (type: string), _col4 (type: string)
+                                          |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                          |     sort order:++
+                                          |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     value expressions:_col3 (type: string), _col5 (type: string)
+                                          |     Merge Join Operator [MERGEJOIN_111]
+                                          |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |     |  keys:{"1":"_col0 (type: string)","0":"_col0 (type: string)"}
+                                          |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                          |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Map 15 [SIMPLE_EDGE]
+                                          |     |  Reduce Output Operator [RS_36]
+                                          |     |     key expressions:_col0 (type: string)
+                                          |     |     Map-reduce partition columns:_col0 (type: string)
+                                          |     |     sort order:+
+                                          |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                          |     |     Select Operator [SEL_31]
+                                          |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                          |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |        Filter Operator [FIL_105]
+                                          |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                          |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |           TableScan [TS_29]
+                                          |     |              alias:sr
+                                          |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Map 17 [SIMPLE_EDGE]
+                                          |        Reduce Output Operator [RS_38]
+                                          |           key expressions:_col0 (type: string)
+                                          |           Map-reduce partition columns:_col0 (type: string)
+                                          |           sort order:+
+                                          |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |           Select Operator [SEL_34]
+                                          |              outputColumnNames:["_col0"]
+                                          |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |              Filter Operator [FIL_106]
+                                          |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                          |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |                 TableScan [TS_32]
+                                          |                    alias:d1
+                                          |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Reducer 8 [SIMPLE_EDGE]
+                                             Reduce Output Operator [RS_47]
+                                                key expressions:_col8 (type: string), _col10 (type: string)
+                                                Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                                sort order:++
+                                                Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                                value expressions:_col6 (type: string), _col7 (type: string)
+                                                Merge Join Operator [MERGEJOIN_110]
+                                                |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                |  keys:{"1":"_col5 (type: string)","0":"_col1 (type: string)"}
+                                                |  outputColumnNames:["_col6","_col7","_col8","_col10"]
+                                                |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Map 7 [SIMPLE_EDGE]
+                                                |  Reduce Output Operator [RS_42]
+                                                |     key expressions:_col1 (type: string)
+                                                |     Map-reduce partition columns:_col1 (type: string)
+                                                |     sort order:+
+                                                |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |     Select Operator [SEL_7]
+                                                |        outputColumnNames:["_col1"]
+                                                |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |        Filter Operator [FIL_101]
+                                                |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                                |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                                |           TableScan [TS_5]
+                                                |              alias:src1
+                                                |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Reducer 11 [SIMPLE_EDGE]
+                                                   Reduce Output Operator [RS_44]
+                                                      key expressions:_col5 (type: string)
+                                                      Map-reduce partition columns:_col5 (type: string)
+                                                      sort order:+
+                                                      Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                                      value expressions:_col4 (type: string), _col6 (type: string), _col8 (type: string)
+                                                      Merge Join Operator [MERGEJOIN_109]
+                                                      |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                      |  keys:{"1":"_col2 (type: string)","0":"_col0 (type: string)"}
+                                                      |  outputColumnNames:["_col4","_col5","_col6","_col8"]
+                                                      |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                                      |<-Map 10 [SIMPLE_EDGE]
+                                                      |  Reduce Output Operator [RS_24]
+                                                      |     key expressions:_col0 (type: string)
+                                                      |     Map-reduce partition columns:_col0 (type: string)
+                                                      |     sort order:+
+                                                      |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |     Select Operator [SEL_10]
+                                                      |        outputColumnNames:["_col0"]
+                                                      |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |        Filter Operator [FIL_102]
+                                                      |           predicate:((value = 'd1value') and key is not null) (type: boolean)
+                                                      |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      |           TableScan [TS_8]
+                                                      |              alias:d1
+                                                      |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                      |<-Reducer 13 [SIMPLE_EDGE]
+                                                         Reduce Output Operator [RS_26]
+                                                            key expressions:_col2 (type: string)
+                                                            Map-reduce partition columns:_col2 (type: string)
+                                                            sort order:+
+                                                            Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                                            value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
+                                                            Merge Join Operator [MERGEJOIN_108]
+                                                            |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                            |  keys:{"1":"_col3 (type: string)","0":"_col1 (type: string)"}
+                                                            |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                                            |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                                            |<-Map 12 [SIMPLE_EDGE]
+                                                            |  Reduce Output Operator [RS_18]
+                                                            |     key expressions:_col1 (type: string)
+                                                            |     Map-reduce partition columns:_col1 (type: string)
+                                                            |     sort order:+
+                                                            |     Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |     Select Operator [SEL_13]
+                                                            |        outputColumnNames:["_col1"]
+                                                            |        Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |        Filter Operator [FIL_103]
+                                                            |           predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                                            |           Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            |           TableScan [TS_11]
+                                                            |              alias:srcpart
+                                                            |              Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                                                            |<-Map 14 [SIMPLE_EDGE]
+                                                               Reduce Output Operator [RS_20]
+                                                                  key expressions:_col3 (type: string)
+                                                                  Map-reduce partition columns:_col3 (type: string)
+                                                                  sort order:+
+                                                                  Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                  value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
+                                                                  Select Operator [SEL_16]
+                                                                     outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+                                                                     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                     Filter Operator [FIL_104]
+                                                                        predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
+                                                                        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                                        TableScan [TS_14]
+                                                                           alias:ss
+                                                                           Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value
 FROM src1 x JOIN src y ON (x.key = y.key) 
@@ -1298,7 +1298,7 @@ Stage-0
                Map Join Operator [MAPJOIN_29]
                |  condition map:[{"":"Inner Join 0 to 1"}]
                |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col3 (type: string)"}
-               |  outputColumnNames:["_col0","_col4","_col5"]
+               |  outputColumnNames:["_col1","_col2","_col5"]
                |  Statistics:Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                |<-Map 1 [BROADCAST_EDGE]
                |  Reduce Output Operator [RS_14]
@@ -1306,44 +1306,45 @@ Stage-0
                |     Map-reduce partition columns:_col0 (type: string)
                |     sort order:+
                |     Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-               |     Select Operator [SEL_2]
-               |        outputColumnNames:["_col0"]
+               |     value expressions:_col1 (type: string)
+               |     Select Operator [SEL_1]
+               |        outputColumnNames:["_col0","_col1"]
                |        Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |        Filter Operator [FIL_25]
-               |           predicate:value is not null (type: boolean)
+               |           predicate:key is not null (type: boolean)
                |           Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                |           TableScan [TS_0]
-               |              alias:z
+               |              alias:y
                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                |<-Map Join Operator [MAPJOIN_28]
                   |  condition map:[{"":"Inner Join 0 to 1"}]
-                  |  keys:{"Map 2":"_col0 (type: string)","Map 3":"_col0 (type: string)"}
-                  |  outputColumnNames:["_col1","_col2","_col3"]
+                  |  keys:{"Map 2":"_col0 (type: string)","Map 3":"_col1 (type: string)"}
+                  |  outputColumnNames:["_col0","_col3"]
                   |  Statistics:Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   |<-Map 3 [BROADCAST_EDGE]
                   |  Reduce Output Operator [RS_10]
-                  |     key expressions:_col0 (type: string)
-                  |     Map-reduce partition columns:_col0 (type: string)
+                  |     key expressions:_col1 (type: string)
+                  |     Map-reduce partition columns:_col1 (type: string)
                   |     sort order:+
                   |     Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
-                  |     value expressions:_col1 (type: string)
+                  |     value expressions:_col0 (type: string)
                   |     Select Operator [SEL_6]
                   |        outputColumnNames:["_col0","_col1"]
                   |        Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                   |        Filter Operator [FIL_27]
-                  |           predicate:(key is not null and value is not null) (type: boolean)
+                  |           predicate:(value is not null and key is not null) (type: boolean)
                   |           Statistics:Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                   |           TableScan [TS_5]
                   |              alias:x
                   |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   |<-Select Operator [SEL_4]
-                        outputColumnNames:["_col0","_col1"]
+                        outputColumnNames:["_col0"]
                         Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         Filter Operator [FIL_26]
-                           predicate:key is not null (type: boolean)
+                           predicate:value is not null (type: boolean)
                            Statistics:Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                           TableScan [TS_3]
-                              alias:y
+                           TableScan [TS_2]
+                              alias:z
                               Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 PREHOOK: query: EXPLAIN
 select 
@@ -1404,17 +1405,17 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
-Map 4 <- Map 3 (BROADCAST_EDGE)
-Map 7 <- Map 10 (BROADCAST_EDGE), Map 2 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE), Map 6 (BROADCAST_EDGE)
-Reducer 9 <- Reducer 8 (SIMPLE_EDGE)
-Reducer 8 <- Map 7 (SIMPLE_EDGE)
+Map 2 <- Map 1 (BROADCAST_EDGE)
+Map 10 <- Map 9 (BROADCAST_EDGE)
+Map 5 <- Map 10 (BROADCAST_EDGE), Map 2 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE), Map 8 (BROADCAST_EDGE)
+Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
+Reducer 6 <- Map 5 (SIMPLE_EDGE)
 
 Stage-0
    Fetch Operator
       limit:100
       Stage-1
-         Reducer 9
+         Reducer 7
          File Output Operator [FS_71]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
@@ -1424,190 +1425,190 @@ Stage-0
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
                Select Operator [SEL_69]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-               |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-               |<-Reducer 8 [SIMPLE_EDGE]
+               |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+               |<-Reducer 6 [SIMPLE_EDGE]
                   Reduce Output Operator [RS_68]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
-                     Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
+                     Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                      Group By Operator [GBY_66]
                      |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
                      |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                      |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 365 Data size: 3882 Basic stats: COMPLETE Column stats: NONE
-                     |<-Map 7 [SIMPLE_EDGE]
+                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                     |<-Map 5 [SIMPLE_EDGE]
                         Reduce Output Operator [RS_65]
                            key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                            sort order:+++
-                           Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                            value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
                            Group By Operator [GBY_64]
                               aggregations:["count(_col3)","count(_col4)","count(_col5)"]
                               keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                               outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                              Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                               Select Operator [SEL_62]
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                 Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 Map Join Operator [MAPJOIN_111]
+                                 Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                 Map Join Operator [MAPJOIN_113]
                                  |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"Map 2":"_col8 (type: string), _col10 (type: string)","Map 7":"_col8 (type: string), _col10 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col8","_col9","_col20","_col21"]
-                                 |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                 |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 5":"_col15 (type: string), _col17 (type: string)"}
+                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
                                  |<-Map 2 [BROADCAST_EDGE]
                                  |  Reduce Output Operator [RS_58]
-                                 |     key expressions:_col8 (type: string), _col10 (type: string)
-                                 |     Map-reduce partition columns:_col8 (type: string), _col10 (type: string)
+                                 |     key expressions:_col1 (type: string), _col3 (type: string)
+                                 |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
                                  |     sort order:++
-                                 |     Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col2 (type: string), _col3 (type: string), _col9 (type: string)
-                                 |     Map Join Operator [MAPJOIN_110]
+                                 |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     value expressions:_col2 (type: string)
+                                 |     Map Join Operator [MAPJOIN_107]
                                  |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |     |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 4":"_col3 (type: string), _col5 (type: string)"}
-                                 |     |  outputColumnNames:["_col2","_col3","_col8","_col9","_col10"]
-                                 |     |  Statistics:Num rows: 150 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Map 4 [BROADCAST_EDGE]
-                                 |     |  Reduce Output Operator [RS_55]
-                                 |     |     key expressions:_col3 (type: string), _col5 (type: string)
-                                 |     |     Map-reduce partition columns:_col3 (type: string), _col5 (type: string)
-                                 |     |     sort order:++
-                                 |     |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     value expressions:_col2 (type: string), _col4 (type: string)
-                                 |     |     Map Join Operator [MAPJOIN_106]
-                                 |     |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |     |     |  keys:{"Map 3":"_col0 (type: string)","Map 4":"_col0 (type: string)"}
-                                 |     |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                 |     |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |<-Map 3 [BROADCAST_EDGE]
-                                 |     |     |  Reduce Output Operator [RS_12]
-                                 |     |     |     key expressions:_col0 (type: string)
-                                 |     |     |     Map-reduce partition columns:_col0 (type: string)
-                                 |     |     |     sort order:+
-                                 |     |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                 |     |     |     Select Operator [SEL_7]
-                                 |     |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                 |     |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |        Filter Operator [FIL_99]
-                                 |     |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and v2 is not null) and v3 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                 |     |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |           TableScan [TS_5]
-                                 |     |     |              alias:sr
-                                 |     |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     |<-Select Operator [SEL_10]
-                                 |     |           outputColumnNames:["_col0"]
-                                 |     |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |     |           Filter Operator [FIL_100]
-                                 |     |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |     |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |     |              TableScan [TS_8]
-                                 |     |                 alias:d1
-                                 |     |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Map Join Operator [MAPJOIN_105]
-                                 |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
-                                 |        |  outputColumnNames:["_col1","_col2","_col3"]
-                                 |        |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Map 1 [BROADCAST_EDGE]
-                                 |        |  Reduce Output Operator [RS_48]
-                                 |        |     key expressions:_col0 (type: string)
-                                 |        |     Map-reduce partition columns:_col0 (type: string)
-                                 |        |     sort order:+
-                                 |        |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                 |        |     Select Operator [SEL_1]
-                                 |        |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                 |        |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |        Filter Operator [FIL_97]
-                                 |        |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                 |        |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           TableScan [TS_0]
-                                 |        |              alias:cs
-                                 |        |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Select Operator [SEL_4]
-                                 |              outputColumnNames:["_col0"]
+                                 |     |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
+                                 |     |  outputColumnNames:["_col1","_col2","_col3"]
+                                 |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Map 1 [BROADCAST_EDGE]
+                                 |     |  Reduce Output Operator [RS_53]
+                                 |     |     key expressions:_col0 (type: string)
+                                 |     |     Map-reduce partition columns:_col0 (type: string)
+                                 |     |     sort order:+
+                                 |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                 |     |     Select Operator [SEL_1]
+                                 |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                 |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |        Filter Operator [FIL_99]
+                                 |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                 |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                 |     |           TableScan [TS_0]
+                                 |     |              alias:cs
+                                 |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                 |     |<-Select Operator [SEL_4]
+                                 |           outputColumnNames:["_col0"]
+                                 |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                 |           Filter Operator [FIL_100]
+                                 |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
                                  |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              Filter Operator [FIL_98]
-                                 |                 predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |                 TableScan [TS_2]
-                                 |                    alias:d1
-                                 |                    Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Select Operator [SEL_46]
-                                       outputColumnNames:["_col10","_col6","_col7","_col8"]
-                                       Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                       Map Join Operator [MAPJOIN_109]
+                                 |              TableScan [TS_2]
+                                 |                 alias:d1
+                                 |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                 |<-Select Operator [SEL_51]
+                                       outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                       Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       Map Join Operator [MAPJOIN_112]
                                        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"Map 5":"_col1 (type: string)","Map 7":"_col5 (type: string)"}
-                                       |  outputColumnNames:["_col6","_col7","_col8","_col10"]
-                                       |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 5 [BROADCAST_EDGE]
-                                       |  Reduce Output Operator [RS_42]
-                                       |     key expressions:_col1 (type: string)
-                                       |     Map-reduce partition columns:_col1 (type: string)
-                                       |     sort order:+
-                                       |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |     Select Operator [SEL_19]
-                                       |        outputColumnNames:["_col1"]
-                                       |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |        Filter Operator [FIL_101]
-                                       |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                       |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                       |           TableScan [TS_17]
-                                       |              alias:src1
-                                       |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map Join Operator [MAPJOIN_108]
+                                       |  keys:{"Map 10":"_col2 (type: string), _col4 (type: string)","Map 5":"_col8 (type: string), _col10 (type: string)"}
+                                       |  outputColumnNames:["_col6","_col7","_col14","_col15","_col17"]
+                                       |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                       |<-Map 10 [BROADCAST_EDGE]
+                                       |  Reduce Output Operator [RS_49]
+                                       |     key expressions:_col2 (type: string), _col4 (type: string)
+                                       |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                       |     sort order:++
+                                       |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                       |     value expressions:_col3 (type: string), _col5 (type: string)
+                                       |     Map Join Operator [MAPJOIN_111]
+                                       |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                       |     |  keys:{"Map 10":"_col0 (type: string)","Map 9":"_col0 (type: string)"}
+                                       |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                       |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                       |     |<-Map 9 [BROADCAST_EDGE]
+                                       |     |  Reduce Output Operator [RS_36]
+                                       |     |     key expressions:_col0 (type: string)
+                                       |     |     Map-reduce partition columns:_col0 (type: string)
+                                       |     |     sort order:+
+                                       |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                       |     |     Select Operator [SEL_31]
+                                       |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                       |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |        Filter Operator [FIL_105]
+                                       |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                       |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                       |     |           TableScan [TS_29]
+                                       |     |              alias:sr
+                                       |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                       |     |<-Select Operator [SEL_34]
+                                       |           outputColumnNames:["_col0"]
+                                       |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                       |           Filter Operator [FIL_106]
+                                       |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                       |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                       |              TableScan [TS_32]
+                                       |                 alias:d1
+                                       |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                       |<-Map Join Operator [MAPJOIN_110]
                                           |  condition map:[{"":"Inner Join 0 to 1"}]
-                                          |  keys:{"Map 7":"_col2 (type: string)","Map 6":"_col0 (type: string)"}
-                                          |  outputColumnNames:["_col4","_col5","_col6","_col8"]
-                                          |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Map 6 [BROADCAST_EDGE]
-                                          |  Reduce Output Operator [RS_36]
-                                          |     key expressions:_col0 (type: string)
-                                          |     Map-reduce partition columns:_col0 (type: string)
+                                          |  keys:{"Map 3":"_col1 (type: string)","Map 5":"_col5 (type: string)"}
+                                          |  outputColumnNames:["_col6","_col7","_col8","_col10"]
+                                          |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map 3 [BROADCAST_EDGE]
+                                          |  Reduce Output Operator [RS_42]
+                                          |     key expressions:_col1 (type: string)
+                                          |     Map-reduce partition columns:_col1 (type: string)
                                           |     sort order:+
-                                          |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |     Select Operator [SEL_22]
-                                          |        outputColumnNames:["_col0"]
-                                          |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |        Filter Operator [FIL_102]
-                                          |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                          |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                          |           TableScan [TS_20]
-                                          |              alias:d1
-                                          |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Map Join Operator [MAPJOIN_107]
+                                          |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |     Select Operator [SEL_7]
+                                          |        outputColumnNames:["_col1"]
+                                          |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |        Filter Operator [FIL_101]
+                                          |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                          |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                          |           TableScan [TS_5]
+                                          |              alias:src1
+                                          |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map Join Operator [MAPJOIN_109]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"Map 10":"_col3 (type: string)","Map 7":"_col1 (type: string)"}
-                                             |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                             |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 10 [BROADCAST_EDGE]
-                                             |  Reduce Output Operator [RS_32]
-                                             |     key expressions:_col3 (type: string)
-                                             |     Map-reduce partition columns:_col3 (type: string)
+                                             |  keys:{"Map 5":"_col2 (type: string)","Map 4":"_col0 (type: string)"}
+                                             |  outputColumnNames:["_col4","_col5","_col6","_col8"]
+                                             |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                             |<-Map 4 [BROADCAST_EDGE]
+                                             |  Reduce Output Operator [RS_24]
+                                             |     key expressions:_col0 (type: string)
+                                             |     Map-reduce partition columns:_col0 (type: string)
                                              |     sort order:+
-                                             |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                             |     Select Operator [SEL_28]
-                                             |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                             |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |        Filter Operator [FIL_104]
-                                             |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                             |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_26]
-                                             |              alias:ss
-                                             |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Select Operator [SEL_25]
-                                                   outputColumnNames:["_col1"]
-                                                   Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                                   Filter Operator [FIL_103]
-                                                      predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                             |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column 

<TRUNCATED>

[14/39] hive git commit: HIVE-10612 : HIVE-10578 broke TestSQLStdHiveAccessControllerHS2 tests (Thejas Nair via Sushanth Sowmyan, reviewed by Ashutosh Chauhan)

Posted by pr...@apache.org.
HIVE-10612 : HIVE-10578 broke TestSQLStdHiveAccessControllerHS2 tests (Thejas Nair via Sushanth Sowmyan, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/72088ca7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/72088ca7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/72088ca7

Branch: refs/heads/llap
Commit: 72088ca7c39136dd68dbbefb3897cd7abfdd982c
Parents: e0044e0
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 01:27:54 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 01:28:57 2015 -0700

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/72088ca7/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 54e154c..85e732f 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2838,7 +2838,6 @@ public class HiveConf extends Configuration {
     ConfVars.HIVE_INSERT_INTO_MULTILEVEL_DIRS.varname,
     ConfVars.HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS.varname,
     ConfVars.HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES.varname,
-    ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION.varname,
     ConfVars.HIVE_QUOTEDID_SUPPORT.varname,
     ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES.varname,
     ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS.varname,
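
For context on the one-line removal above: a varname list of this shape is
typically flattened into a set and consulted before a parameter may be
changed at runtime, which is presumably why adding or dropping an entry can
break authorization tests such as TestSQLStdHiveAccessControllerHS2. A
minimal sketch of that pattern, using illustrative names that are not
Hive's actual API:

    // Hypothetical sketch; class, method, and parameter names are
    // illustrative, not the actual HiveConf code.
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class RuntimeParamCheck {
      // Assume this mirrors a whitelist like the one edited above.
      private static final Set<String> SAFE_VAR_NAMES = new HashSet<>(
          Arrays.asList("example.safe.param.one", "example.safe.param.two"));

      public static void checkSettable(String varName) {
        // Reject any parameter that is not on the whitelist.
        if (!SAFE_VAR_NAMES.contains(varName)) {
          throw new IllegalArgumentException(
              varName + " may not be modified at runtime");
        }
      }

      public static void main(String[] args) {
        checkSettable("example.safe.param.one");  // passes
        checkSettable("example.other.param");     // throws
      }
    }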


[24/39] hive git commit: HIVE-10530: Aggregate stats cache: bug fixes for RDBMS path (Vaibhav Gumashta reviewed by Mostafa Mokhtar, Thejas Nair)

Posted by pr...@apache.org.
HIVE-10530: Aggregate stats cache: bug fixes for RDBMS path (Vaibhav Gumashta reviewed by Mostafa Mokhtar, Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4a0ccd11
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4a0ccd11
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4a0ccd11

Branch: refs/heads/llap
Commit: 4a0ccd11f56f4e47b76eae4e60668e78bedfc20b
Parents: 3633db2
Author: Vaibhav Gumashta <vg...@apache.org>
Authored: Thu May 7 13:58:34 2015 -0700
Committer: Vaibhav Gumashta <vg...@apache.org>
Committed: Thu May 7 13:58:34 2015 -0700

----------------------------------------------------------------------
 .../hive/metastore/AggregateStatsCache.java     | 33 +++++++++-----------
 .../hive/metastore/MetaStoreDirectSql.java      | 24 +++++++++-----
 .../test/queries/clientpositive/explainuser_2.q |  1 +
 .../extrapolate_part_stats_partial.q            |  2 ++
 .../extrapolate_part_stats_partial_ndv.q        |  2 ++
 .../queries/clientpositive/mapjoin_mapjoin.q    |  1 +
 6 files changed, 37 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
index 6a85936..44106f5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
@@ -55,7 +55,7 @@ public class AggregateStatsCache {
   // Run the cleaner thread until cache is cleanUntil% occupied
   private final float cleanUntil;
   // Nodes go stale after this
-  private final long timeToLive;
+  private final long timeToLiveMs;
   // Max time when waiting for write locks on node list
   private final long maxWriterWaitTime;
   // Max time when waiting for read locks on node list
@@ -73,12 +73,12 @@ public class AggregateStatsCache {
   // To track cleaner metrics
   int numRemovedTTL = 0, numRemovedLRU = 0;
 
-  private AggregateStatsCache(int maxCacheNodes, int maxPartsPerCacheNode, long timeToLive,
+  private AggregateStatsCache(int maxCacheNodes, int maxPartsPerCacheNode, long timeToLiveMs,
       float falsePositiveProbability, float maxVariance, long maxWriterWaitTime,
       long maxReaderWaitTime, float maxFull, float cleanUntil) {
     this.maxCacheNodes = maxCacheNodes;
     this.maxPartsPerCacheNode = maxPartsPerCacheNode;
-    this.timeToLive = timeToLive;
+    this.timeToLiveMs = timeToLiveMs;
     this.falsePositiveProbability = falsePositiveProbability;
     this.maxVariance = maxVariance;
     this.maxWriterWaitTime = maxWriterWaitTime;
@@ -97,9 +97,9 @@ public class AggregateStatsCache {
       int maxPartitionsPerCacheNode =
           HiveConf
               .getIntVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS);
-      long timeToLive =
+      long timeToLiveMs =
           HiveConf.getTimeVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL,
-              TimeUnit.SECONDS);
+              TimeUnit.SECONDS)*1000;
       // False positives probability we are ready to tolerate for the underlying bloom filter
       float falsePositiveProbability =
           HiveConf.getFloatVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP);
@@ -120,7 +120,7 @@ public class AggregateStatsCache {
       float cleanUntil =
           HiveConf.getFloatVar(conf, HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL);
       self =
-          new AggregateStatsCache(maxCacheNodes, maxPartitionsPerCacheNode, timeToLive,
+          new AggregateStatsCache(maxCacheNodes, maxPartitionsPerCacheNode, timeToLiveMs,
               falsePositiveProbability, maxVariance, maxWriterWaitTime, maxReaderWaitTime, maxFull,
               cleanUntil);
     }
@@ -213,7 +213,7 @@ public class AggregateStatsCache {
    * @return best matched node or null
    */
   private AggrColStats findBestMatch(List<String> partNames, List<AggrColStats> candidates) {
-    // Hits, misses, shouldSkip for a node
+    // Hits, misses tracked for a candidate node
     MatchStats matchStats;
     // MatchStats for each candidate
     Map<AggrColStats, MatchStats> candidateMatchStats = new HashMap<AggrColStats, MatchStats>();
@@ -227,26 +227,23 @@ public class AggregateStatsCache {
     // Note: we're not creating a copy of the list for saving memory
     for (AggrColStats candidate : candidates) {
       // Variance check
-      if ((float) Math.abs((candidate.getNumPartsCached() - numPartsRequested)
-          / numPartsRequested) > maxVariance) {
+      if ((float) Math.abs((candidate.getNumPartsCached() - numPartsRequested) / numPartsRequested)
+          > maxVariance) {
         continue;
       }
       // TTL check
       if (isExpired(candidate)) {
         continue;
-      }
-      else {
+      } else {
         candidateMatchStats.put(candidate, new MatchStats(0, 0));
       }
     }
     // We'll count misses as we iterate
     int maxMisses = (int) maxVariance * numPartsRequested;
     for (String partName : partNames) {
-      for (AggrColStats candidate : candidates) {
-        matchStats = candidateMatchStats.get(candidate);
-        if (matchStats == null) {
-          continue;
-        }
+      for (Map.Entry<AggrColStats, MatchStats> entry : candidateMatchStats.entrySet()) {
+        AggrColStats candidate = entry.getKey();
+        matchStats = entry.getValue();
         if (candidate.getBloomFilter().test(partName.getBytes())) {
           ++matchStats.hits;
         } else {
@@ -464,7 +461,7 @@ public class AggregateStatsCache {
   }
 
   private boolean isExpired(AggrColStats aggrColStats) {
-    return System.currentTimeMillis() - aggrColStats.lastAccessTime > timeToLive;
+    return (System.currentTimeMillis() - aggrColStats.lastAccessTime) > timeToLiveMs;
   }
 
   /**
@@ -502,7 +499,7 @@ public class AggregateStatsCache {
 
     @Override
     public String toString() {
-      return "Database: " + dbName + ", Table: " + tblName + ", Column: " + colName;
+      return "database:" + dbName + ", table:" + tblName + ", column:" + colName;
     }
 
   }
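The unit mismatch fixed above is worth spelling out: HiveConf.getTimeVar(..., TimeUnit.SECONDS) returns seconds, but isExpired() compares the TTL against a System.currentTimeMillis() delta, so before the patch cached nodes expired roughly 1000x too early. A minimal self-contained sketch of the corrected pattern (TtlSketch and its members are illustrative names, not Hive's classes):

import java.util.concurrent.TimeUnit;

// Sketch only: store the TTL in milliseconds so it is directly
// comparable to System.currentTimeMillis() differences.
class TtlSketch {
  private final long timeToLiveMs;

  TtlSketch(long ttlSeconds) {
    this.timeToLiveMs = TimeUnit.SECONDS.toMillis(ttlSeconds);
  }

  boolean isExpired(long lastAccessTimeMs) {
    return (System.currentTimeMillis() - lastAccessTimeMs) > timeToLiveMs;
  }
}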

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 5ef3b9a..8bee978 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -1106,24 +1106,23 @@ class MetaStoreDirectSql {
     if (isAggregateStatsCacheEnabled) {
       AggrColStats colStatsAggrCached;
       List<ColumnStatisticsObj> colStatsAggrFromDB;
-      int maxPartitionsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
-      float falsePositiveProbability = aggrStatsCache.getFalsePositiveProbability();
+      int maxPartsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
+      float fpp = aggrStatsCache.getFalsePositiveProbability();
       int partitionsRequested = partNames.size();
-      if (partitionsRequested > maxPartitionsPerCacheNode) {
+      if (partitionsRequested > maxPartsPerCacheNode) {
         colStatsList =
             columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound,
                 useDensityFunctionForNDVEstimation);
       } else {
         colStatsList = new ArrayList<ColumnStatisticsObj>();
+        // Bloom filter for the new node that we will eventually add to the cache
+        BloomFilter bloomFilter = createPartsBloomFilter(maxPartsPerCacheNode, fpp, partNames);
         for (String colName : colNames) {
           // Check the cache first
           colStatsAggrCached = aggrStatsCache.get(dbName, tableName, colName, partNames);
           if (colStatsAggrCached != null) {
             colStatsList.add(colStatsAggrCached.getColStats());
           } else {
-            // Bloom filter for the new node that we will eventually add to the cache
-            BloomFilter bloomFilter =
-                new BloomFilter(maxPartitionsPerCacheNode, falsePositiveProbability);
             List<String> colNamesForDB = new ArrayList<String>();
             colNamesForDB.add(colName);
             // Read aggregated stats for one column
@@ -1148,6 +1147,15 @@ class MetaStoreDirectSql {
     return new AggrStats(colStatsList, partsFound);
   }
 
+  private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, float fpp,
+      List<String> partNames) {
+    BloomFilter bloomFilter = new BloomFilter(maxPartsPerCacheNode, fpp);
+    for (String partName : partNames) {
+      bloomFilter.add(partName.getBytes());
+    }
+    return bloomFilter;
+  }
+
   private long partsFoundForPartitions(String dbName, String tableName,
       List<String> partNames, List<String> colNames) throws MetaException {
     long partsFound = 0;
@@ -1174,8 +1182,8 @@ class MetaStoreDirectSql {
   }
 
   private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(String dbName,
-      String tableName, List<String> partNames, List<String> colNames, long partsFound, boolean useDensityFunctionForNDVEstimation)
-      throws MetaException {
+      String tableName, List<String> partNames, List<String> colNames, long partsFound,
+      boolean useDensityFunctionForNDVEstimation) throws MetaException {
     // TODO: all the extrapolation logic should be moved out of this class,
     // only mechanical data retrieval should remain here.
     String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
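The createPartsBloomFilter() refactor above hoists filter construction out of the per-column loop: one Bloom filter is now built, pre-populated with every requested partition name, per aggregate-stats request, instead of a fresh one per cache-missed column. A self-contained sketch of that shape, using a deliberately toy filter rather than Hive's BloomFilter (which, per the patch, exposes add(byte[]) and test(byte[])):

import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

// Sketch only: ToyBloomFilter is a two-hash stand-in, not Hive's class.
final class PartsBloomFilterSketch {

  static final class ToyBloomFilter {
    private final BitSet bits;
    private final int size;

    ToyBloomFilter(int expectedEntries, float fpp) {
      // Standard sizing: m = -n * ln(p) / (ln 2)^2, floored at 64 bits.
      this.size = Math.max(64, (int) (-expectedEntries * Math.log(fpp)
          / (Math.log(2) * Math.log(2))));
      this.bits = new BitSet(size);
    }

    void add(byte[] key) {
      int h = Arrays.hashCode(key);
      bits.set(Math.floorMod(h, size));
      bits.set(Math.floorMod(h * 31 + 17, size));
    }

    boolean test(byte[] key) {
      int h = Arrays.hashCode(key);
      return bits.get(Math.floorMod(h, size))
          && bits.get(Math.floorMod(h * 31 + 17, size));
    }
  }

  // Mirrors the new helper in the patch: build once, reuse for every column.
  static ToyBloomFilter createPartsBloomFilter(int maxParts, float fpp,
      List<String> partNames) {
    ToyBloomFilter filter = new ToyBloomFilter(maxParts, fpp);
    for (String partName : partNames) {
      filter.add(partName.getBytes());
    }
    return filter;
  }
}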

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/explainuser_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/explainuser_2.q b/ql/src/test/queries/clientpositive/explainuser_2.q
index 8e8ac92..6e98fa0 100644
--- a/ql/src/test/queries/clientpositive/explainuser_2.q
+++ b/ql/src/test/queries/clientpositive/explainuser_2.q
@@ -1,4 +1,5 @@
 set hive.explain.user=true;
+set hive.metastore.aggregate.stats.cache.enabled=false;
 
 CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
index 8ae9a90..5c062ee 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q
@@ -1,6 +1,8 @@
 set hive.stats.fetch.column.stats=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.metastore.aggregate.stats.cache.enabled=false;
+
 
 create table if not exists ext_loc (
   state string,

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
index b7fc4e3..5f0160a 100644
--- a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
+++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial_ndv.q
@@ -2,6 +2,8 @@ set hive.metastore.stats.ndv.densityfunction=true;
 set hive.stats.fetch.column.stats=true;
 set hive.exec.dynamic.partition=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.metastore.aggregate.stats.cache.enabled=false;
+
 
 drop table if exists ext_loc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a0ccd11/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
index 5bf4ab1..7f66ff2 100644
--- a/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
+++ b/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
@@ -1,6 +1,7 @@
 set hive.auto.convert.join=true;
 set hive.auto.convert.join.noconditionaltask=true;
 set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.metastore.aggregate.stats.cache.enabled=false;
 
 -- Since the inputs are small, it should be automatically converted to mapjoin
 


[12/39] hive git commit: HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)

Posted by pr...@apache.org.
HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/26ec033c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/26ec033c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/26ec033c

Branch: refs/heads/llap
Commit: 26ec033c89a61fa0bf95b9b66da0842b22ec4c9b
Parents: 4b44408
Author: vikram <vi...@hortonworks.com>
Authored: Wed May 6 22:18:28 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Wed May 6 22:18:28 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   2 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |  54 +-
 ql/src/test/queries/clientpositive/mergejoin.q  |  17 +
 .../test/results/clientpositive/mergejoin.q.out | 844 ++++++++++++++++---
 .../clientpositive/tez/auto_join29.q.out        | 500 +++++++++++
 .../results/clientpositive/tez/mergejoin.q.out  | 844 ++++++++++++++++---
 6 files changed, 2005 insertions(+), 256 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 134fded..3eff7d0 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -124,6 +124,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   mapreduce2.q,\
   merge1.q,\
   merge2.q,\
+  mergejoin.q,\
   metadataonly1.q,\
   metadata_only_queries.q,\
   optimize_nullscan.q,\
@@ -160,7 +161,6 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   transform1.q,\
   transform2.q,\
   transform_ppr1.q,\
-  mergejoin.q,\
   transform_ppr2.q,\
   union2.q,\
   union3.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
index cb0a5e7..d1d5e2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.hive.ql.exec;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +39,7 @@ import org.apache.hadoop.hive.ql.exec.tez.TezContext;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.api.OperatorType;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
@@ -83,6 +87,7 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
 
   transient List<Operator<? extends OperatorDesc>> originalParents =
       new ArrayList<Operator<? extends OperatorDesc>>();
+  transient Set<Integer> fetchInputAtClose;
 
   public CommonMergeJoinOperator() {
     super();
@@ -93,6 +98,7 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
   public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
     Collection<Future<?>> result = super.initializeOp(hconf);
     firstFetchHappened = false;
+    fetchInputAtClose = getFetchInputAtCloseList();
 
     int maxAlias = 0;
     for (byte pos = 0; pos < order.length; pos++) {
@@ -145,6 +151,25 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
     return result;
   }
 
+  /*
+   * In case of outer joins, we need to push records through even if one of the sides is done
+   * sending records. For example, in the case of a full outer join, the right side needs to send
+   * data for the join even after the left side has completed sending all the records on its side.
+   * This can be done once at initialize time; at close, these tags will still forward records until
+   * they have no more to send. Also, subsequent joins need to fetch their data as well since
+   * any join following the outer join could produce results with one of the outer sides depending on
+   * the join condition. We could optimize for the case of inner joins in the future here.
+   */
+  private Set<Integer> getFetchInputAtCloseList() {
+    Set<Integer> retval = new TreeSet<Integer>();
+    for (JoinCondDesc joinCondDesc : conf.getConds()) {
+      retval.add(joinCondDesc.getLeft());
+      retval.add(joinCondDesc.getRight());
+    }
+
+    return retval;
+  }
+
   @Override
   public void endGroup() throws HiveException {
     // we do not want the end group to cause a checkAndGenObject
@@ -173,7 +198,6 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
     List<Object> value = getFilteredValue(alias, row);
     // compute keys and values as StandardObjects
     List<Object> key = mergeJoinComputeKeys(row, alias);
-
     if (!firstFetchHappened) {
       firstFetchHappened = true;
       // fetch the first group for all small table aliases
@@ -405,9 +429,37 @@ public class CommonMergeJoinOperator extends AbstractMapJoinOperator<CommonMerge
 
     while (!allFetchDone) {
       List<Byte> ret = joinOneGroup();
+      for (int i = 0; i < fetchDone.length; i++) {
+        // if the fetch is not completed for the big table
+        if (i == posBigTable) {
+          // if we are in close op phase, we have definitely exhausted the big table input
+          fetchDone[i] = true;
+          continue;
+        }
+
+        // in case of outer joins, we need to pull in records from the sides we still
+        // need to produce output for apart from the big table, e.g. a full outer join
+        if ((fetchInputAtClose.contains(i)) && (fetchDone[i] == false)) {
+          // if we have never fetched, we need to fetch before we can do the join
+          if (firstFetchHappened == false) {
+            // we need to fetch all the needed ones at least once to ensure bootstrapping
+            if (i == (fetchDone.length - 1)) {
+              firstFetchHappened = true;
+            }
+            // This is a bootstrap. The joinOneGroup automatically fetches the next rows.
+            fetchNextGroup((byte) i);
+          }
+          // Do the join. It does fetching of next row groups itself.
+          if (i == (fetchDone.length - 1)) {
+            ret = joinOneGroup();
+          }
+        }
+      }
+
       if (ret == null || ret.size() == 0) {
         break;
       }
+
       reportProgress();
       numMapRowsRead++;
       allFetchDone = allFetchDone();
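The close-time logic above has two pieces: the big table's position is marked done unconditionally (reaching close means its input is exhausted), while every position named in a join condition keeps fetching until it reports done, so the outer sides can still emit their null-extended rows. A stripped-down sketch of the bookkeeping (JoinCond is an illustrative stand-in for Hive's JoinCondDesc):

import java.util.Set;
import java.util.TreeSet;

// Sketch only: collect the input positions that must keep fetching at
// close because they sit on either side of some join condition.
final class FetchAtCloseSketch {

  static final class JoinCond {
    final int left, right;
    JoinCond(int left, int right) { this.left = left; this.right = right; }
  }

  static Set<Integer> fetchInputAtClose(JoinCond[] conds) {
    Set<Integer> retval = new TreeSet<>();
    for (JoinCond cond : conds) {
      retval.add(cond.left);
      retval.add(cond.right);
    }
    return retval;
  }
}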

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/queries/clientpositive/mergejoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/mergejoin.q b/ql/src/test/queries/clientpositive/mergejoin.q
index 257337a..59374ca 100644
--- a/ql/src/test/queries/clientpositive/mergejoin.q
+++ b/ql/src/test/queries/clientpositive/mergejoin.q
@@ -6,11 +6,14 @@ set hive.optimize.metadataonly=false;
 set hive.optimize.index.filter=true;
 set hive.vectorized.execution.enabled=true;
 
+-- SORT_QUERY_RESULTS
+
 explain
 select * from src a join src1 b on a.key = b.key;
 
 select * from src a join src1 b on a.key = b.key;
 
+
 CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS ORCFILE;
 CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
@@ -105,3 +108,17 @@ join
 (select t2.key as id, t2.value as od from tab_part t2 order by id, od) rt2) vt2
 where vt1.id=vt2.id;
 
+set mapred.reduce.tasks=18;
+select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
+select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key;
+
+select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key;
+
+select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key;
+
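The expected results for the first two new queries can be checked by hand: the left subquery (key = 0) produces three rows, the right subquery (key = 98) produces two, and the keys never match, so the full outer join returns all five rows null-extended on the opposite side, while the right outer join returns only the two right-side rows. Note also that in the last query the ON clause binds to the b-c full outer join, leaving the a-b join with no condition, which is why the q.out below carries a cross-product warning.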

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out
index af3d7df..cb96ab3 100644
--- a/ql/src/test/results/clientpositive/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/mergejoin.q.out
@@ -1,7 +1,11 @@
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -338,74 +342,12 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-53	val_53	2008-04-08	53	val_53	2008-04-08
-57	val_57	2008-04-08	57	val_57	2008-04-08
-64	val_64	2008-04-08	64	val_64	2008-04-08
-66	val_66	2008-04-08	66	val_66	2008-04-08
-77	val_77	2008-04-08	77	val_77	2008-04-08
-80	val_80	2008-04-08	80	val_80	2008-04-08
-82	val_82	2008-04-08	82	val_82	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-86	val_86	2008-04-08	86	val_86	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -447,6 +389,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -484,6 +430,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -510,10 +457,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -560,6 +510,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -571,6 +525,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -600,6 +558,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -661,11 +620,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -681,6 +650,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -695,6 +668,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -732,6 +706,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -751,6 +729,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -809,68 +788,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
@@ -880,6 +797,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 64	val_64	2008-04-08	64	val_64	2008-04-08
 66	val_66	2008-04-08	66	val_66	2008-04-08
 77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
 80	val_80	2008-04-08	80	val_80	2008-04-08
 82	val_82	2008-04-08	82	val_82	2008-04-08
 84	val_84	2008-04-08	84	val_84	2008-04-08
@@ -895,11 +813,35 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -941,6 +883,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -978,6 +924,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -1004,10 +951,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -1054,6 +1004,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -1065,6 +1019,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -1094,6 +1052,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -1155,11 +1114,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -1175,6 +1144,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -1189,6 +1162,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -1226,6 +1200,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -1245,6 +1223,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -1303,6 +1282,31 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+53	val_53	2008-04-08	53	val_53	2008-04-08
+57	val_57	2008-04-08	57	val_57	2008-04-08
+64	val_64	2008-04-08	64	val_64	2008-04-08
+66	val_66	2008-04-08	66	val_66	2008-04-08
+77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
+80	val_80	2008-04-08	80	val_80	2008-04-08
+82	val_82	2008-04-08	82	val_82	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+86	val_86	2008-04-08	86	val_86	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
 PREHOOK: query: explain
 select count(*)
 from tab a left outer join tab_part b on a.key = b.key
@@ -2565,3 +2569,589 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+Warning: Shuffle Join JOIN[9][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08

http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/tez/auto_join29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_join29.q.out b/ql/src/test/results/clientpositive/tez/auto_join29.q.out
index 2cab06e..18c07d9 100644
--- a/ql/src/test/results/clientpositive/tez/auto_join29.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_join29.q.out
@@ -2673,6 +2673,506 @@ POSTHOOK: query: SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AN
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	0	val_0
+NULL	NULL	NULL	NULL	10	val_10
+NULL	NULL	NULL	NULL	100	val_100
+NULL	NULL	NULL	NULL	100	val_100
+NULL	NULL	NULL	NULL	103	val_103
+NULL	NULL	NULL	NULL	103	val_103
+NULL	NULL	NULL	NULL	104	val_104
+NULL	NULL	NULL	NULL	104	val_104
+NULL	NULL	NULL	NULL	105	val_105
+NULL	NULL	NULL	NULL	11	val_11
+NULL	NULL	NULL	NULL	111	val_111
+NULL	NULL	NULL	NULL	113	val_113
+NULL	NULL	NULL	NULL	113	val_113
+NULL	NULL	NULL	NULL	114	val_114
+NULL	NULL	NULL	NULL	116	val_116
+NULL	NULL	NULL	NULL	118	val_118
+NULL	NULL	NULL	NULL	118	val_118
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	119	val_119
+NULL	NULL	NULL	NULL	12	val_12
+NULL	NULL	NULL	NULL	12	val_12
+NULL	NULL	NULL	NULL	120	val_120
+NULL	NULL	NULL	NULL	120	val_120
+NULL	NULL	NULL	NULL	125	val_125
+NULL	NULL	NULL	NULL	125	val_125
+NULL	NULL	NULL	NULL	126	val_126
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	128	val_128
+NULL	NULL	NULL	NULL	129	val_129
+NULL	NULL	NULL	NULL	129	val_129
+NULL	NULL	NULL	NULL	131	val_131
+NULL	NULL	NULL	NULL	133	val_133
+NULL	NULL	NULL	NULL	134	val_134
+NULL	NULL	NULL	NULL	134	val_134
+NULL	NULL	NULL	NULL	136	val_136
+NULL	NULL	NULL	NULL	137	val_137
+NULL	NULL	NULL	NULL	137	val_137
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	138	val_138
+NULL	NULL	NULL	NULL	143	val_143
+NULL	NULL	NULL	NULL	145	val_145
+NULL	NULL	NULL	NULL	146	val_146
+NULL	NULL	NULL	NULL	146	val_146
+NULL	NULL	NULL	NULL	149	val_149
+NULL	NULL	NULL	NULL	149	val_149
+NULL	NULL	NULL	NULL	15	val_15
+NULL	NULL	NULL	NULL	15	val_15
+NULL	NULL	NULL	NULL	150	val_150
+NULL	NULL	NULL	NULL	152	val_152
+NULL	NULL	NULL	NULL	152	val_152
+NULL	NULL	NULL	NULL	153	val_153
+NULL	NULL	NULL	NULL	155	val_155
+NULL	NULL	NULL	NULL	156	val_156
+NULL	NULL	NULL	NULL	157	val_157
+NULL	NULL	NULL	NULL	158	val_158
+NULL	NULL	NULL	NULL	160	val_160
+NULL	NULL	NULL	NULL	162	val_162
+NULL	NULL	NULL	NULL	163	val_163
+NULL	NULL	NULL	NULL	164	val_164
+NULL	NULL	NULL	NULL	164	val_164
+NULL	NULL	NULL	NULL	165	val_165
+NULL	NULL	NULL	NULL	165	val_165
+NULL	NULL	NULL	NULL	166	val_166
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	167	val_167
+NULL	NULL	NULL	NULL	168	val_168
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	169	val_169
+NULL	NULL	NULL	NULL	17	val_17
+NULL	NULL	NULL	NULL	170	val_170
+NULL	NULL	NULL	NULL	172	val_172
+NULL	NULL	NULL	NULL	172	val_172
+NULL	NULL	NULL	NULL	174	val_174
+NULL	NULL	NULL	NULL	174	val_174
+NULL	NULL	NULL	NULL	175	val_175
+NULL	NULL	NULL	NULL	175	val_175
+NULL	NULL	NULL	NULL	176	val_176
+NULL	NULL	NULL	NULL	176	val_176
+NULL	NULL	NULL	NULL	177	val_177
+NULL	NULL	NULL	NULL	178	val_178
+NULL	NULL	NULL	NULL	179	val_179
+NULL	NULL	NULL	NULL	179	val_179
+NULL	NULL	NULL	NULL	18	val_18
+NULL	NULL	NULL	NULL	18	val_18
+NULL	NULL	NULL	NULL	180	val_180
+NULL	NULL	NULL	NULL	181	val_181
+NULL	NULL	NULL	NULL	183	val_183
+NULL	NULL	NULL	NULL	186	val_186
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	187	val_187
+NULL	NULL	NULL	NULL	189	val_189
+NULL	NULL	NULL	NULL	19	val_19
+NULL	NULL	NULL	NULL	190	val_190
+NULL	NULL	NULL	NULL	191	val_191
+NULL	NULL	NULL	NULL	191	val_191
+NULL	NULL	NULL	NULL	192	val_192
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	193	val_193
+NULL	NULL	NULL	NULL	194	val_194
+NULL	NULL	NULL	NULL	195	val_195
+NULL	NULL	NULL	NULL	195	val_195
+NULL	NULL	NULL	NULL	196	val_196
+NULL	NULL	NULL	NULL	197	val_197
+NULL	NULL	NULL	NULL	197	val_197
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	199	val_199
+NULL	NULL	NULL	NULL	2	val_2
+NULL	NULL	NULL	NULL	20	val_20
+NULL	NULL	NULL	NULL	200	val_200
+NULL	NULL	NULL	NULL	200	val_200
+NULL	NULL	NULL	NULL	201	val_201
+NULL	NULL	NULL	NULL	202	val_202
+NULL	NULL	NULL	NULL	203	val_203
+NULL	NULL	NULL	NULL	203	val_203
+NULL	NULL	NULL	NULL	205	val_205
+NULL	NULL	NULL	NULL	205	val_205
+NULL	NULL	NULL	NULL	207	val_207
+NULL	NULL	NULL	NULL	207	val_207
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	208	val_208
+NULL	NULL	NULL	NULL	209	val_209
+NULL	NULL	NULL	NULL	209	val_209
+NULL	NULL	NULL	NULL	213	val_213
+NULL	NULL	NULL	NULL	213	val_213
+NULL	NULL	NULL	NULL	214	val_214
+NULL	NULL	NULL	NULL	216	val_216
+NULL	NULL	NULL	NULL	216	val_216
+NULL	NULL	NULL	NULL	217	val_217
+NULL	NULL	NULL	NULL	217	val_217
+NULL	NULL	NULL	NULL	218	val_218
+NULL	NULL	NULL	NULL	219	val_219
+NULL	NULL	NULL	NULL	219	val_219
+NULL	NULL	NULL	NULL	221	val_221
+NULL	NULL	NULL	NULL	221	val_221
+NULL	NULL	NULL	NULL	222	val_222
+NULL	NULL	NULL	NULL	223	val_223
+NULL	NULL	NULL	NULL	223	val_223
+NULL	NULL	NULL	NULL	224	val_224
+NULL	NULL	NULL	NULL	224	val_224
+NULL	NULL	NULL	NULL	226	val_226
+NULL	NULL	NULL	NULL	228	val_228
+NULL	NULL	NULL	NULL	229	val_229
+NULL	NULL	NULL	NULL	229	val_229
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	230	val_230
+NULL	NULL	NULL	NULL	233	val_233
+NULL	NULL	NULL	NULL	233	val_233
+NULL	NULL	NULL	NULL	235	val_235
+NULL	NULL	NULL	NULL	237	val_237
+NULL	NULL	NULL	NULL	237	val_237
+NULL	NULL	NULL	NULL	238	val_238
+NULL	NULL	NULL	NULL	238	val_238
+NULL	NULL	NULL	NULL	239	val_239
+NULL	NULL	NULL	NULL	239	val_239
+NULL	NULL	NULL	NULL	24	val_24
+NULL	NULL	NULL	NULL	24	val_24
+NULL	NULL	NULL	NULL	241	val_241
+NULL	NULL	NULL	NULL	242	val_242
+NULL	NULL	NULL	NULL	242	val_242
+NULL	NULL	NULL	NULL	244	val_244
+NULL	NULL	NULL	NULL	247	val_247
+NULL	NULL	NULL	NULL	248	val_248
+NULL	NULL	NULL	NULL	249	val_249
+NULL	NULL	NULL	NULL	252	val_252
+NULL	NULL	NULL	NULL	255	val_255
+NULL	NULL	NULL	NULL	255	val_255
+NULL	NULL	NULL	NULL	256	val_256
+NULL	NULL	NULL	NULL	256	val_256
+NULL	NULL	NULL	NULL	257	val_257
+NULL	NULL	NULL	NULL	258	val_258
+NULL	NULL	NULL	NULL	26	val_26
+NULL	NULL	NULL	NULL	26	val_26
+NULL	NULL	NULL	NULL	260	val_260
+NULL	NULL	NULL	NULL	262	val_262
+NULL	NULL	NULL	NULL	263	val_263
+NULL	NULL	NULL	NULL	265	val_265
+NULL	NULL	NULL	NULL	265	val_265
+NULL	NULL	NULL	NULL	266	val_266
+NULL	NULL	NULL	NULL	27	val_27
+NULL	NULL	NULL	NULL	272	val_272
+NULL	NULL	NULL	NULL	272	val_272
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	273	val_273
+NULL	NULL	NULL	NULL	274	val_274
+NULL	NULL	NULL	NULL	275	val_275
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	277	val_277
+NULL	NULL	NULL	NULL	278	val_278
+NULL	NULL	NULL	NULL	278	val_278
+NULL	NULL	NULL	NULL	28	val_28
+NULL	NULL	NULL	NULL	280	val_280
+NULL	NULL	NULL	NULL	280	val_280
+NULL	NULL	NULL	NULL	281	val_281
+NULL	NULL	NULL	NULL	281	val_281
+NULL	NULL	NULL	NULL	282	val_282
+NULL	NULL	NULL	NULL	282	val_282
+NULL	NULL	NULL	NULL	283	val_283
+NULL	NULL	NULL	NULL	284	val_284
+NULL	NULL	NULL	NULL	285	val_285
+NULL	NULL	NULL	NULL	286	val_286
+NULL	NULL	NULL	NULL	287	val_287
+NULL	NULL	NULL	NULL	288	val_288
+NULL	NULL	NULL	NULL	288	val_288
+NULL	NULL	NULL	NULL	289	val_289
+NULL	NULL	NULL	NULL	291	val_291
+NULL	NULL	NULL	NULL	292	val_292
+NULL	NULL	NULL	NULL	296	val_296
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	298	val_298
+NULL	NULL	NULL	NULL	30	val_30
+NULL	NULL	NULL	NULL	302	val_302
+NULL	NULL	NULL	NULL	305	val_305
+NULL	NULL	NULL	NULL	306	val_306
+NULL	NULL	NULL	NULL	307	val_307
+NULL	NULL	NULL	NULL	307	val_307
+NULL	NULL	NULL	NULL	308	val_308
+NULL	NULL	NULL	NULL	309	val_309
+NULL	NULL	NULL	NULL	309	val_309
+NULL	NULL	NULL	NULL	310	val_310
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	311	val_311
+NULL	NULL	NULL	NULL	315	val_315
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	316	val_316
+NULL	NULL	NULL	NULL	317	val_317
+NULL	NULL	NULL	NULL	317	val_317
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	318	val_318
+NULL	NULL	NULL	NULL	321	val_321
+NULL	NULL	NULL	NULL	321	val_321
+NULL	NULL	NULL	NULL	322	val_322
+NULL	NULL	NULL	NULL	322	val_322
+NULL	NULL	NULL	NULL	323	val_323
+NULL	NULL	NULL	NULL	325	val_325
+NULL	NULL	NULL	NULL	325	val_325
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	327	val_327
+NULL	NULL	NULL	NULL	33	val_33
+NULL	NULL	NULL	NULL	331	val_331
+NULL	NULL	NULL	NULL	331	val_331
+NULL	NULL	NULL	NULL	332	val_332
+NULL	NULL	NULL	NULL	333	val_333
+NULL	NULL	NULL	NULL	333	val_333
+NULL	NULL	NULL	NULL	335	val_335
+NULL	NULL	NULL	NULL	336	val_336
+NULL	NULL	NULL	NULL	338	val_338
+NULL	NULL	NULL	NULL	339	val_339
+NULL	NULL	NULL	NULL	34	val_34
+NULL	NULL	NULL	NULL	341	val_341
+NULL	NULL	NULL	NULL	342	val_342
+NULL	NULL	NULL	NULL	342	val_342
+NULL	NULL	NULL	NULL	344	val_344
+NULL	NULL	NULL	NULL	344	val_344
+NULL	NULL	NULL	NULL	345	val_345
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	348	val_348
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	35	val_35
+NULL	NULL	NULL	NULL	351	val_351
+NULL	NULL	NULL	NULL	353	val_353
+NULL	NULL	NULL	NULL	353	val_353
+NULL	NULL	NULL	NULL	356	val_356
+NULL	NULL	NULL	NULL	360	val_360
+NULL	NULL	NULL	NULL	362	val_362
+NULL	NULL	NULL	NULL	364	val_364
+NULL	NULL	NULL	NULL	365	val_365
+NULL	NULL	NULL	NULL	366	val_366
+NULL	NULL	NULL	NULL	367	val_367
+NULL	NULL	NULL	NULL	367	val_367
+NULL	NULL	NULL	NULL	368	val_368
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	369	val_369
+NULL	NULL	NULL	NULL	37	val_37
+NULL	NULL	NULL	NULL	37	val_37
+NULL	NULL	NULL	NULL	373	val_373
+NULL	NULL	NULL	NULL	374	val_374
+NULL	NULL	NULL	NULL	375	val_375
+NULL	NULL	NULL	NULL	377	val_377
+NULL	NULL	NULL	NULL	378	val_378
+NULL	NULL	NULL	NULL	379	val_379
+NULL	NULL	NULL	NULL	382	val_382
+NULL	NULL	NULL	NULL	382	val_382
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	384	val_384
+NULL	NULL	NULL	NULL	386	val_386
+NULL	NULL	NULL	NULL	389	val_389
+NULL	NULL	NULL	NULL	392	val_392
+NULL	NULL	NULL	NULL	393	val_393
+NULL	NULL	NULL	NULL	394	val_394
+NULL	NULL	NULL	NULL	395	val_395
+NULL	NULL	NULL	NULL	395	val_395
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	396	val_396
+NULL	NULL	NULL	NULL	397	val_397
+NULL	NULL	NULL	NULL	397	val_397
+NULL	NULL	NULL	NULL	399	val_399
+NULL	NULL	NULL	NULL	399	val_399
+NULL	NULL	NULL	NULL	4	val_4
+NULL	NULL	NULL	NULL	400	val_400
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	401	val_401
+NULL	NULL	NULL	NULL	402	val_402
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	403	val_403
+NULL	NULL	NULL	NULL	404	val_404
+NULL	NULL	NULL	NULL	404	val_404
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	406	val_406
+NULL	NULL	NULL	NULL	407	val_407
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	409	val_409
+NULL	NULL	NULL	NULL	41	val_41
+NULL	NULL	NULL	NULL	411	val_411
+NULL	NULL	NULL	NULL	413	val_413
+NULL	NULL	NULL	NULL	413	val_413
+NULL	NULL	NULL	NULL	414	val_414
+NULL	NULL	NULL	NULL	414	val_414
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	417	val_417
+NULL	NULL	NULL	NULL	418	val_418
+NULL	NULL	NULL	NULL	419	val_419
+NULL	NULL	NULL	NULL	42	val_42
+NULL	NULL	NULL	NULL	42	val_42
+NULL	NULL	NULL	NULL	421	val_421
+NULL	NULL	NULL	NULL	424	val_424
+NULL	NULL	NULL	NULL	424	val_424
+NULL	NULL	NULL	NULL	427	val_427
+NULL	NULL	NULL	NULL	429	val_429
+NULL	NULL	NULL	NULL	429	val_429
+NULL	NULL	NULL	NULL	43	val_43
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	430	val_430
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	431	val_431
+NULL	NULL	NULL	NULL	432	val_432
+NULL	NULL	NULL	NULL	435	val_435
+NULL	NULL	NULL	NULL	436	val_436
+NULL	NULL	NULL	NULL	437	val_437
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	438	val_438
+NULL	NULL	NULL	NULL	439	val_439
+NULL	NULL	NULL	NULL	439	val_439
+NULL	NULL	NULL	NULL	44	val_44
+NULL	NULL	NULL	NULL	443	val_443
+NULL	NULL	NULL	NULL	444	val_444
+NULL	NULL	NULL	NULL	446	val_446
+NULL	NULL	NULL	NULL	448	val_448
+NULL	NULL	NULL	NULL	449	val_449
+NULL	NULL	NULL	NULL	452	val_452
+NULL	NULL	NULL	NULL	453	val_453
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	454	val_454
+NULL	NULL	NULL	NULL	455	val_455
+NULL	NULL	NULL	NULL	457	val_457
+NULL	NULL	NULL	NULL	458	val_458
+NULL	NULL	NULL	NULL	458	val_458
+NULL	NULL	NULL	NULL	459	val_459
+NULL	NULL	NULL	NULL	459	val_459
+NULL	NULL	NULL	NULL	460	val_460
+NULL	NULL	NULL	NULL	462	val_462
+NULL	NULL	NULL	NULL	462	val_462
+NULL	NULL	NULL	NULL	463	val_463
+NULL	NULL	NULL	NULL	463	val_463
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	466	val_466
+NULL	NULL	NULL	NULL	467	val_467
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	468	val_468
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	469	val_469
+NULL	NULL	NULL	NULL	47	val_47
+NULL	NULL	NULL	NULL	470	val_470
+NULL	NULL	NULL	NULL	472	val_472
+NULL	NULL	NULL	NULL	475	val_475
+NULL	NULL	NULL	NULL	477	val_477
+NULL	NULL	NULL	NULL	478	val_478
+NULL	NULL	NULL	NULL	478	val_478
+NULL	NULL	NULL	NULL	479	val_479
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	480	val_480
+NULL	NULL	NULL	NULL	481	val_481
+NULL	NULL	NULL	NULL	482	val_482
+NULL	NULL	NULL	NULL	483	val_483
+NULL	NULL	NULL	NULL	484	val_484
+NULL	NULL	NULL	NULL	485	val_485
+NULL	NULL	NULL	NULL	487	val_487
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	489	val_489
+NULL	NULL	NULL	NULL	490	val_490
+NULL	NULL	NULL	NULL	491	val_491
+NULL	NULL	NULL	NULL	492	val_492
+NULL	NULL	NULL	NULL	492	val_492
+NULL	NULL	NULL	NULL	493	val_493
+NULL	NULL	NULL	NULL	494	val_494
+NULL	NULL	NULL	NULL	495	val_495
+NULL	NULL	NULL	NULL	496	val_496
+NULL	NULL	NULL	NULL	497	val_497
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	498	val_498
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	5	val_5
+NULL	NULL	NULL	NULL	51	val_51
+NULL	NULL	NULL	NULL	51	val_51
+NULL	NULL	NULL	NULL	53	val_53
+NULL	NULL	NULL	NULL	54	val_54
+NULL	NULL	NULL	NULL	57	val_57
+NULL	NULL	NULL	NULL	58	val_58
+NULL	NULL	NULL	NULL	58	val_58
+NULL	NULL	NULL	NULL	64	val_64
+NULL	NULL	NULL	NULL	65	val_65
+NULL	NULL	NULL	NULL	66	val_66
+NULL	NULL	NULL	NULL	67	val_67
+NULL	NULL	NULL	NULL	67	val_67
+NULL	NULL	NULL	NULL	69	val_69
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	70	val_70
+NULL	NULL	NULL	NULL	72	val_72
+NULL	NULL	NULL	NULL	72	val_72
+NULL	NULL	NULL	NULL	74	val_74
+NULL	NULL	NULL	NULL	76	val_76
+NULL	NULL	NULL	NULL	76	val_76
+NULL	NULL	NULL	NULL	77	val_77
+NULL	NULL	NULL	NULL	78	val_78
+NULL	NULL	NULL	NULL	8	val_8
+NULL	NULL	NULL	NULL	80	val_80
+NULL	NULL	NULL	NULL	82	val_82
+NULL	NULL	NULL	NULL	83	val_83
+NULL	NULL	NULL	NULL	83	val_83
+NULL	NULL	NULL	NULL	84	val_84
+NULL	NULL	NULL	NULL	84	val_84
+NULL	NULL	NULL	NULL	85	val_85
+NULL	NULL	NULL	NULL	86	val_86
+NULL	NULL	NULL	NULL	87	val_87
+NULL	NULL	NULL	NULL	9	val_9
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	90	val_90
+NULL	NULL	NULL	NULL	92	val_92
+NULL	NULL	NULL	NULL	95	val_95
+NULL	NULL	NULL	NULL	95	val_95
+NULL	NULL	NULL	NULL	96	val_96
+NULL	NULL	NULL	NULL	97	val_97
+NULL	NULL	NULL	NULL	97	val_97
+NULL	NULL	NULL	NULL	98	val_98
+NULL	NULL	NULL	NULL	98	val_98
 PREHOOK: query: explain
 SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value
 PREHOOK: type: QUERY
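
The long runs of +NULL rows in the expected output above are the unmatched outer-join rows that HIVE-10542 restores: when the join-time predicates in the ON clause disqualify every pairing for a row, an outer join must still emit that row with the other side padded out as NULL. Below is a minimal sketch of that semantics — invented two-column data and a simplified right-outer loop, not Hive code — just to show where the NULL padding comes from.

// OuterJoinNullPadding.java -- illustrative only; data and schema are invented.
public class OuterJoinNullPadding {
    static final class Row {
        final int key; final String value;
        Row(int key, String value) { this.key = key; this.value = value; }
    }

    public static void main(String[] args) {
        Row[] left  = { new Row(0, "val_0"), new Row(100, "val_100") };
        Row[] right = { new Row(0, "val_0"), new Row(100, "val_100") };

        // Right-outer join on key with an extra join-time filter (left.key < 10),
        // mirroring the filtered ON clauses in the queries above.
        for (Row r : right) {
            boolean matched = false;
            for (Row l : left) {
                if (l.key == r.key && l.key < 10) {
                    System.out.println(l.key + "\t" + l.value + "\t" + r.key + "\t" + r.value);
                    matched = true;
                }
            }
            if (!matched) {
                // No qualifying match: keep the right row, NULL-pad the left side.
                // This is the shape of the +NULL rows in the diffs above.
                System.out.println("NULL\tNULL\t" + r.key + "\t" + r.value);
            }
        }
        // Output:
        // 0	val_0	0	val_0
        // NULL	NULL	100	val_100
    }
}

Before the fix, rows like the second output line could be dropped in certain Tez plans, which is presumably why these .q.out files gain hundreds of +NULL lines.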


[37/39] hive git commit: HIVE-10609: Vectorization : Q64 fails with ClassCastException (Matt McCline via Vikram Dixit K)

Posted by pr...@apache.org.
HIVE-10609: Vectorization : Q64 fails with ClassCastException (Matt McCline via Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f65528a9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f65528a9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f65528a9

Branch: refs/heads/llap
Commit: f65528a961bfe702a1c8f7aafeab0d23dadb6eea
Parents: 1c6f345
Author: vikram <vi...@hortonworks.com>
Authored: Mon May 11 16:00:07 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Mon May 11 16:00:07 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java    | 8 ++++++--
 .../ql/exec/vector/VectorMapJoinOuterFilteredOperator.java   | 6 ++++--
 2 files changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f65528a9/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
index 15c747e..e9bd44a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
@@ -101,15 +101,19 @@ public class VectorMapJoinOperator extends VectorMapJoinBaseOperator {
   @Override
   public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
 
+    // Use a final variable to properly parameterize the processVectorInspector closure.
+    // Using a member variable in the closure will not do the right thing...
+    final int parameterizePosBigTable = conf.getPosBigTable();
+
     // Code borrowed from VectorReduceSinkOperator.initializeOp
     VectorExpressionWriterFactory.processVectorInspector(
-        (StructObjectInspector) inputObjInspectors[0],
+        (StructObjectInspector) inputObjInspectors[parameterizePosBigTable],
         new VectorExpressionWriterFactory.SingleOIDClosure() {
           @Override
           public void assign(VectorExpressionWriter[] writers,
                              ObjectInspector objectInspector) {
             rowWriters = writers;
-            inputObjInspectors[0] = objectInspector;
+            inputObjInspectors[parameterizePosBigTable] = objectInspector;
           }
         });
     singleRow = new Object[rowWriters.length];

http://git-wip-us.apache.org/repos/asf/hive/blob/f65528a9/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
index 5aecfcc..a96816f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
@@ -62,10 +62,12 @@ public class VectorMapJoinOuterFilteredOperator extends VectorMapJoinBaseOperato
   @Override
   public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
 
+    final int posBigTable = conf.getPosBigTable();
+
     // We need a input object inspector that is for the row we will extract out of the
     // vectorized row batch, not for example, an original inspector for an ORC table, etc.
-    inputObjInspectors[0] =
-        VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]);
+    inputObjInspectors[posBigTable] =
+        VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[posBigTable]);
 
     // Call super VectorMapJoinOuterFilteredOperator, which calls super MapJoinOperator with
     // new input inspector.
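
For readers skimming the two hunks above: both apply the same Java pattern. An anonymous inner class may only capture local variables that are final (or effectively final), and the value it sees is fixed when the instance is created, so the fix copies conf.getPosBigTable() into a final local and indexes inputObjInspectors with it, instead of the hardcoded 0 that targets the wrong slot whenever the big table is not input 0. A minimal, self-contained sketch of the pattern follows; the class, interface, and slot names are invented, not Hive code.

// ClosureCaptureSketch.java -- illustrative only; names are hypothetical.
public class ClosureCaptureSketch {

    // Stand-in for the SingleOIDClosure callback seen in the hunks above.
    interface Assigner {
        void assign(String[] slots);
    }

    public static void main(String[] args) {
        String[] inspectors = { "orc-inspector", "orc-inspector" };

        int posBigTable = 1;                 // value known at initialize time
        final int capturedPos = posBigTable; // final local: legal to capture below

        Assigner assigner = new Assigner() {
            @Override
            public void assign(String[] slots) {
                // Indexing with the captured final local updates the big-table
                // slot. Hardcoding index 0 here would silently overwrite the
                // wrong slot whenever the big table is not input 0 -- the kind
                // of inspector mismatch behind the failure in the commit title.
                slots[capturedPos] = "standard-struct-inspector";
            }
        };

        assigner.assign(inspectors);
        System.out.println(inspectors[0] + " / " + inspectors[1]);
        // prints: orc-inspector / standard-struct-inspector
    }
}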


[11/39] hive git commit: HIVE-10542: Full outer joins in tez produce incorrect results in certain cases (Vikram Dixit K, reviewed by Gunther Hagleitner)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/26ec033c/ql/src/test/results/clientpositive/tez/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
index 48cd2a1..97df12a 100644
--- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
@@ -1,7 +1,11 @@
-PREHOOK: query: explain
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 PREHOOK: type: QUERY
-POSTHOOK: query: explain
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+explain
 select * from src a join src1 b on a.key = b.key
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -347,74 +351,12 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
 0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-51	val_51	2008-04-08	51	val_51	2008-04-08
-53	val_53	2008-04-08	53	val_53	2008-04-08
-57	val_57	2008-04-08	57	val_57	2008-04-08
-64	val_64	2008-04-08	64	val_64	2008-04-08
-66	val_66	2008-04-08	66	val_66	2008-04-08
-77	val_77	2008-04-08	77	val_77	2008-04-08
-80	val_80	2008-04-08	80	val_80	2008-04-08
-82	val_82	2008-04-08	82	val_82	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-84	val_84	2008-04-08	84	val_84	2008-04-08
-86	val_86	2008-04-08	86	val_86	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-95	val_95	2008-04-08	95	val_95	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
-97	val_97	2008-04-08	97	val_97	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -456,6 +398,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -493,6 +439,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -519,10 +466,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -569,6 +519,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -580,6 +534,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -609,6 +567,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -670,11 +629,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -690,6 +659,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -704,6 +677,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -741,6 +715,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -760,6 +738,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -818,68 +797,6 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
-PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tab
-PREHOOK: Input: default@tab@ds=2008-04-08
-PREHOOK: Input: default@tab_part
-PREHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tab
-POSTHOOK: Input: default@tab@ds=2008-04-08
-POSTHOOK: Input: default@tab_part
-POSTHOOK: Input: default@tab_part@ds=2008-04-08
-#### A masked pattern was here ####
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-0	val_0	2008-04-08	0	val_0	2008-04-08
-2	val_2	2008-04-08	2	val_2	2008-04-08
-4	val_4	2008-04-08	4	val_4	2008-04-08
-8	val_8	2008-04-08	8	val_8	2008-04-08
-11	val_11	2008-04-08	11	val_11	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-15	val_15	2008-04-08	15	val_15	2008-04-08
-17	val_17	2008-04-08	17	val_17	2008-04-08
-19	val_19	2008-04-08	19	val_19	2008-04-08
-20	val_20	2008-04-08	20	val_20	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-24	val_24	2008-04-08	24	val_24	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-26	val_26	2008-04-08	26	val_26	2008-04-08
-28	val_28	2008-04-08	28	val_28	2008-04-08
-33	val_33	2008-04-08	33	val_33	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-35	val_35	2008-04-08	35	val_35	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-37	val_37	2008-04-08	37	val_37	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-42	val_42	2008-04-08	42	val_42	2008-04-08
-44	val_44	2008-04-08	44	val_44	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
 51	val_51	2008-04-08	51	val_51	2008-04-08
@@ -889,6 +806,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 64	val_64	2008-04-08	64	val_64	2008-04-08
 66	val_66	2008-04-08	66	val_66	2008-04-08
 77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
 80	val_80	2008-04-08	80	val_80	2008-04-08
 82	val_82	2008-04-08	82	val_82	2008-04-08
 84	val_84	2008-04-08	84	val_84	2008-04-08
@@ -904,11 +822,35 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
 97	val_97	2008-04-08	97	val_97	2008-04-08
+PREHOOK: query: select * from tab a join tab_part b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tab a join tab_part b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
+0	val_0	2008-04-08	0	val_0	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 103	val_103	2008-04-08	103	val_103	2008-04-08
 105	val_105	2008-04-08	105	val_105	2008-04-08
+11	val_11	2008-04-08	11	val_11	2008-04-08
 114	val_114	2008-04-08	114	val_114	2008-04-08
 116	val_116	2008-04-08	116	val_116	2008-04-08
 118	val_118	2008-04-08	118	val_118	2008-04-08
@@ -950,6 +892,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
 149	val_149	2008-04-08	149	val_149	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
+15	val_15	2008-04-08	15	val_15	2008-04-08
 150	val_150	2008-04-08	150	val_150	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
 152	val_152	2008-04-08	152	val_152	2008-04-08
@@ -987,6 +933,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
 169	val_169	2008-04-08	169	val_169	2008-04-08
+17	val_17	2008-04-08	17	val_17	2008-04-08
 170	val_170	2008-04-08	170	val_170	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
 172	val_172	2008-04-08	172	val_172	2008-04-08
@@ -1013,10 +960,13 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 187	val_187	2008-04-08	187	val_187	2008-04-08
 189	val_189	2008-04-08	189	val_189	2008-04-08
+19	val_19	2008-04-08	19	val_19	2008-04-08
 190	val_190	2008-04-08	190	val_190	2008-04-08
 192	val_192	2008-04-08	192	val_192	2008-04-08
 194	val_194	2008-04-08	194	val_194	2008-04-08
 196	val_196	2008-04-08	196	val_196	2008-04-08
+2	val_2	2008-04-08	2	val_2	2008-04-08
+20	val_20	2008-04-08	20	val_20	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
 200	val_200	2008-04-08	200	val_200	2008-04-08
@@ -1063,6 +1013,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
 239	val_239	2008-04-08	239	val_239	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
+24	val_24	2008-04-08	24	val_24	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
 242	val_242	2008-04-08	242	val_242	2008-04-08
@@ -1074,6 +1028,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 255	val_255	2008-04-08	255	val_255	2008-04-08
 257	val_257	2008-04-08	257	val_257	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
+26	val_26	2008-04-08	26	val_26	2008-04-08
 260	val_260	2008-04-08	260	val_260	2008-04-08
 262	val_262	2008-04-08	262	val_262	2008-04-08
 266	val_266	2008-04-08	266	val_266	2008-04-08
@@ -1103,6 +1061,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
 277	val_277	2008-04-08	277	val_277	2008-04-08
+28	val_28	2008-04-08	28	val_28	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
 280	val_280	2008-04-08	280	val_280	2008-04-08
@@ -1164,11 +1123,21 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
 327	val_327	2008-04-08	327	val_327	2008-04-08
+33	val_33	2008-04-08	33	val_33	2008-04-08
 332	val_332	2008-04-08	332	val_332	2008-04-08
 336	val_336	2008-04-08	336	val_336	2008-04-08
 338	val_338	2008-04-08	338	val_338	2008-04-08
 341	val_341	2008-04-08	341	val_341	2008-04-08
 345	val_345	2008-04-08	345	val_345	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
+35	val_35	2008-04-08	35	val_35	2008-04-08
 356	val_356	2008-04-08	356	val_356	2008-04-08
 365	val_365	2008-04-08	365	val_365	2008-04-08
 367	val_367	2008-04-08	367	val_367	2008-04-08
@@ -1184,6 +1153,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
 369	val_369	2008-04-08	369	val_369	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
+37	val_37	2008-04-08	37	val_37	2008-04-08
 374	val_374	2008-04-08	374	val_374	2008-04-08
 378	val_378	2008-04-08	378	val_378	2008-04-08
 389	val_389	2008-04-08	389	val_389	2008-04-08
@@ -1198,6 +1171,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
 396	val_396	2008-04-08	396	val_396	2008-04-08
+4	val_4	2008-04-08	4	val_4	2008-04-08
 400	val_400	2008-04-08	400	val_400	2008-04-08
 402	val_402	2008-04-08	402	val_402	2008-04-08
 404	val_404	2008-04-08	404	val_404	2008-04-08
@@ -1235,6 +1209,10 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 417	val_417	2008-04-08	417	val_417	2008-04-08
 419	val_419	2008-04-08	419	val_419	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
+42	val_42	2008-04-08	42	val_42	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
 424	val_424	2008-04-08	424	val_424	2008-04-08
@@ -1254,6 +1232,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
 439	val_439	2008-04-08	439	val_439	2008-04-08
+44	val_44	2008-04-08	44	val_44	2008-04-08
 444	val_444	2008-04-08	444	val_444	2008-04-08
 446	val_446	2008-04-08	446	val_446	2008-04-08
 448	val_448	2008-04-08	448	val_448	2008-04-08
@@ -1312,6 +1291,31 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 493	val_493	2008-04-08	493	val_493	2008-04-08
 495	val_495	2008-04-08	495	val_495	2008-04-08
 497	val_497	2008-04-08	497	val_497	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+51	val_51	2008-04-08	51	val_51	2008-04-08
+53	val_53	2008-04-08	53	val_53	2008-04-08
+57	val_57	2008-04-08	57	val_57	2008-04-08
+64	val_64	2008-04-08	64	val_64	2008-04-08
+66	val_66	2008-04-08	66	val_66	2008-04-08
+77	val_77	2008-04-08	77	val_77	2008-04-08
+8	val_8	2008-04-08	8	val_8	2008-04-08
+80	val_80	2008-04-08	80	val_80	2008-04-08
+82	val_82	2008-04-08	82	val_82	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+84	val_84	2008-04-08	84	val_84	2008-04-08
+86	val_86	2008-04-08	86	val_86	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+95	val_95	2008-04-08	95	val_95	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
+97	val_97	2008-04-08	97	val_97	2008-04-08
 PREHOOK: query: explain
 select count(*)
 from tab a left outer join tab_part b on a.key = b.key
@@ -2526,3 +2530,589 @@ POSTHOOK: Input: default@tab_part
 POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 480
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a full outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select * from tab where tab.key = 0)a right outer join (select * from tab_part where tab_part.key = 98)b on a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+full outer join
+(select * from tab_part where tab_part.key = 98)b join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
+Warning: Shuffle Join MERGEJOIN[17][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tab
+PREHOOK: Input: default@tab@ds=2008-04-08
+PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select * from
+(select * from tab where tab.key = 0)a
+join
+(select * from tab_part where tab_part.key = 98)b full outer join tab_part c on a.key = b.key and b.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tab
+POSTHOOK: Input: default@tab@ds=2008-04-08
+POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
+#### A masked pattern was here ####
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+0	val_0	2008-04-08	98	val_98	2008-04-08	NULL	NULL	NULL
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	0	val_0	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	10	val_10	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	100	val_100	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	103	val_103	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	104	val_104	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	105	val_105	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	11	val_11	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	111	val_111	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	113	val_113	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	114	val_114	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	116	val_116	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	118	val_118	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	119	val_119	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	12	val_12	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	120	val_120	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	125	val_125	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	126	val_126	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	128	val_128	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	129	val_129	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	131	val_131	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	133	val_133	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	134	val_134	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	136	val_136	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	137	val_137	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	138	val_138	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	143	val_143	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	145	val_145	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	146	val_146	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	149	val_149	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	15	val_15	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	150	val_150	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	152	val_152	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	153	val_153	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	155	val_155	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	156	val_156	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	157	val_157	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	158	val_158	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	160	val_160	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	162	val_162	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	163	val_163	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	164	val_164	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	165	val_165	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	166	val_166	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	167	val_167	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	168	val_168	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	169	val_169	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	17	val_17	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	170	val_170	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	172	val_172	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	174	val_174	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	175	val_175	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	176	val_176	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	177	val_177	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	178	val_178	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	179	val_179	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	18	val_18	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	180	val_180	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	181	val_181	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	183	val_183	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	186	val_186	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	187	val_187	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	189	val_189	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	19	val_19	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	190	val_190	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	191	val_191	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	192	val_192	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	193	val_193	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	194	val_194	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	195	val_195	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	196	val_196	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	197	val_197	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	199	val_199	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	2	val_2	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	20	val_20	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	200	val_200	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	201	val_201	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	202	val_202	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	203	val_203	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	205	val_205	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	207	val_207	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	208	val_208	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	209	val_209	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	213	val_213	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	214	val_214	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	216	val_216	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	217	val_217	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	218	val_218	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	219	val_219	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	221	val_221	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	222	val_222	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	223	val_223	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	224	val_224	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	226	val_226	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	228	val_228	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	229	val_229	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	230	val_230	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	233	val_233	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	235	val_235	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	237	val_237	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	238	val_238	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	239	val_239	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	24	val_24	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	241	val_241	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	242	val_242	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	244	val_244	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	247	val_247	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	248	val_248	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	249	val_249	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	252	val_252	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	255	val_255	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	256	val_256	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	257	val_257	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	258	val_258	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	26	val_26	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	260	val_260	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	262	val_262	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	263	val_263	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	265	val_265	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	266	val_266	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	27	val_27	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	272	val_272	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	273	val_273	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	274	val_274	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	275	val_275	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	277	val_277	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	278	val_278	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	28	val_28	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	280	val_280	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	281	val_281	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	282	val_282	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	283	val_283	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	284	val_284	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	285	val_285	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	286	val_286	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	287	val_287	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	288	val_288	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	289	val_289	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	291	val_291	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	292	val_292	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	296	val_296	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	298	val_298	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	30	val_30	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	302	val_302	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	305	val_305	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	306	val_306	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	307	val_307	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	308	val_308	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	309	val_309	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	310	val_310	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	311	val_311	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	315	val_315	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	316	val_316	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	317	val_317	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	318	val_318	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	321	val_321	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	322	val_322	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	323	val_323	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	325	val_325	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	327	val_327	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	33	val_33	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	331	val_331	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	332	val_332	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	333	val_333	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	335	val_335	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	336	val_336	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	338	val_338	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	339	val_339	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	34	val_34	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	341	val_341	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	342	val_342	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	344	val_344	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	345	val_345	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	348	val_348	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	35	val_35	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	351	val_351	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	353	val_353	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	356	val_356	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	360	val_360	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	362	val_362	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	364	val_364	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	365	val_365	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	366	val_366	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	367	val_367	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	368	val_368	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	369	val_369	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	37	val_37	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	373	val_373	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	374	val_374	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	375	val_375	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	377	val_377	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	378	val_378	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	379	val_379	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	382	val_382	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	384	val_384	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	386	val_386	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	389	val_389	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	392	val_392	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	393	val_393	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	394	val_394	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	395	val_395	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	396	val_396	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	397	val_397	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	399	val_399	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	4	val_4	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	400	val_400	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	401	val_401	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	402	val_402	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	403	val_403	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	404	val_404	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	406	val_406	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	407	val_407	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	409	val_409	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	41	val_41	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	411	val_411	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	413	val_413	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	414	val_414	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	417	val_417	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	418	val_418	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	419	val_419	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	42	val_42	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	421	val_421	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	424	val_424	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	427	val_427	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	429	val_429	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	43	val_43	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	430	val_430	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	431	val_431	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	432	val_432	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	435	val_435	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	436	val_436	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	437	val_437	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	438	val_438	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	439	val_439	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	44	val_44	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	443	val_443	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	444	val_444	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	446	val_446	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	448	val_448	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	449	val_449	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	452	val_452	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	453	val_453	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	454	val_454	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	455	val_455	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	457	val_457	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	458	val_458	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	459	val_459	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	460	val_460	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	462	val_462	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	463	val_463	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	466	val_466	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	467	val_467	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	468	val_468	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	469	val_469	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	47	val_47	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	470	val_470	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	472	val_472	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	475	val_475	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	477	val_477	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	478	val_478	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	479	val_479	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	480	val_480	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	481	val_481	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	482	val_482	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	483	val_483	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	484	val_484	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	485	val_485	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	487	val_487	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	489	val_489	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	490	val_490	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	491	val_491	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	492	val_492	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	493	val_493	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	494	val_494	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	495	val_495	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	496	val_496	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	497	val_497	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	498	val_498	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	5	val_5	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	51	val_51	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	53	val_53	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	54	val_54	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	57	val_57	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	58	val_58	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	64	val_64	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	65	val_65	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	66	val_66	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	67	val_67	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	69	val_69	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	70	val_70	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	72	val_72	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	74	val_74	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	76	val_76	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	77	val_77	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	78	val_78	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	8	val_8	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	80	val_80	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	82	val_82	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	83	val_83	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	84	val_84	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	85	val_85	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	86	val_86	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	87	val_87	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	9	val_9	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	90	val_90	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	92	val_92	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	95	val_95	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	96	val_96	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	97	val_97	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08
+NULL	NULL	NULL	NULL	NULL	NULL	98	val_98	2008-04-08


[39/39] hive git commit: Merge from trunk to llap - 05/11/2015 (Prasanth Jayachandran)

Posted by pr...@apache.org.
Merge from trunk to llap - 05/11/2015 (Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e6b1556e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e6b1556e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e6b1556e

Branch: refs/heads/llap
Commit: e6b1556e39f81dc2861f612733b2ba61c17ff698
Parents: dc7ceb4 433714f
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon May 11 18:23:08 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon May 11 18:23:08 2015 -0700

----------------------------------------------------------------------
 README.txt                                      |    32 +-
 RELEASE_NOTES.txt                               |   459 +
 bin/ext/orcfiledump.sh                          |     9 +-
 .../apache/hadoop/hive/common/FileUtils.java    |   155 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    18 +-
 data/files/tjoin1.txt                           |     3 +
 data/files/tjoin2.txt                           |     4 +
 .../hive/hcatalog/common/HiveClientCache.java   |     9 +-
 .../hcatalog/mapreduce/TestPassProperties.java  |     5 +-
 .../templeton/tool/TempletonControllerJob.java  |     7 +-
 .../test/resources/testconfiguration.properties |     5 +-
 .../upgrade/mysql/hive-schema-1.2.0.mysql.sql   |     2 +-
 .../upgrade/mysql/hive-schema-1.3.0.mysql.sql   |     2 +-
 .../hive/metastore/AggregateStatsCache.java     |    33 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    27 +-
 .../hive/metastore/MetaStoreDirectSql.java      |    24 +-
 .../hive/metastore/RetryingMetaStoreClient.java |    60 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |    35 +-
 .../hive/metastore/TestHiveMetastoreCli.java    |    63 +
 .../hive/metastore/txn/TestTxnHandler.java      |    39 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    10 +-
 .../hive/ql/exec/CommonMergeJoinOperator.java   |    54 +-
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |     1 -
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |     3 +-
 .../apache/hadoop/hive/ql/exec/Registry.java    |    29 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |     5 +
 .../persistence/HybridHashTableContainer.java   |     2 +-
 .../exec/vector/VectorMapJoinBaseOperator.java  |   185 +
 .../ql/exec/vector/VectorMapJoinOperator.java   |   132 +-
 .../VectorMapJoinOuterFilteredOperator.java     |   122 +
 .../mapjoin/VectorMapJoinCommonOperator.java    |     5 +-
 .../VectorMapJoinGenerateResultOperator.java    |     5 +
 .../hive/ql/io/orc/ColumnStatisticsImpl.java    |    16 +-
 .../ql/io/orc/ConversionTreeReaderFactory.java  |    38 +
 .../apache/hadoop/hive/ql/io/orc/FileDump.java  |    91 +-
 .../hadoop/hive/ql/io/orc/JsonFileDump.java     |   365 +
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |     8 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |    24 +-
 .../hive/ql/io/orc/RecordReaderFactory.java     |   269 +
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |     2 +-
 .../ql/lockmgr/zookeeper/ZooKeeperHiveLock.java |    22 +
 .../optimizer/ConstantPropagateProcFactory.java |    83 +-
 .../ql/optimizer/IdentityProjectRemover.java    |    12 +
 .../ql/optimizer/NonBlockingOpDeDupProc.java    |    11 +
 .../hadoop/hive/ql/optimizer/Optimizer.java     |     8 +-
 .../ql/optimizer/calcite/cost/HiveCost.java     |    16 +-
 .../rules/HiveExpandDistinctAggregatesRule.java |   278 +
 .../translator/HiveOpConverterPostProc.java     |    10 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |    23 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    14 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    15 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |     4 +
 .../StorageBasedAuthorizationProvider.java      |   114 +-
 .../hadoop/hive/ql/session/SessionState.java    |     4 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   |    20 +-
 .../hive/ql/txn/compactor/CompactorThread.java  |    12 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |    11 +-
 .../hadoop/hive/ql/txn/compactor/Worker.java    |    12 +
 .../hadoop/hive/ql/io/orc/TestJsonFileDump.java |   138 +
 .../hadoop/hive/ql/plan/TestViewEntity.java     |   108 +
 .../hive/ql/txn/compactor/TestCleaner.java      |    56 +-
 .../hive/ql/txn/compactor/TestInitiator.java    |    63 +-
 .../hive/ql/txn/compactor/TestWorker.java       |    45 +
 .../test/queries/clientpositive/bucket_many.q   |    16 +
 .../test/queries/clientpositive/explainuser_2.q |     1 +
 .../extrapolate_part_stats_partial.q            |     2 +
 .../extrapolate_part_stats_partial_ndv.q        |     2 +
 ql/src/test/queries/clientpositive/fold_case.q  |    12 +
 ql/src/test/queries/clientpositive/fold_when.q  |    31 +
 .../queries/clientpositive/mapjoin_mapjoin.q    |     1 +
 ql/src/test/queries/clientpositive/mergejoin.q  |    17 +
 .../clientpositive/orc_int_type_promotion.q     |    79 +
 .../clientpositive/vector_left_outer_join2.q    |    62 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   403 +
 ql/src/test/resources/orc-file-dump.json        |  1354 ++
 .../annotate_stats_join_pkfk.q.out              |    20 +-
 .../results/clientpositive/bucket_many.q.out    |   230 +
 .../encryption_insert_partition_static.q.out    |    14 +-
 .../test/results/clientpositive/fold_case.q.out |   301 +
 .../test/results/clientpositive/fold_when.q.out |   480 +
 ql/src/test/results/clientpositive/join32.q.out |    84 +-
 .../clientpositive/join32_lessSize.q.out        |   423 +-
 ql/src/test/results/clientpositive/join33.q.out |    84 +-
 .../clientpositive/join_alt_syntax.q.out        |   306 +-
 .../clientpositive/join_cond_pushdown_2.q.out   |   150 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |   150 +-
 .../test/results/clientpositive/mergejoin.q.out |   844 +-
 .../clientpositive/orc_int_type_promotion.q.out |   377 +
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |    14 +-
 .../results/clientpositive/spark/cbo_gby.q.out  |     4 +-
 .../clientpositive/spark/cbo_udf_udaf.q.out     |     2 +-
 ...pby_complex_types_multi_single_reducer.q.out |    38 +-
 .../results/clientpositive/spark/join32.q.out   |    88 +-
 .../clientpositive/spark/join32_lessSize.q.out  |   286 +-
 .../results/clientpositive/spark/join33.q.out   |    88 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |   210 +-
 .../spark/join_cond_pushdown_2.q.out            |    98 +-
 .../spark/join_cond_pushdown_4.q.out            |    98 +-
 .../spark/lateral_view_explode2.q.out           |     4 +-
 .../clientpositive/spark/union_remove_25.q.out  |     2 +-
 .../clientpositive/spark/union_top_level.q.out  |    16 +-
 .../spark/vector_cast_constant.q.java1.7.out    |    16 +-
 .../spark/vector_cast_constant.q.java1.8.out    |    16 +-
 .../spark/vectorized_timestamp_funcs.q.out      |     4 +-
 .../clientpositive/tez/auto_join29.q.out        |   500 +
 .../clientpositive/tez/explainuser_2.q.out      |  1529 +-
 .../clientpositive/tez/limit_pushdown.q.out     |    31 +-
 .../results/clientpositive/tez/mergejoin.q.out  |   844 +-
 .../test/results/clientpositive/tez/mrr.q.out   |    48 +-
 .../tez/vector_count_distinct.q.out             |    28 +-
 .../tez/vector_left_outer_join2.q.out           |   553 +
 .../tez/vector_leftsemi_mapjoin.q.out           | 13807 +++++++++++++++++
 .../tez/vectorization_limit.q.out               |    31 +-
 .../tez/vectorized_distinct_gby.q.out           |    51 +-
 .../vector_left_outer_join2.q.out               |   568 +
 .../vector_leftsemi_mapjoin.q.out               | 13572 ++++++++++++++++
 .../apache/hive/service/cli/ColumnValue.java    |     2 +
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |    29 +-
 .../org/apache/hadoop/fs/DefaultFileAccess.java |    65 +-
 .../apache/hadoop/hive/shims/HadoopShims.java   |    24 +-
 .../hadoop/hive/shims/HadoopShimsSecure.java    |     8 -
 testutils/metastore/execute-test-on-lxc.sh      |     7 +-
 .../org/apache/hive/ptest/execution/PTest.java  |    12 +-
 .../ptest/execution/conf/TestConfiguration.java |    12 +-
 .../execution/conf/TestTestConfiguration.java   |    38 +-
 .../resources/test-configuration.properties     |     2 +
 126 files changed, 38732 insertions(+), 2892 deletions(-)
----------------------------------------------------------------------
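
[Editor's aside, not part of the commit: among the additions above is ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java, whose full diff appears further below. The class exposes one static entry point, printJsonMetaData, with the signature visible in that diff. A minimal, hypothetical caller might look like the sketch here — the file path, column choice, and class name are placeholders, and JSONException/IOException are folded into a generic throws clause:]

    import java.util.Arrays;
    import java.util.Collections;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.io.orc.JsonFileDump;

    public class JsonDumpExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Dump metadata for one ORC file as pretty-printed JSON; read the
        // row-group and bloom filter indexes for column 1 only.
        JsonFileDump.printJsonMetaData(
            Arrays.asList("/tmp/example.orc"),   // files (placeholder path)
            conf,
            Collections.singletonList(1),        // rowIndexCols
            true,                                // prettyPrint
            false);                              // printTimeZone
      }
    }

[With a single input file the output is one JSON object; with several files it becomes a JSON array, per the multiFile branch in the code below.]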


http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --cc common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index bfc5172,eff4d30..f4a70b2
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@@ -1020,10 -1021,8 +1021,10 @@@ public class HiveConf extends Configura
      HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
          "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
          "data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
 +    HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
 +        "Include file ID in splits on file systems that support it."),
      HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
-         "Cache size for keeping meta info about orc splits cached in the client."),
+         "Max cache size for keeping meta info about orc splits cached in the client."),
      HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
          "How many threads orc should use to create splits in parallel."),
      HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false,
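
[Editor's aside, not part of the diff: the properties touched in this hunk are ordinary HiveConf settings, so the new file-ID flag and the stripe-details cache cap can also be set programmatically. A minimal sketch, using only the property names and defaults shown in the hunk above — the class name is illustrative:]

    import org.apache.hadoop.hive.conf.HiveConf;

    public class OrcSplitConfExample {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // New in this merge: tag ORC splits with file IDs where the
        // underlying file system supports it (default true).
        conf.setBoolean("hive.orc.splits.include.fileid", true);
        // Upper bound on the client-side cache of per-split stripe
        // metadata (default 10000 entries).
        conf.setInt("hive.orc.cache.stripe.details.size", 10000);
        System.out.println(
            conf.getBoolean("hive.orc.splits.include.fileid", false));
      }
    }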

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
index 0000000,c33004e..a438855
mode 000000,100644..100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/JsonFileDump.java
@@@ -1,0 -1,365 +1,365 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p/>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p/>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.ql.io.orc;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.ql.io.filters.BloomFilterIO;
+ import org.codehaus.jettison.json.JSONException;
+ import org.codehaus.jettison.json.JSONObject;
+ import org.codehaus.jettison.json.JSONArray;
+ import org.codehaus.jettison.json.JSONStringer;
+ import org.codehaus.jettison.json.JSONWriter;
+ 
+ /**
+  * File dump tool with json formatted output.
+  */
+ public class JsonFileDump {
+ 
+   public static void printJsonMetaData(List<String> files, Configuration conf,
+       List<Integer> rowIndexCols, boolean prettyPrint, boolean printTimeZone) throws JSONException, IOException {
+     JSONStringer writer = new JSONStringer();
+     boolean multiFile = files.size() > 1;
+     if (multiFile) {
+       writer.array();
+     } else {
+       writer.object();
+     }
+     for (String filename : files) {
+       if (multiFile) {
+         writer.object();
+       }
+       writer.key("fileName").value(filename);
+       Path path = new Path(filename);
+       Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
+       writer.key("fileVersion").value(reader.getFileVersion().getName());
+       writer.key("writerVersion").value(reader.getWriterVersion());
+       RecordReaderImpl rows = (RecordReaderImpl) reader.rows();
+       writer.key("numberOfRows").value(reader.getNumberOfRows());
+       writer.key("compression").value(reader.getCompression());
+       if (reader.getCompression() != CompressionKind.NONE) {
+         writer.key("compressionBufferSize").value(reader.getCompressionSize());
+       }
+       writer.key("schemaString").value(reader.getObjectInspector().getTypeName());
+       writer.key("schema").array();
+       writeSchema(writer, reader.getTypes());
+       writer.endArray();
+ 
+       writer.key("stripeStatistics").array();
 -      Metadata metadata = reader.getMetadata();
 -      for (int n = 0; n < metadata.getStripeStatistics().size(); n++) {
++      List<StripeStatistics> stripeStatistics = reader.getStripeStatistics();
++      for (int n = 0; n < stripeStatistics.size(); n++) {
+         writer.object();
+         writer.key("stripeNumber").value(n + 1);
 -        StripeStatistics ss = metadata.getStripeStatistics().get(n);
++        StripeStatistics ss = stripeStatistics.get(n);
+         writer.key("columnStatistics").array();
+         for (int i = 0; i < ss.getColumnStatistics().length; i++) {
+           writer.object();
+           writer.key("columnId").value(i);
+           writeColumnStatistics(writer, ss.getColumnStatistics()[i]);
+           writer.endObject();
+         }
+         writer.endArray();
+         writer.endObject();
+       }
+       writer.endArray();
+ 
+       ColumnStatistics[] stats = reader.getStatistics();
+       int colCount = stats.length;
+       writer.key("fileStatistics").array();
+       for (int i = 0; i < stats.length; ++i) {
+         writer.object();
+         writer.key("columnId").value(i);
+         writeColumnStatistics(writer, stats[i]);
+         writer.endObject();
+       }
+       writer.endArray();
+ 
+       writer.key("stripes").array();
+       int stripeIx = -1;
+       for (StripeInformation stripe : reader.getStripes()) {
+         ++stripeIx;
+         long stripeStart = stripe.getOffset();
+         OrcProto.StripeFooter footer = rows.readStripeFooter(stripe);
+         writer.object(); // start of stripe information
+         writer.key("stripeNumber").value(stripeIx + 1);
+         writer.key("stripeInformation");
+         writeStripeInformation(writer, stripe);
+         if (printTimeZone) {
+           writer.key("writerTimezone").value(
+               footer.hasWriterTimezone() ? footer.getWriterTimezone() : FileDump.UNKNOWN);
+         }
+         long sectionStart = stripeStart;
+ 
+         writer.key("streams").array();
+         for (OrcProto.Stream section : footer.getStreamsList()) {
+           writer.object();
+           String kind = section.hasKind() ? section.getKind().name() : FileDump.UNKNOWN;
+           writer.key("columnId").value(section.getColumn());
+           writer.key("section").value(kind);
+           writer.key("startOffset").value(sectionStart);
+           writer.key("length").value(section.getLength());
+           sectionStart += section.getLength();
+           writer.endObject();
+         }
+         writer.endArray();
+ 
+         writer.key("encodings").array();
+         for (int i = 0; i < footer.getColumnsCount(); ++i) {
+           writer.object();
+           OrcProto.ColumnEncoding encoding = footer.getColumns(i);
+           writer.key("columnId").value(i);
+           writer.key("kind").value(encoding.getKind());
+           if (encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY ||
+               encoding.getKind() == OrcProto.ColumnEncoding.Kind.DICTIONARY_V2) {
+             writer.key("dictionarySize").value(encoding.getDictionarySize());
+           }
+           writer.endObject();
+         }
+         writer.endArray();
+ 
+         if (rowIndexCols != null && !rowIndexCols.isEmpty()) {
+           // Mark the user-specified columns; row-group indexes and bloom
+           // filter indexes are read only for these included columns.
+           boolean[] sargColumns = new boolean[colCount];
+           for (int colIdx : rowIndexCols) {
+             sargColumns[colIdx] = true;
+           }
+           RecordReaderImpl.Index indices = rows.readRowIndex(stripeIx, null, sargColumns);
+           writer.key("indexes").array();
+           for (int col : rowIndexCols) {
+             writer.object();
+             writer.key("columnId").value(col);
+             writeRowGroupIndexes(writer, col, indices.getRowGroupIndex());
+             writeBloomFilterIndexes(writer, col, indices.getBloomFilterIndex());
+             writer.endObject();
+           }
+           writer.endArray();
+         }
+         writer.endObject(); // end of stripe information
+       }
+       writer.endArray();
+ 
+       FileSystem fs = path.getFileSystem(conf);
+       long fileLen = fs.getContentSummary(path).getLength();
+       long paddedBytes = FileDump.getTotalPaddingSize(reader);
+       // An empty ORC file is ~45 bytes, so fileLen is assumed to be > 0 here.
+       double percentPadding = ((double) paddedBytes / (double) fileLen) * 100;
+       writer.key("fileLength").value(fileLen);
+       writer.key("paddingLength").value(paddedBytes);
+       writer.key("paddingRatio").value(percentPadding);
+       rows.close();
+ 
+       writer.endObject();
+     }
+     if (multiFile) {
+       writer.endArray();
+     }
+ 
+     if (prettyPrint) {
+       final String prettyJson;
+       if (multiFile) {
+         JSONArray jsonArray = new JSONArray(writer.toString());
+         prettyJson = jsonArray.toString(2);
+       } else {
+         JSONObject jsonObject = new JSONObject(writer.toString());
+         prettyJson = jsonObject.toString(2);
+       }
+       System.out.println(prettyJson);
+     } else {
+       System.out.println(writer.toString());
+     }
+   }
+ 
+   private static void writeSchema(JSONStringer writer, List<OrcProto.Type> types)
+       throws JSONException {
+     int i = 0;
+     for(OrcProto.Type type : types) {
+       writer.object();
+       writer.key("columnId").value(i++);
+       writer.key("columnType").value(type.getKind());
+       if (type.getFieldNamesCount() > 0) {
+         writer.key("childColumnNames").array();
+         for (String field : type.getFieldNamesList()) {
+           writer.value(field);
+         }
+         writer.endArray();
+         writer.key("childColumnIds").array();
+         for (Integer colId : type.getSubtypesList()) {
+           writer.value(colId);
+         }
+         writer.endArray();
+       }
+       if (type.hasPrecision()) {
+         writer.key("precision").value(type.getPrecision());
+       }
+ 
+       if (type.hasScale()) {
+         writer.key("scale").value(type.getScale());
+       }
+ 
+       if (type.hasMaximumLength()) {
+         writer.key("maxLength").value(type.getMaximumLength());
+       }
+       writer.endObject();
+     }
+   }
+ 
+   private static void writeStripeInformation(JSONWriter writer, StripeInformation stripe)
+       throws JSONException {
+     writer.object();
+     writer.key("offset").value(stripe.getOffset());
+     writer.key("indexLength").value(stripe.getIndexLength());
+     writer.key("dataLength").value(stripe.getDataLength());
+     writer.key("footerLength").value(stripe.getFooterLength());
+     writer.key("rowCount").value(stripe.getNumberOfRows());
+     writer.endObject();
+   }
+ 
+   private static void writeColumnStatistics(JSONWriter writer, ColumnStatistics cs)
+       throws JSONException {
+     if (cs != null) {
+       writer.key("count").value(cs.getNumberOfValues());
+       writer.key("hasNull").value(cs.hasNull());
+       if (cs instanceof BinaryColumnStatistics) {
+         writer.key("totalLength").value(((BinaryColumnStatistics) cs).getSum());
+         writer.key("type").value(OrcProto.Type.Kind.BINARY);
+       } else if (cs instanceof BooleanColumnStatistics) {
+         writer.key("trueCount").value(((BooleanColumnStatistics) cs).getTrueCount());
+         writer.key("falseCount").value(((BooleanColumnStatistics) cs).getFalseCount());
+         writer.key("type").value(OrcProto.Type.Kind.BOOLEAN);
+       } else if (cs instanceof IntegerColumnStatistics) {
+         writer.key("min").value(((IntegerColumnStatistics) cs).getMinimum());
+         writer.key("max").value(((IntegerColumnStatistics) cs).getMaximum());
+         if (((IntegerColumnStatistics) cs).isSumDefined()) {
+           writer.key("sum").value(((IntegerColumnStatistics) cs).getSum());
+         }
+         writer.key("type").value(OrcProto.Type.Kind.LONG);
+       } else if (cs instanceof DoubleColumnStatistics) {
+         writer.key("min").value(((DoubleColumnStatistics) cs).getMinimum());
+         writer.key("max").value(((DoubleColumnStatistics) cs).getMaximum());
+         writer.key("sum").value(((DoubleColumnStatistics) cs).getSum());
+         writer.key("type").value(OrcProto.Type.Kind.DOUBLE);
+       } else if (cs instanceof StringColumnStatistics) {
+         writer.key("min").value(((StringColumnStatistics) cs).getMinimum());
+         writer.key("max").value(((StringColumnStatistics) cs).getMaximum());
+         writer.key("totalLength").value(((StringColumnStatistics) cs).getSum());
+         writer.key("type").value(OrcProto.Type.Kind.STRING);
+       } else if (cs instanceof DateColumnStatistics) {
+         if (((DateColumnStatistics) cs).getMaximum() != null) {
+           writer.key("min").value(((DateColumnStatistics) cs).getMinimum());
+           writer.key("max").value(((DateColumnStatistics) cs).getMaximum());
+         }
+         writer.key("type").value(OrcProto.Type.Kind.DATE);
+       } else if (cs instanceof TimestampColumnStatistics) {
+         if (((TimestampColumnStatistics) cs).getMaximum() != null) {
+           writer.key("min").value(((TimestampColumnStatistics) cs).getMinimum());
+           writer.key("max").value(((TimestampColumnStatistics) cs).getMaximum());
+         }
+         writer.key("type").value(OrcProto.Type.Kind.TIMESTAMP);
+       } else if (cs instanceof DecimalColumnStatistics) {
+         if (((DecimalColumnStatistics) cs).getMaximum() != null) {
+           writer.key("min").value(((DecimalColumnStatistics) cs).getMinimum());
+           writer.key("max").value(((DecimalColumnStatistics) cs).getMaximum());
+           writer.key("sum").value(((DecimalColumnStatistics) cs).getSum());
+         }
+         writer.key("type").value(OrcProto.Type.Kind.DECIMAL);
+       }
+     }
+   }
+ 
+   private static void writeBloomFilterIndexes(JSONWriter writer, int col,
+       OrcProto.BloomFilterIndex[] bloomFilterIndex) throws JSONException {
+ 
+     BloomFilterIO stripeLevelBF = null;
+     if (bloomFilterIndex != null && bloomFilterIndex[col] != null) {
+       int entryIx = 0;
+       writer.key("bloomFilterIndexes").array();
+       for (OrcProto.BloomFilter bf : bloomFilterIndex[col].getBloomFilterList()) {
+         writer.object();
+         writer.key("entryId").value(entryIx++);
+         BloomFilterIO toMerge = new BloomFilterIO(bf);
+         writeBloomFilterStats(writer, toMerge);
+         if (stripeLevelBF == null) {
+           stripeLevelBF = toMerge;
+         } else {
+           stripeLevelBF.merge(toMerge);
+         }
+         writer.endObject();
+       }
+       writer.endArray();
+     }
+     if (stripeLevelBF != null) {
+       writer.key("stripeLevelBloomFilter");
+       writer.object();
+       writeBloomFilterStats(writer, stripeLevelBF);
+       writer.endObject();
+     }
+   }
+ 
+   private static void writeBloomFilterStats(JSONWriter writer, BloomFilterIO bf)
+       throws JSONException {
+     int bitCount = bf.getBitSize();
+     int popCount = 0;
+     for (long l : bf.getBitSet()) {
+       popCount += Long.bitCount(l);
+     }
+     int k = bf.getNumHashFunctions();
+     float loadFactor = (float) popCount / (float) bitCount;
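+     // Expected false-positive probability is approximately loadFactor^k for k independent hash functions.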
+     float expectedFpp = (float) Math.pow(loadFactor, k);
+     writer.key("numHashFunctions").value(k);
+     writer.key("bitCount").value(bitCount);
+     writer.key("popCount").value(popCount);
+     writer.key("loadFactor").value(loadFactor);
+     writer.key("expectedFpp").value(expectedFpp);
+   }
+ 
+   private static void writeRowGroupIndexes(JSONWriter writer, int col,
+       OrcProto.RowIndex[] rowGroupIndex)
+       throws JSONException {
+ 
+     OrcProto.RowIndex index;
+     if (rowGroupIndex == null || (col >= rowGroupIndex.length) ||
+         ((index = rowGroupIndex[col]) == null)) {
+       return;
+     }
+ 
+     writer.key("rowGroupIndexes").array();
+     for (int entryIx = 0; entryIx < index.getEntryCount(); ++entryIx) {
+       writer.object();
+       writer.key("entryId").value(entryIx);
+       OrcProto.RowIndexEntry entry = index.getEntry(entryIx);
+       if (entry == null) {
+         continue;
+       }
+       OrcProto.ColumnStatistics colStats = entry.getStatistics();
+       writeColumnStatistics(writer, ColumnStatisticsImpl.deserialize(colStats));
+       writer.key("positions").array();
+       for (int posIx = 0; posIx < entry.getPositionsCount(); ++posIx) {
+         writer.value(entry.getPositions(posIx));
+       }
+       writer.endArray();
+       writer.endObject();
+     }
+     writer.endArray();
+   }
+ 
+ }
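
For readers unfamiliar with the org.json API the dump code above leans on, here is a minimal standalone sketch of the JSONStringer/JSONObject calls it uses; the keys and values are illustrative only, not real dump output:

import org.json.JSONException;
import org.json.JSONObject;
import org.json.JSONStringer;

public class JsonWriterSketch {
  public static void main(String[] args) throws JSONException {
    JSONStringer writer = new JSONStringer();
    writer.object();                         // {
    writer.key("fileLength").value(1024L);   //   "fileLength": 1024,
    writer.key("stripes").array();           //   "stripes": [
    writer.object();                         //     {
    writer.key("rowCount").value(5000L);     //       "rowCount": 5000
    writer.endObject();                      //     }
    writer.endArray();                       //   ]
    writer.endObject();                      // }
    // Pretty-print with a 2-space indent, as the prettyPrint branch above does.
    System.out.println(new JSONObject(writer.toString()).toString(2));
  }
}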

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/ql/src/test/results/clientpositive/tez/mrr.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/mrr.q.out
index 8101f3b,d90b27f..1713783
--- a/ql/src/test/results/clientpositive/tez/mrr.q.out
+++ b/ql/src/test/results/clientpositive/tez/mrr.q.out
@@@ -469,19 -468,26 +467,25 @@@ STAGE PLANS
                  mode: mergepartial
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                 Reduce Output Operator
-                   key expressions: _col1 (type: bigint), _col0 (type: string)
-                   sort order: ++
-                   Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                 Group By Operator
+                   aggregations: count(_col1)
+                   keys: _col0 (type: string)
+                   mode: complete
+                   outputColumnNames: _col0, _col1
+                   Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+                   Reduce Output Operator
 -                    key expressions: _col1 (type: bigint)
 -                    sort order: +
++                    key expressions: _col1 (type: bigint), _col0 (type: string)
++                    sort order: ++
+                     Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 -                    value expressions: _col0 (type: string)
          Reducer 4 
              Reduce Operator Tree:
                Select Operator
 -                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
 +                expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                 Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                   Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@@ -501,734 -507,739 +505,738 @@@ POSTHOOK: query: SELECT s2.key, count(d
  POSTHOOK: type: QUERY
  POSTHOOK: Input: default@src
  #### A masked pattern was here ####
 -98	1
 -97	1
 -96	1
 -95	1
 -92	1
 -90	1
 -9	1
 -87	1
 -86	1
 -85	1
 -84	1
 -83	1
 -82	1
 -80	1
 -8	1
 -78	1
 -77	1
 -76	1
 -74	1
 -72	1
 -70	1
 -69	1
 -67	1
 -66	1
 -65	1
 -64	1
 -58	1
 -57	1
 -54	1
 -53	1
 -51	1
 -5	1
 -498	1
 -497	1
 -496	1
 -495	1
 -494	1
 -493	1
 -492	1
 -491	1
 -490	1
 -489	1
 -487	1
 -485	1
 -484	1
 -483	1
 -482	1
 -481	1
 -480	1
 -479	1
 -478	1
 -477	1
 -475	1
 -472	1
 -470	1
 -47	1
 -469	1
 -468	1
 -467	1
 -466	1
 -463	1
 -462	1
 -460	1
 -459	1
 -458	1
 -457	1
 -455	1
 -454	1
 -453	1
 -452	1
 -449	1
 -448	1
 -446	1
 -444	1
 -443	1
 -44	1
 -439	1
 -438	1
 -437	1
 -436	1
 -435	1
 -432	1
 -431	1
 -430	1
 -43	1
 -429	1
 -427	1
 -424	1
 -421	1
 -42	1
 -419	1
 -418	1
 -417	1
 -414	1
 -413	1
 -411	1
 -41	1
 -409	1
 -407	1
 -406	1
 -404	1
 -403	1
 -402	1
 -401	1
 -400	1
 -4	1
 -399	1
 -397	1
 -396	1
 -395	1
 -394	1
 -393	1
 -392	1
 -389	1
 -386	1
 -384	1
 -382	1
 -379	1
 -378	1
 -377	1
 -375	1
 -374	1
 -373	1
 -37	1
 -369	1
 -368	1
 -367	1
 -366	1
 -365	1
 -364	1
 -362	1
 -360	1
 -356	1
 -353	1
 -351	1
 -35	1
 -348	1
 -345	1
 -344	1
 -342	1
 -341	1
 -34	1
 -339	1
 -338	1
 -336	1
 -335	1
 -333	1
 -332	1
 -331	1
 -33	1
 -327	1
 -325	1
 -323	1
 -322	1
 -321	1
 -318	1
 -317	1
 -316	1
 -315	1
 -311	1
 -310	1
 -309	1
 -308	1
 -307	1
 -306	1
 -305	1
 -302	1
 -30	1
 -298	1
 -296	1
 -292	1
 -291	1
 -289	1
 -288	1
 -287	1
 -286	1
 -285	1
 -284	1
 -283	1
 -282	1
 -281	1
 -280	1
 -28	1
 -278	1
 -277	1
 -275	1
 -274	1
 -273	1
 -272	1
 -27	1
 -266	1
 -265	1
 -263	1
 -262	1
 -260	1
 -26	1
 -258	1
 -257	1
 -256	1
 -255	1
 -252	1
 -249	1
 -248	1
 -247	1
 -244	1
 -242	1
 -241	1
 -24	1
 -239	1
 -238	1
 -237	1
 -235	1
 -233	1
 -230	1
 -229	1
 -228	1
 -226	1
 -224	1
 -223	1
 -222	1
 -221	1
 -219	1
 -218	1
 -217	1
 -216	1
 -214	1
 -213	1
 -209	1
 -208	1
 -207	1
 -205	1
 -203	1
 -202	1
 -201	1
 -200	1
 -20	1
 -2	1
 -199	1
 -197	1
 -196	1
 -195	1
 -194	1
 -193	1
 -192	1
 -191	1
 -190	1
 -19	1
 -189	1
 -187	1
 -186	1
 -183	1
 -181	1
 -180	1
 -18	1
 -179	1
 -178	1
 -177	1
 -176	1
 -175	1
 -174	1
 -172	1
 -170	1
 -17	1
 -169	1
 -168	1
 -167	1
 -166	1
 -165	1
 -164	1
 -163	1
 -162	1
 -160	1
 -158	1
 -157	1
 -156	1
 -155	1
 -153	1
 -152	1
 -150	1
 -15	1
 -149	1
 -146	1
 -145	1
 -143	1
 -138	1
 -137	1
 -136	1
 -134	1
 -133	1
 -131	1
 -129	1
 -128	1
 -126	1
 -125	1
 -120	1
 -12	1
 -119	1
 -118	1
 -116	1
 -114	1
 -113	1
 -111	1
 -11	1
 -105	1
 -104	1
 -103	1
 -100	1
 -10	1
  0	1
 -PREHOOK: query: -- same query with broadcast join
 -EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt
 -PREHOOK: type: QUERY
 -POSTHOOK: query: -- same query with broadcast join
 -EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt
 -POSTHOOK: type: QUERY
 -STAGE DEPENDENCIES:
 -  Stage-1 is a root stage
 -  Stage-0 depends on stages: Stage-1
 -
 -STAGE PLANS:
 -  Stage: Stage-1
 -    Tez
 -      Edges:
 -        Map 1 <- Map 4 (BROADCAST_EDGE)
 -        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 -        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 -#### A masked pattern was here ####
 -      Vertices:
 -        Map 1 
 -            Map Operator Tree:
 -                TableScan
 -                  alias: s1
 -                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 -                  Filter Operator
 -                    predicate: key is not null (type: boolean)
 -                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 -                    Select Operator
 -                      expressions: key (type: string), value (type: string)
 -                      outputColumnNames: _col0, _col1
 -                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 -                      Map Join Operator
 -                        condition map:
 -                             Inner Join 0 to 1
 -                        keys:
 -                          0 _col0 (type: string)
 -                          1 _col0 (type: string)
 -                        outputColumnNames: _col0, _col1
 -                        input vertices:
 -                          1 Map 4
 -                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 -                        HybridGraceHashJoin: true
 -                        Group By Operator
 -                          keys: _col0 (type: string), _col1 (type: string)
 -                          mode: hash
 -                          outputColumnNames: _col0, _col1
 -                          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 -                          Reduce Output Operator
 -                            key expressions: _col0 (type: string), _col1 (type: string)
 -                            sort order: ++
 -                            Map-reduce partition columns: _col0 (type: string)
 -                            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 -        Map 4 
 -            Map Operator Tree:
 -                TableScan
 -                  alias: s1
 -                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 -                  Filter Operator
 -                    predicate: key is not null (type: boolean)
 -                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 -                    Select Operator
 -                      expressions: key (type: string)
 -                      outputColumnNames: _col0
 -                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 -                      Reduce Output Operator
 -                        key expressions: _col0 (type: string)
 -                        sort order: +
 -                        Map-reduce partition columns: _col0 (type: string)
 -                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 -        Reducer 2 
 -            Reduce Operator Tree:
 -              Group By Operator
 -                keys: KEY._col0 (type: string), KEY._col1 (type: string)
 -                mode: mergepartial
 -                outputColumnNames: _col0, _col1
 -                Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
 -                Group By Operator
 -                  aggregations: count(_col1)
 -                  keys: _col0 (type: string)
 -                  mode: complete
 -                  outputColumnNames: _col0, _col1
 -                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 -                  Reduce Output Operator
 -                    key expressions: _col1 (type: bigint)
 -                    sort order: +
 -                    Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 -                    value expressions: _col0 (type: string)
 -        Reducer 3 
 -            Reduce Operator Tree:
 -              Select Operator
 -                expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
 -                outputColumnNames: _col0, _col1
 -                Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 -                File Output Operator
 -                  compressed: false
 -                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 -                  table:
 -                      input format: org.apache.hadoop.mapred.TextInputFormat
 -                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 -                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 -
 -  Stage: Stage-0
 -    Fetch Operator
 -      limit: -1
 -      Processor Tree:
 -        ListSink
 -
 -PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt
 -PREHOOK: type: QUERY
 -PREHOOK: Input: default@src
 -#### A masked pattern was here ####
 -POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt
 -POSTHOOK: type: QUERY
 -POSTHOOK: Input: default@src
 -#### A masked pattern was here ####
 -98	1
 -97	1
 -96	1
 -95	1
 -92	1
 -90	1
 -9	1
 -87	1
 -86	1
 -85	1
 -84	1
 -83	1
 -82	1
 -80	1
 -8	1
 -78	1
 -77	1
 -76	1
 -74	1
 -72	1
 -70	1
 -69	1
 -67	1
 -66	1
 -65	1
 -64	1
 -58	1
 -57	1
 -54	1
 -53	1
 -51	1
 -5	1
 -498	1
 -497	1
 -496	1
 -495	1
 -494	1
 -493	1
 -492	1
 -491	1
 -490	1
 -489	1
 -487	1
 -485	1
 -484	1
 -483	1
 -482	1
 -481	1
 -480	1
 -479	1
 -478	1
 -477	1
 -475	1
 -472	1
 -470	1
 -47	1
 -469	1
 -468	1
 -467	1
 -466	1
 -463	1
 -462	1
 -460	1
 -459	1
 -458	1
 -457	1
 -455	1
 -454	1
 -453	1
 -452	1
 -449	1
 -448	1
 -446	1
 -444	1
 -443	1
 -44	1
 -439	1
 -438	1
 -437	1
 -436	1
 -435	1
 -432	1
 -431	1
 -430	1
 -43	1
 -429	1
 -427	1
 -424	1
 -421	1
 -42	1
 -419	1
 -418	1
 -417	1
 -414	1
 -413	1
 -411	1
 -41	1
 -409	1
 -407	1
 -406	1
 -404	1
 -403	1
 -402	1
 -401	1
 -400	1
 -4	1
 -399	1
 -397	1
 -396	1
 -395	1
 -394	1
 -393	1
 -392	1
 -389	1
 -386	1
 -384	1
 -382	1
 -379	1
 -378	1
 -377	1
 -375	1
 -374	1
 -373	1
 -37	1
 -369	1
 -368	1
 -367	1
 -366	1
 -365	1
 -364	1
 -362	1
 -360	1
 -356	1
 -353	1
 -351	1
 -35	1
 -348	1
 -345	1
 -344	1
 -342	1
 -341	1
 -34	1
 -339	1
 -338	1
 -336	1
 -335	1
 -333	1
 -332	1
 -331	1
 -33	1
 -327	1
 -325	1
 -323	1
 -322	1
 -321	1
 -318	1
 -317	1
 -316	1
 -315	1
 -311	1
 -310	1
 -309	1
 -308	1
 -307	1
 -306	1
 -305	1
 -302	1
 -30	1
 -298	1
 -296	1
 -292	1
 -291	1
 -289	1
 -288	1
 -287	1
 -286	1
 -285	1
 -284	1
 -283	1
 -282	1
 -281	1
 -280	1
 -28	1
 -278	1
 -277	1
 -275	1
 -274	1
 -273	1
 -272	1
 -27	1
 -266	1
 -265	1
 -263	1
 -262	1
 -260	1
 -26	1
 -258	1
 -257	1
 -256	1
 -255	1
 -252	1
 -249	1
 -248	1
 -247	1
 -244	1
 -242	1
 -241	1
 -24	1
 -239	1
 -238	1
 -237	1
 -235	1
 -233	1
 -230	1
 -229	1
 -228	1
 -226	1
 -224	1
 -223	1
 -222	1
 -221	1
 -219	1
 -218	1
 -217	1
 -216	1
 -214	1
 -213	1
 -209	1
 -208	1
 -207	1
 -205	1
 -203	1
 -202	1
 -201	1
 -200	1
 -20	1
 -2	1
 -199	1
 -197	1
 -196	1
 -195	1
 -194	1
 -193	1
 -192	1
 -191	1
 -190	1
 -19	1
 -189	1
 -187	1
 -186	1
 -183	1
 -181	1
 -180	1
 -18	1
 -179	1
 -178	1
 -177	1
 -176	1
 -175	1
 -174	1
 -172	1
 -170	1
 -17	1
 -169	1
 -168	1
 -167	1
 -166	1
 -165	1
 -164	1
 -163	1
 -162	1
 -160	1
 -158	1
 -157	1
 -156	1
 -155	1
 -153	1
 -152	1
 -150	1
 -15	1
 -149	1
 -146	1
 -145	1
 -143	1
 -138	1
 -137	1
 -136	1
 -134	1
 -133	1
 -131	1
 -129	1
 -128	1
 -126	1
 -125	1
 -120	1
 -12	1
 -119	1
 -118	1
 -116	1
 -114	1
 -113	1
 -111	1
 -11	1
 -105	1
 -104	1
 -103	1
 -100	1
  10	1
 -0	1
 +100	1
 +103	1
 +104	1
 +105	1
 +11	1
 +111	1
 +113	1
 +114	1
 +116	1
 +118	1
 +119	1
 +12	1
 +120	1
 +125	1
 +126	1
 +128	1
 +129	1
 +131	1
 +133	1
 +134	1
 +136	1
 +137	1
 +138	1
 +143	1
 +145	1
 +146	1
 +149	1
 +15	1
 +150	1
 +152	1
 +153	1
 +155	1
 +156	1
 +157	1
 +158	1
 +160	1
 +162	1
 +163	1
 +164	1
 +165	1
 +166	1
 +167	1
 +168	1
 +169	1
 +17	1
 +170	1
 +172	1
 +174	1
 +175	1
 +176	1
 +177	1
 +178	1
 +179	1
 +18	1
 +180	1
 +181	1
 +183	1
 +186	1
 +187	1
 +189	1
 +19	1
 +190	1
 +191	1
 +192	1
 +193	1
 +194	1
 +195	1
 +196	1
 +197	1
 +199	1
 +2	1
 +20	1
 +200	1
 +201	1
 +202	1
 +203	1
 +205	1
 +207	1
 +208	1
 +209	1
 +213	1
 +214	1
 +216	1
 +217	1
 +218	1
 +219	1
 +221	1
 +222	1
 +223	1
 +224	1
 +226	1
 +228	1
 +229	1
 +230	1
 +233	1
 +235	1
 +237	1
 +238	1
 +239	1
 +24	1
 +241	1
 +242	1
 +244	1
 +247	1
 +248	1
 +249	1
 +252	1
 +255	1
 +256	1
 +257	1
 +258	1
 +26	1
 +260	1
 +262	1
 +263	1
 +265	1
 +266	1
 +27	1
 +272	1
 +273	1
 +274	1
 +275	1
 +277	1
 +278	1
 +28	1
 +280	1
 +281	1
 +282	1
 +283	1
 +284	1
 +285	1
 +286	1
 +287	1
 +288	1
 +289	1
 +291	1
 +292	1
 +296	1
 +298	1
 +30	1
 +302	1
 +305	1
 +306	1
 +307	1
 +308	1
 +309	1
 +310	1
 +311	1
 +315	1
 +316	1
 +317	1
 +318	1
 +321	1
 +322	1
 +323	1
 +325	1
 +327	1
 +33	1
 +331	1
 +332	1
 +333	1
 +335	1
 +336	1
 +338	1
 +339	1
 +34	1
 +341	1
 +342	1
 +344	1
 +345	1
 +348	1
 +35	1
 +351	1
 +353	1
 +356	1
 +360	1
 +362	1
 +364	1
 +365	1
 +366	1
 +367	1
 +368	1
 +369	1
 +37	1
 +373	1
 +374	1
 +375	1
 +377	1
 +378	1
 +379	1
 +382	1
 +384	1
 +386	1
 +389	1
 +392	1
 +393	1
 +394	1
 +395	1
 +396	1
 +397	1
 +399	1
 +4	1
 +400	1
 +401	1
 +402	1
 +403	1
 +404	1
 +406	1
 +407	1
 +409	1
 +41	1
 +411	1
 +413	1
 +414	1
 +417	1
 +418	1
 +419	1
 +42	1
 +421	1
 +424	1
 +427	1
 +429	1
 +43	1
 +430	1
 +431	1
 +432	1
 +435	1
 +436	1
 +437	1
 +438	1
 +439	1
 +44	1
 +443	1
 +444	1
 +446	1
 +448	1
 +449	1
 +452	1
 +453	1
 +454	1
 +455	1
 +457	1
 +458	1
 +459	1
 +460	1
 +462	1
 +463	1
 +466	1
 +467	1
 +468	1
 +469	1
 +47	1
 +470	1
 +472	1
 +475	1
 +477	1
 +478	1
 +479	1
 +480	1
 +481	1
 +482	1
 +483	1
 +484	1
 +485	1
 +487	1
 +489	1
 +490	1
 +491	1
 +492	1
 +493	1
 +494	1
 +495	1
 +496	1
 +497	1
 +498	1
 +5	1
 +51	1
 +53	1
 +54	1
 +57	1
 +58	1
 +64	1
 +65	1
 +66	1
 +67	1
 +69	1
 +70	1
 +72	1
 +74	1
 +76	1
 +77	1
 +78	1
 +8	1
 +80	1
 +82	1
 +83	1
 +84	1
 +85	1
 +86	1
 +87	1
 +9	1
 +90	1
 +92	1
 +95	1
 +96	1
 +97	1
 +98	1
 +PREHOOK: query: -- same query with broadcast join
 +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
 +PREHOOK: type: QUERY
 +POSTHOOK: query: -- same query with broadcast join
 +EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
 +POSTHOOK: type: QUERY
 +STAGE DEPENDENCIES:
 +  Stage-1 is a root stage
 +  Stage-0 depends on stages: Stage-1
 +
 +STAGE PLANS:
 +  Stage: Stage-1
 +    Tez
 +      Edges:
 +        Map 1 <- Map 4 (BROADCAST_EDGE)
 +        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 +        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 +#### A masked pattern was here ####
 +      Vertices:
 +        Map 1 
 +            Map Operator Tree:
 +                TableScan
 +                  alias: s1
 +                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 +                  Filter Operator
 +                    predicate: key is not null (type: boolean)
 +                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 +                    Select Operator
 +                      expressions: key (type: string), value (type: string)
 +                      outputColumnNames: _col0, _col1
 +                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 +                      Map Join Operator
 +                        condition map:
 +                             Inner Join 0 to 1
 +                        keys:
 +                          0 _col0 (type: string)
 +                          1 _col0 (type: string)
 +                        outputColumnNames: _col0, _col1
 +                        input vertices:
 +                          1 Map 4
 +                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 +                        HybridGraceHashJoin: true
 +                        Group By Operator
-                           aggregations: count(DISTINCT _col1)
 +                          keys: _col0 (type: string), _col1 (type: string)
 +                          mode: hash
-                           outputColumnNames: _col0, _col1, _col2
++                          outputColumnNames: _col0, _col1
 +                          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 +                          Reduce Output Operator
 +                            key expressions: _col0 (type: string), _col1 (type: string)
 +                            sort order: ++
 +                            Map-reduce partition columns: _col0 (type: string)
 +                            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 +        Map 4 
 +            Map Operator Tree:
 +                TableScan
 +                  alias: s1
 +                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 +                  Filter Operator
 +                    predicate: key is not null (type: boolean)
 +                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 +                    Select Operator
 +                      expressions: key (type: string)
 +                      outputColumnNames: _col0
 +                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 +                      Reduce Output Operator
 +                        key expressions: _col0 (type: string)
 +                        sort order: +
 +                        Map-reduce partition columns: _col0 (type: string)
 +                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
 +        Reducer 2 
 +            Reduce Operator Tree:
 +              Group By Operator
-                 aggregations: count(DISTINCT KEY._col1:0._col0)
-                 keys: KEY._col0 (type: string)
++                keys: KEY._col0 (type: string), KEY._col1 (type: string)
 +                mode: mergepartial
 +                outputColumnNames: _col0, _col1
 +                Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                 Reduce Output Operator
-                   key expressions: _col1 (type: bigint), _col0 (type: string)
-                   sort order: ++
-                   Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
++                Group By Operator
++                  aggregations: count(_col1)
++                  keys: _col0 (type: string)
++                  mode: complete
++                  outputColumnNames: _col0, _col1
++                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
++                  Reduce Output Operator
++                    key expressions: _col1 (type: bigint), _col0 (type: string)
++                    sort order: ++
++                    Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 +        Reducer 3 
 +            Reduce Operator Tree:
 +              Select Operator
 +                expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
 +                outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
++                Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 +                File Output Operator
 +                  compressed: false
-                   Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
++                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
 +                  table:
 +                      input format: org.apache.hadoop.mapred.TextInputFormat
 +                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 +                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 +
 +  Stage: Stage-0
 +    Fetch Operator
 +      limit: -1
 +      Processor Tree:
 +        ListSink
 +
 +PREHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
 +PREHOOK: type: QUERY
 +PREHOOK: Input: default@src
 +#### A masked pattern was here ####
 +POSTHOOK: query: SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt,s2.key
 +POSTHOOK: type: QUERY
 +POSTHOOK: Input: default@src
 +#### A masked pattern was here ####
 +0	1
 +10	1
 +100	1
 +103	1
 +104	1
 +105	1
 +11	1
 +111	1
 +113	1
 +114	1
 +116	1
 +118	1
 +119	1
 +12	1
 +120	1
 +125	1
 +126	1
 +128	1
 +129	1
 +131	1
 +133	1
 +134	1
 +136	1
 +137	1
 +138	1
 +143	1
 +145	1
 +146	1
 +149	1
 +15	1
 +150	1
 +152	1
 +153	1
 +155	1
 +156	1
 +157	1
 +158	1
 +160	1
 +162	1
 +163	1
 +164	1
 +165	1
 +166	1
 +167	1
 +168	1
 +169	1
 +17	1
 +170	1
 +172	1
 +174	1
 +175	1
 +176	1
 +177	1
 +178	1
 +179	1
 +18	1
 +180	1
 +181	1
 +183	1
 +186	1
 +187	1
 +189	1
 +19	1
 +190	1
 +191	1
 +192	1
 +193	1
 +194	1
 +195	1
 +196	1
 +197	1
 +199	1
 +2	1
 +20	1
 +200	1
 +201	1
 +202	1
 +203	1
 +205	1
 +207	1
 +208	1
 +209	1
 +213	1
 +214	1
 +216	1
 +217	1
 +218	1
 +219	1
 +221	1
 +222	1
 +223	1
 +224	1
 +226	1
 +228	1
 +229	1
 +230	1
 +233	1
 +235	1
 +237	1
 +238	1
 +239	1
 +24	1
 +241	1
 +242	1
 +244	1
 +247	1
 +248	1
 +249	1
 +252	1
 +255	1
 +256	1
 +257	1
 +258	1
 +26	1
 +260	1
 +262	1
 +263	1
 +265	1
 +266	1
 +27	1
 +272	1
 +273	1
 +274	1
 +275	1
 +277	1
 +278	1
 +28	1
 +280	1
 +281	1
 +282	1
 +283	1
 +284	1
 +285	1
 +286	1
 +287	1
 +288	1
 +289	1
 +291	1
 +292	1
 +296	1
 +298	1
 +30	1
 +302	1
 +305	1
 +306	1
 +307	1
 +308	1
 +309	1
 +310	1
 +311	1
 +315	1
 +316	1
 +317	1
 +318	1
 +321	1
 +322	1
 +323	1
 +325	1
 +327	1
 +33	1
 +331	1
 +332	1
 +333	1
 +335	1
 +336	1
 +338	1
 +339	1
 +34	1
 +341	1
 +342	1
 +344	1
 +345	1
 +348	1
 +35	1
 +351	1
 +353	1
 +356	1
 +360	1
 +362	1
 +364	1
 +365	1
 +366	1
 +367	1
 +368	1
 +369	1
 +37	1
 +373	1
 +374	1
 +375	1
 +377	1
 +378	1
 +379	1
 +382	1
 +384	1
 +386	1
 +389	1
 +392	1
 +393	1
 +394	1
 +395	1
 +396	1
 +397	1
 +399	1
 +4	1
 +400	1
 +401	1
 +402	1
 +403	1
 +404	1
 +406	1
 +407	1
 +409	1
 +41	1
 +411	1
 +413	1
 +414	1
 +417	1
 +418	1
 +419	1
 +42	1
 +421	1
 +424	1
 +427	1
 +429	1
 +43	1
 +430	1
 +431	1
 +432	1
 +435	1
 +436	1
 +437	1
 +438	1
 +439	1
 +44	1
 +443	1
 +444	1
 +446	1
 +448	1
 +449	1
 +452	1
 +453	1
 +454	1
 +455	1
 +457	1
 +458	1
 +459	1
 +460	1
 +462	1
 +463	1
 +466	1
 +467	1
 +468	1
 +469	1
 +47	1
 +470	1
 +472	1
 +475	1
 +477	1
 +478	1
 +479	1
 +480	1
 +481	1
 +482	1
 +483	1
 +484	1
 +485	1
 +487	1
 +489	1
 +490	1
 +491	1
 +492	1
 +493	1
 +494	1
 +495	1
 +496	1
 +497	1
 +498	1
 +5	1
 +51	1
 +53	1
 +54	1
 +57	1
 +58	1
 +64	1
 +65	1
 +66	1
 +67	1
 +69	1
 +70	1
 +72	1
 +74	1
 +76	1
 +77	1
 +78	1
 +8	1
 +80	1
 +82	1
 +83	1
 +84	1
 +85	1
 +86	1
 +87	1
 +9	1
 +90	1
 +92	1
 +95	1
 +96	1
 +97	1
 +98	1
  PREHOOK: query: -- query with multiple branches in the task dag
  EXPLAIN
  SELECT * 

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/e6b1556e/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
----------------------------------------------------------------------


[15/39] hive git commit: HIVE-10595 Dropping a table can cause NPEs in the compactor (Alan Gates, reviewed by Eugene Koifman)

Posted by pr...@apache.org.
HIVE-10595 Dropping a table can cause NPEs in the compactor (Alan Gates, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c156b32b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c156b32b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c156b32b

Branch: refs/heads/llap
Commit: c156b32b49aeb5943e45a68fc7600c9244afb128
Parents: 72088ca
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu May 7 12:49:21 2015 +0100
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Thu May 7 12:49:21 2015 +0100

----------------------------------------------------------------------
 .../hadoop/hive/ql/txn/compactor/Cleaner.java   | 20 ++++++-
 .../hive/ql/txn/compactor/CompactorThread.java  | 12 ++--
 .../hadoop/hive/ql/txn/compactor/Initiator.java | 11 +++-
 .../hadoop/hive/ql/txn/compactor/Worker.java    | 12 ++++
 .../hive/ql/txn/compactor/TestCleaner.java      | 56 ++++++++++++++++-
 .../hive/ql/txn/compactor/TestInitiator.java    | 63 +++++++++++++++++++-
 .../hive/ql/txn/compactor/TestWorker.java       | 45 ++++++++++++++
 7 files changed, 207 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
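
The shape of the fix is the same across the compactor threads: re-resolve the table (and, for partitioned tables, the partition) from the metastore and treat a null result as "dropped, skip this compaction" instead of dereferencing it. Below is a condensed, runnable sketch of that guard; the resolve* helpers are stand-ins that model the null-on-dropped contract, not the real metastore calls.

public class DroppedObjectGuardSketch {
  // Stand-ins for CompactorThread.resolveTable/resolvePartition: both are
  // assumed to return null when the object has been dropped.
  static Object resolveTable(String table)        { return null; }
  static Object resolvePartition(String partName) { return null; }

  static boolean shouldSkip(String table, String partName) {
    if (resolveTable(table) == null) {
      System.out.println("Unable to find table " + table + ", assuming it was dropped");
      return true;
    }
    if (partName != null && resolvePartition(partName) == null) {
      System.out.println("Unable to find partition " + partName + ", assuming it was dropped");
      return true;
    }
    return false;  // safe to resolve the StorageDescriptor and proceed
  }

  public static void main(String[] args) {
    System.out.println(shouldSkip("default.dt", null));  // true: table was dropped
  }
}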


http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index 83b0d3d..16d2c81 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -26,10 +26,12 @@ import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -183,7 +185,23 @@ public class Cleaner extends CompactorThread {
   private void clean(CompactionInfo ci) throws MetaException {
     LOG.info("Starting cleaning for " + ci.getFullPartitionName());
     try {
-      StorageDescriptor sd = resolveStorageDescriptor(resolveTable(ci), resolvePartition(ci));
+      Table t = resolveTable(ci);
+      if (t == null) {
+        // The table was dropped before we got around to cleaning it.
+        LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped");
+        return;
+      }
+      Partition p = null;
+      if (ci.partName != null) {
+        p = resolvePartition(ci);
+        if (p == null) {
+          // The partition was dropped before we got around to cleaning it.
+          LOG.info("Unable to find partition " + ci.getFullPartitionName() +
+              ", assuming it was dropped");
+          return;
+        }
+      }
+      StorageDescriptor sd = resolveStorageDescriptor(t, p);
       final String location = sd.getLocation();
 
       // Create a bogus validTxnList with a high water mark set to MAX_LONG and no open

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
index 7d097fd..38cd95e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java
@@ -32,13 +32,13 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler;
-import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
@@ -105,13 +105,15 @@ abstract class CompactorThread extends Thread implements MetaStoreThread {
    * one partition.
    */
   protected Partition resolvePartition(CompactionInfo ci) throws Exception {
-    Partition p = null;
     if (ci.partName != null) {
-      List<String> names = new ArrayList<String>(1);
-      names.add(ci.partName);
       List<Partition> parts = null;
       try {
-        parts = rs.getPartitionsByNames(ci.dbname, ci.tableName, names);
+        parts = rs.getPartitionsByNames(ci.dbname, ci.tableName,
+            Collections.singletonList(ci.partName));
+        if (parts == null || parts.size() == 0) {
+          // The partition got dropped before we went looking for it.
+          return null;
+        }
       } catch (Exception e) {
         LOG.error("Unable to find partition " + ci.getFullPartitionName() + ", " + e.getMessage());
         throw e;

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
index f706ac1..847d751 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java
@@ -85,13 +85,13 @@ public class Initiator extends CompactorThread {
           LOG.debug("Found " + potentials.size() + " potential compactions, " +
               "checking to see if we should compact any of them");
           for (CompactionInfo ci : potentials) {
-            LOG.debug("Checking to see if we should compact " + ci.getFullPartitionName());
+            LOG.info("Checking to see if we should compact " + ci.getFullPartitionName());
             try {
               Table t = resolveTable(ci);
               if (t == null) {
                 // Most likely this means it's a temp table
-                LOG.debug("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " +
-                    "table and moving on.");
+                LOG.info("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " +
+                    "table or has been dropped and moving on.");
                 continue;
               }
 
@@ -121,6 +121,11 @@ public class Initiator extends CompactorThread {
 
               // Figure out who we should run the file operations as
               Partition p = resolvePartition(ci);
+              if (p == null && ci.partName != null) {
+                LOG.info("Can't find partition " + ci.getFullPartitionName() +
+                    ", assuming it has been dropped and moving on.");
+                continue;
+              }
               StorageDescriptor sd = resolveStorageDescriptor(t, p);
               String runAs = findUserToRunAs(sd.getLocation(), t);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
index 3ce9ffd..f26225a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java
@@ -94,6 +94,12 @@ public class Worker extends CompactorThread {
         Table t1 = null;
         try {
           t1 = resolveTable(ci);
+          if (t1 == null) {
+            LOG.info("Unable to find table " + ci.getFullTableName() +
+                ", assuming it was dropped and moving on.");
+            txnHandler.markCleaned(ci);
+            continue;
+          }
         } catch (MetaException e) {
           txnHandler.markCleaned(ci);
           continue;
@@ -106,6 +112,12 @@ public class Worker extends CompactorThread {
         Partition p = null;
         try {
           p = resolvePartition(ci);
+          if (p == null && ci.partName != null) {
+            LOG.info("Unable to find partition " + ci.getFullPartitionName() +
+                ", assuming it was dropped and moving on.");
+            txnHandler.markCleaned(ci);
+            continue;
+          }
         } catch (Exception e) {
           txnHandler.markCleaned(ci);
           continue;

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
index 7687851..ffdbb9a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
@@ -17,17 +17,17 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.MetaStoreThread;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -428,4 +428,56 @@ public class TestCleaner extends CompactorTest {
     Assert.assertEquals(1, paths.size());
     Assert.assertEquals("base_25", paths.get(0).getName());
   }
+
+  @Test
+  public void droppedTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addDeltaFile(t, null, 1L, 22L, 22);
+    addDeltaFile(t, null, 23L, 24L, 2);
+    addBaseFile(t, null, 25L, 25);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR);
+    txnHandler.compact(rqst);
+    CompactionInfo ci = txnHandler.findNextToCompact("fred");
+    txnHandler.markCompacted(ci);
+    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+
+    ms.dropTable("default", "dt");
+
+    startCleaner();
+
+    // Check there are no compaction requests left.
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(0, rsp.getCompactsSize());
+  }
+
+  @Test
+  public void droppedPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addDeltaFile(t, p, 1L, 22L, 22);
+    addDeltaFile(t, p, 23L, 24L, 2);
+    addBaseFile(t, p, 25L, 25);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR);
+    rqst.setPartitionname("ds=today");
+    txnHandler.compact(rqst);
+    CompactionInfo ci = txnHandler.findNextToCompact("fred");
+    txnHandler.markCompacted(ci);
+    txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startCleaner();
+
+    // Check there are no compaction requests left.
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    Assert.assertEquals(0, rsp.getCompactsSize());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
index 1a9cbca..00b13de 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.txn.compactor;
 
-import junit.framework.Assert;
+import org.junit.Assert;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -27,6 +27,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -653,4 +654,64 @@ public class TestInitiator extends CompactorTest {
     Assert.assertEquals(0, compacts.size());
   }
 
+  @Test
+  public void dropTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addBaseFile(t, null, 20L, 20);
+    addDeltaFile(t, null, 21L, 22L, 2);
+    addDeltaFile(t, null, 23L, 24L, 2);
+
+    burnThroughTransactions(23);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
+    comp.setTablename("dt");
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    ms.dropTable("default", "dt");
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
+  @Test
+  public void dropPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+
+    burnThroughTransactions(23);
+
+    long txnid = openTxn();
+    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
+    comp.setTablename("dp");
+    comp.setPartitionname("ds=today");
+    List<LockComponent> components = new ArrayList<LockComponent>(1);
+    components.add(comp);
+    LockRequest req = new LockRequest(components, "me", "localhost");
+    req.setTxnid(txnid);
+    LockResponse res = txnHandler.lock(req);
+    txnHandler.commitTxn(new CommitTxnRequest(txnid));
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startInitiator();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/c156b32b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 78a7f9e..bebac54 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -29,6 +29,7 @@ import org.junit.Test;
 import java.io.*;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -799,4 +800,48 @@ public class TestWorker extends CompactorTest {
     Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
     Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
   }
+
+  @Test
+  public void droppedTable() throws Exception {
+    Table t = newTable("default", "dt", false);
+
+    addDeltaFile(t, null, 1L, 2L, 2);
+    addDeltaFile(t, null, 3L, 4L, 2);
+    burnThroughTransactions(4);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR);
+    txnHandler.compact(rqst);
+
+    ms.dropTable("default", "dt");
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
+
+  @Test
+  public void droppedPartition() throws Exception {
+    Table t = newTable("default", "dp", true);
+    Partition p = newPartition(t, "today");
+
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+
+    burnThroughTransactions(25);
+
+    CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR);
+    rqst.setPartitionname("ds=today");
+    txnHandler.compact(rqst);
+
+    ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
+
+    startWorker();
+
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals(0, compacts.size());
+  }
 }


[28/39] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)

Posted by pr...@apache.org.
HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/809fcb01
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/809fcb01
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/809fcb01

Branch: refs/heads/llap
Commit: 809fcb01457ab7ba09786f237ce558e81c53ee49
Parents: 4a0ccd1
Author: jpullokk <jp...@apache.org>
Authored: Thu May 7 14:16:25 2015 -0700
Committer: jpullokk <jp...@apache.org>
Committed: Thu May 7 14:16:25 2015 -0700

----------------------------------------------------------------------
 .../ql/optimizer/calcite/cost/HiveCost.java     |   16 +-
 .../annotate_stats_join_pkfk.q.out              |   20 +-
 .../encryption_insert_partition_static.q.out    |   14 +-
 ql/src/test/results/clientpositive/join32.q.out |   84 +-
 .../clientpositive/join32_lessSize.q.out        |  423 ++---
 ql/src/test/results/clientpositive/join33.q.out |   84 +-
 .../clientpositive/join_alt_syntax.q.out        |  306 ++--
 .../clientpositive/join_cond_pushdown_2.q.out   |  150 +-
 .../clientpositive/join_cond_pushdown_4.q.out   |  150 +-
 .../results/clientpositive/spark/join32.q.out   |   88 +-
 .../clientpositive/spark/join32_lessSize.q.out  |  286 ++--
 .../results/clientpositive/spark/join33.q.out   |   88 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |  210 ++-
 .../spark/join_cond_pushdown_2.q.out            |   98 +-
 .../spark/join_cond_pushdown_4.q.out            |   98 +-
 .../clientpositive/tez/explainuser_2.q.out      | 1529 +++++++++---------
 16 files changed, 1770 insertions(+), 1874 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
index 0755943..3c5cac2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveCost.java
@@ -21,7 +21,15 @@ import org.apache.calcite.plan.RelOptCost;
 import org.apache.calcite.plan.RelOptCostFactory;
 import org.apache.calcite.plan.RelOptUtil;
 
-// TODO: This should inherit from VolcanoCost and should just override isLE method.
+/***
+ * NOTE:<br>
+ * 1. HiveCost normalizes cpu and io into time.<br>
+ * 2. CPU and IO costs are added together to find the query latency.<br>
+ * 3. If the query latencies are equal, then row counts are compared.
+ */
+
+// TODO: This should inherit from VolcanoCost and should just override isLE
+// method.
 public class HiveCost implements RelOptCost {
   // ~ Static fields/initializers ---------------------------------------------
 
@@ -114,8 +122,10 @@ public class HiveCost implements RelOptCost {
   }
 
   public boolean isEqWithEpsilon(RelOptCost other) {
-    return (this == other) || (Math.abs((this.cpu + this.io) -
-            (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON);
+    return (this == other)
+        || ((Math.abs(this.io - other.getIo()) < RelOptUtil.EPSILON)
+            && (Math.abs(this.cpu - other.getCpu()) < RelOptUtil.EPSILON) && (Math
+            .abs(this.rowCount - other.getRows()) < RelOptUtil.EPSILON));
   }
 
   public RelOptCost minus(RelOptCost other) {
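
A standalone sketch of the behavioral change; EPSILON below is a stand-in for RelOptUtil.EPSILON (1.0e-5 in Calcite), and a cost is modeled as a plain {rows, cpu, io} triple rather than a HiveCost object.

public class CostEpsilonSketch {
  static final double EPSILON = 1.0e-5;

  // New-style check: io, cpu, AND row count must each match within EPSILON.
  static boolean isEqWithEpsilon(double[] a, double[] b) {  // {rows, cpu, io}
    return Math.abs(a[2] - b[2]) < EPSILON
        && Math.abs(a[1] - b[1]) < EPSILON
        && Math.abs(a[0] - b[0]) < EPSILON;
  }

  public static void main(String[] args) {
    double[] planA = {100, 5.0, 3.0};
    double[] planB = {322, 5.0, 3.0};
    // The old check compared only cpu + io and would have called these equal;
    // with row count included they are not.
    System.out.println(isEqWithEpsilon(planA, planB));  // false
  }
}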

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index 4a5d02d..66e0e9f 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -808,32 +808,32 @@ STAGE PLANS:
             alias: s
             Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: s_store_sk is not null (type: boolean)
-              Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
+              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
             Statistics: Num rows: 12 Data size: 3143 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
-              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: s_store_sk is not null (type: boolean)
+              Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -843,10 +843,10 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
             2 _col0 (type: int)
-          outputColumnNames: _col2
+          outputColumnNames: _col1
           Statistics: Num rows: 322 Data size: 1288 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: _col2 (type: int)
+            expressions: _col1 (type: int)
             outputColumnNames: _col0
             Statistics: Num rows: 322 Data size: 1288 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
index 9e1c1e3..96f8b6a 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
@@ -555,16 +555,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: encryptedtable
-            Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 sort order: 
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
                 tag: -1
                 value expressions: _col0 (type: string), _col1 (type: string)
                 auto parallelism: false
@@ -595,7 +595,7 @@ STAGE PLANS:
               serialization.ddl struct encryptedtable { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1392
+              totalSize 1385
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
           
@@ -643,7 +643,7 @@ STAGE PLANS:
               serialization.ddl struct encryptedtable { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 1385
+              totalSize 1382
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
           
@@ -675,14 +675,14 @@ STAGE PLANS:
         Select Operator
           expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 1
 #### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
             NumFilesPerFileSink: 1
             Static Partition Specification: ds=today/
-            Statistics: Num rows: 12 Data size: 2777 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 12 Data size: 2767 Basic stats: COMPLETE Column stats: NONE
 #### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
             table:
                 input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out
index d9e8dd3..a05a356 100644
--- a/ql/src/test/results/clientpositive/join32.q.out
+++ b/ql/src/test/results/clientpositive/join32.q.out
@@ -109,71 +109,25 @@ STAGE PLANS:
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
         $hdt$_1:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -187,7 +141,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -196,31 +150,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
@@ -229,11 +183,11 @@ STAGE PLANS:
                     keys:
                       0 _col0 (type: string)
                       1 _col3 (type: string)
-                    outputColumnNames: _col0, _col4, _col5
+                    outputColumnNames: _col1, _col2, _col5
                     Position of Big Table: 1
                     Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                      expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
@@ -402,7 +356,7 @@ STAGE PLANS:
               name: default.srcpart
             name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-0
     Move Operator
@@ -451,8 +405,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out b/ql/src/test/results/clientpositive/join32_lessSize.q.out
index 9e3d06d..136c306 100644
--- a/ql/src/test/results/clientpositive/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out
@@ -130,7 +130,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -139,31 +139,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-6
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -175,8 +175,8 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         properties:
-                          columns _col1,_col2,_col3
-                          columns.types string,string,string
+                          columns _col0,_col3
+                          columns.types string,string
                           escape.delim \
                           serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                         serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -190,7 +190,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src
+            base file name: src1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -200,14 +200,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src
+              name default.src1
               numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
+              numRows 25
+              rawDataSize 191
+              serialization.ddl struct src1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 216
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -220,23 +220,26 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src
+                name default.src1
                 numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
+                numRows 25
+                rawDataSize 191
+                serialization.ddl struct src1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
+                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
+              name: default.src1
+            name: default.src1
 #### A masked pattern was here ####
           Partition
-            base file name: src1
+            base file name: hr=11
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
             properties:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
@@ -244,106 +247,59 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src1
+              name default.srcpart
               numFiles 1
-              numRows 25
-              rawDataSize 191
-              serialization.ddl struct src1 { string key, string value}
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 216
+              totalSize 5812
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src1
-                numFiles 1
-                numRows 25
-                rawDataSize 191
-                serialization.ddl struct src1 { string key, string value}
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src1
-            name: default.src1
+              name: default.srcpart
+            name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -362,11 +318,11 @@ STAGE PLANS:
               keys:
                 0 _col0 (type: string)
                 1 _col3 (type: string)
-              outputColumnNames: _col0, _col4, _col5
+              outputColumnNames: _col1, _col2, _col5
               Position of Big Table: 1
               Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -406,8 +362,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col1,_col2,_col3
-              columns.types string,string,string
+              columns _col0,_col3
+              columns.types string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -415,19 +371,16 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col1,_col2,_col3
-                columns.types string,string,string
+                columns _col0,_col3
+                columns.types string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 #### A masked pattern was here ####
           Partition
-            base file name: hr=11
+            base file name: src
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
             properties:
               COLUMN_STATS_ACCURATE true
               bucket_count -1
@@ -435,13 +388,11 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.srcpart
+              name default.src
               numFiles 1
               numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
               rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
+              serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 5812
@@ -451,21 +402,24 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
+                COLUMN_STATS_ACCURATE true
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
+              name: default.src
+            name: default.src
       Truncated Path -> Alias:
 #### A masked pattern was here ####
 
@@ -516,8 +470,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1
@@ -703,28 +657,27 @@ STAGE DEPENDENCIES:
   Stage-8 depends on stages: Stage-11
   Stage-10 depends on stages: Stage-8
   Stage-7 depends on stages: Stage-10
-  Stage-0 depends on stages: Stage-7
+  Stage-9 depends on stages: Stage-7
+  Stage-6 depends on stages: Stage-9
+  Stage-0 depends on stages: Stage-6
   Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-11
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_1:$hdt$_2:x 
-          Fetch Operator
-            limit: -1
-        $hdt$_1:$hdt$_3:x 
+        $hdt$_1:$hdt$_2:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_1:$hdt$_2:x 
+        $hdt$_1:$hdt$_2:$hdt$_2:x 
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (value is not null and key is not null) (type: boolean)
+              predicate: (key is not null and value is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -733,9 +686,12 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col1 (type: string)
-                  Position of Big Table: 0
-        $hdt$_1:$hdt$_3:x 
+                    1 _col0 (type: string)
+                  Position of Big Table: 1
+
+  Stage: Stage-8
+    Map Reduce
+      Map Operator Tree:
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -748,62 +704,32 @@ STAGE PLANS:
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                HashTable Sink Operator
-                  keys:
-                    0 _col1 (type: string)
-                    1 _col0 (type: string)
-                  Position of Big Table: 0
-
-  Stage: Stage-8
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: w
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: value is not null (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col1 (type: string)
-                  outputColumnNames: _col1
-                  Position of Big Table: 0
-                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                  Map Join Operator
-                    condition map:
-                         Inner Join 0 to 1
-                    keys:
-                      0 _col1 (type: string)
-                      1 _col0 (type: string)
-                    outputColumnNames: _col1, _col4
-                    Position of Big Table: 0
-                    Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
+                    1 _col0 (type: string)
+                  outputColumnNames: _col0, _col1, _col3
+                  Position of Big Table: 1
+                  Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
 #### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          properties:
-                            columns _col1,_col4
-                            columns.types string,string
-                            escape.delim \
-                            serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+                    NumFilesPerFileSink: 1
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col3
+                          columns.types string,string,string
+                          escape.delim \
+                          serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
       Local Work:
         Map Reduce Local Work
       Path -> Alias:
@@ -811,7 +737,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src
+            base file name: src1
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -821,14 +747,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src
+              name default.src1
               numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
+              numRows 25
+              rawDataSize 191
+              serialization.ddl struct src1 { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
+              totalSize 216
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -841,21 +767,106 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src
+                name default.src1
                 numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
+                numRows 25
+                rawDataSize 191
+                serialization.ddl struct src1 { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
+                totalSize 216
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
+              name: default.src1
+            name: default.src1
+      Truncated Path -> Alias:
+        /src1 [$hdt$_1:$hdt$_2:$hdt$_3:x]
+
+  Stage: Stage-10
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $hdt$_1:$hdt$_1:w 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $hdt$_1:$hdt$_1:w 
+          TableScan
+            alias: w
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: value is not null (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 _col0 (type: string)
+                    1 _col1 (type: string)
+                  Position of Big Table: 1
+
+  Stage: Stage-7
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              keys:
+                0 _col0 (type: string)
+                1 _col1 (type: string)
+              outputColumnNames: _col1, _col4
+              Position of Big Table: 1
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns _col1,_col4
+                      columns.types string,string
+                      escape.delim \
+                      serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: src1
+            base file name: -mr-10002
+            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            properties:
+              columns _col0,_col1,_col3
+              columns.types string,string,string
+              escape.delim \
+              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          
+              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              properties:
+                columns _col0,_col1,_col3
+                columns.types string,string,string
+                escape.delim \
+                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+#### A masked pattern was here ####
+          Partition
+            base file name: src
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -865,14 +876,14 @@ STAGE PLANS:
               columns.comments 'default','default'
               columns.types string:string
 #### A masked pattern was here ####
-              name default.src1
+              name default.src
               numFiles 1
-              numRows 25
-              rawDataSize 191
-              serialization.ddl struct src1 { string key, string value}
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 216
+              totalSize 5812
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
@@ -885,22 +896,22 @@ STAGE PLANS:
                 columns.comments 'default','default'
                 columns.types string:string
 #### A masked pattern was here ####
-                name default.src1
+                name default.src
                 numFiles 1
-                numRows 25
-                rawDataSize 191
-                serialization.ddl struct src1 { string key, string value}
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
                 serialization.format 1
                 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 216
+                totalSize 5812
 #### A masked pattern was here ####
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src1
-            name: default.src1
+              name: default.src
+            name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:w]
+#### A masked pattern was here ####
 
-  Stage: Stage-10
+  Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
         $hdt$_0:w 
@@ -926,7 +937,7 @@ STAGE PLANS:
                     1 _col1 (type: string)
                   Position of Big Table: 1
 
-  Stage: Stage-7
+  Stage: Stage-6
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -939,17 +950,17 @@ STAGE PLANS:
                 1 _col1 (type: string)
               outputColumnNames: _col1, _col3, _col6
               Position of Big Table: 1
-              Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 1
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
@@ -982,7 +993,7 @@ STAGE PLANS:
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: -mr-10002
+            base file name: -mr-10001
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out
index d9e8dd3..a05a356 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -109,71 +109,25 @@ STAGE PLANS:
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:z 
+        $hdt$_0:y 
           Fetch Operator
             limit: -1
-            Partition Description:
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
         $hdt$_1:$hdt$_2:x 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:z 
+        $hdt$_0:y 
           TableScan
-            alias: z
+            alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+              predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
@@ -187,7 +141,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (key is not null and value is not null) (type: boolean)
+              predicate: (value is not null and key is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -196,31 +150,31 @@ STAGE PLANS:
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
+                    1 _col1 (type: string)
                   Position of Big Table: 0
 
   Stage: Stage-5
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: y
+            alias: z
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: key is not null (type: boolean)
+              predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: value (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
                   keys:
                     0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col1, _col2, _col3
+                    1 _col1 (type: string)
+                  outputColumnNames: _col0, _col3
                   Position of Big Table: 0
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
@@ -229,11 +183,11 @@ STAGE PLANS:
                     keys:
                       0 _col0 (type: string)
                       1 _col3 (type: string)
-                    outputColumnNames: _col0, _col4, _col5
+                    outputColumnNames: _col1, _col2, _col5
                     Position of Big Table: 1
                     Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                      expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
@@ -402,7 +356,7 @@ STAGE PLANS:
               name: default.srcpart
             name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:y]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-0
     Move Operator
@@ -451,8 +405,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
index 007e4c6..cc908c1 100644
--- a/ql/src/test/results/clientpositive/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
@@ -359,13 +359,13 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -379,36 +379,34 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -420,60 +418,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col0 (type: int), _col1 (type: string)
-          outputColumnNames: _col1, _col3, _col5, _col6
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int)
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -488,14 +432,21 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            Reduce Output Operator
+              key expressions: _col3 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col3 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
+            0 _col0 (type: string)
+            1 _col3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col4
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -504,6 +455,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col1 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col1 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col1 (type: int)
+          outputColumnNames: _col1, _col2, _col4, _col6
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
@@ -521,56 +519,54 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
               Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col1 (type: string), _col0 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+                  key expressions: _col1 (type: string), _col0 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col1 (type: string), _col0 (type: int)
+            1 _col1 (type: string), _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -582,60 +578,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: string), _col0 (type: int)
-              sort order: ++
-              Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col3 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col1 (type: string), _col0 (type: int)
-              sort order: ++
-              Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col1 (type: string), _col0 (type: int)
-            1 _col1 (type: string), _col0 (type: int)
-          outputColumnNames: _col1, _col3, _col5, _col6
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int)
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -650,14 +592,21 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            Reduce Output Operator
+              key expressions: _col3 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col3 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
+            0 _col0 (type: string)
+            1 _col3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col4
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -666,6 +615,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col1 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col1 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col1 (type: int)
+          outputColumnNames: _col1, _col2, _col4, _col6
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
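
The hunks above trace the reworked plan for join_alt_syntax.q: the first reduce-side join now shuffles both inputs on the compound key (_col0 int, _col1 string) rather than on the int key alone, so the p_name equality is checked in the same shuffle, and the stages chain linearly (Stage-3 -> Stage-2 -> Stage-1 -> Stage-0) instead of merging two root stages at Stage-2. A HiveQL sketch of the query shape being planned, reconstructed from the WHERE clause visible in the hunk header (the SELECT list is elided there, so the projected columns are illustrative, and the scanned table is assumed to be the standard part test table):

    select p1.p_name, p2.p_name, p3.p_name, p4.p_name
    from part p1, part p2, part p3, part p4
    where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
          and p1.p_partkey = p2.p_partkey;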


[18/39] hive git commit: HIVE-9644 : CASE comparison operator rotation optimization (Ashutosh Chauhan via Gopal V)

Posted by pr...@apache.org.
HIVE-9644 : CASE comparison operator rotation optimization (Ashutosh Chauhan via Gopal V)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/48a243ef
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/48a243ef
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/48a243ef

Branch: refs/heads/llap
Commit: 48a243efdf91a7f5334b28810b30fc8d82925d51
Parents: d434f64
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Sat May 2 13:17:47 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu May 7 10:27:35 2015 -0700

----------------------------------------------------------------------
 .../optimizer/ConstantPropagateProcFactory.java |  83 +++-
 ql/src/test/queries/clientpositive/fold_case.q  |  12 +
 ql/src/test/queries/clientpositive/fold_when.q  |  31 ++
 .../test/results/clientpositive/fold_case.q.out | 301 ++++++++++++
 .../test/results/clientpositive/fold_when.q.out | 480 +++++++++++++++++++
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |  14 +-
 6 files changed, 912 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
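
In short, this patch teaches constant propagation to rotate a CASE (or CASE WHEN) whose branches fold to boolean constants into a plain comparison on the CASE key. A minimal HiveQL sketch of the rewrite, using two of the fold_case.q queries added below (the folded predicates appear in the fold_case.q.out hunks):

    -- before folding:
    select count(1) from src where (case key when '238' then true else false end);
    -- the filter becomes:   predicate: (key = '238')

    select count(1) from src where (case key when '238' then 1=2 else 1=1 end);
    -- the filter becomes:   predicate: (key <> '238')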


http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index e9436e5..f536ef6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.NodeProcessor;
@@ -65,11 +66,15 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredJavaObject;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCase;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNot;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
@@ -79,9 +84,11 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantBooleanObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.BooleanWritable;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -199,10 +206,11 @@ public final class ConstantPropagateProcFactory {
    * @param op processing operator
    * @param propagate if true, assignment expressions will be added to constants.
    * @return fold expression
+   * @throws UDFArgumentException
    */
   private static ExprNodeDesc foldExpr(ExprNodeDesc desc, Map<ColumnInfo, ExprNodeDesc> constants,
       ConstantPropagateProcCtx cppCtx, Operator<? extends Serializable> op, int tag,
-      boolean propagate) {
+      boolean propagate) throws UDFArgumentException {
     if (desc instanceof ExprNodeGenericFuncDesc) {
       ExprNodeGenericFuncDesc funcDesc = (ExprNodeGenericFuncDesc) desc;
 
@@ -356,7 +364,7 @@ public final class ConstantPropagateProcFactory {
     return (expr instanceof ExprNodeColumnDesc) ? (ExprNodeColumnDesc)expr : null;
   }
 
-  private static ExprNodeDesc shortcutFunction(GenericUDF udf, List<ExprNodeDesc> newExprs) {
+  private static ExprNodeDesc shortcutFunction(GenericUDF udf, List<ExprNodeDesc> newExprs) throws UDFArgumentException {
     if (udf instanceof GenericUDFOPAnd) {
       for (int i = 0; i < 2; i++) {
         ExprNodeDesc childExpr = newExprs.get(i);
@@ -407,6 +415,77 @@ public final class ConstantPropagateProcFactory {
       }
     }
 
+    if (udf instanceof GenericUDFWhen) {
+      if (!(newExprs.size() == 2 || newExprs.size() == 3)) {
+        // In general, WHEN can have an unlimited # of branches;
+        // we currently only handle either 1 or 2 branches.
+        return null;
+      }
+      ExprNodeDesc thenExpr = newExprs.get(1);
+      if (thenExpr instanceof ExprNodeNullDesc && (newExprs.size() == 2 || newExprs.get(2) instanceof ExprNodeNullDesc)) {
+        return thenExpr;
+      }
+      ExprNodeDesc elseExpr = newExprs.size() == 3 ? newExprs.get(2) :
+        new ExprNodeConstantDesc(newExprs.get(1).getTypeInfo(), null); // a missing ELSE is a NULL of the THEN branch's type; get(2) would be out of bounds when size == 2
+
+      ExprNodeDesc whenExpr = newExprs.get(0);
+      if (whenExpr instanceof ExprNodeConstantDesc) {
+        Boolean whenVal = (Boolean)((ExprNodeConstantDesc) whenExpr).getValue();
+        return (whenVal == null || Boolean.FALSE.equals(whenVal)) ? elseExpr : thenExpr;
+      }
+
+      if (thenExpr instanceof ExprNodeConstantDesc && elseExpr instanceof ExprNodeConstantDesc) {
+        ExprNodeConstantDesc constThen = (ExprNodeConstantDesc) thenExpr;
+        ExprNodeConstantDesc constElse = (ExprNodeConstantDesc) elseExpr;
+        Object thenVal = constThen.getValue();
+        Object elseVal = constElse.getValue();
+        if (thenVal == null) {
+          return elseVal == null ? thenExpr : null;
+        } else if (thenVal.equals(elseVal)) {
+          return thenExpr;
+        } else if (thenVal instanceof Boolean && elseVal instanceof Boolean) {
+          return Boolean.TRUE.equals(thenVal) ? newExprs.get(0) :
+            ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPNot(), newExprs.subList(0, 1));
+        } else {
+          return null;
+        }
+      }
+    }
+    if (udf instanceof GenericUDFCase) {
+      // HIVE-9644: attempt to fold an expression like
+      //   where (case ss_sold_date when '1998-01-01' then 1=1 else null=1 end);
+      // into: where ss_sold_date = '1998-01-01';
+      if (!(newExprs.size() == 3 || newExprs.size() == 4)) {
+      // In general, CASE can have an unlimited # of branches;
+      // we currently only handle either 1 or 2 branches.
+        return null;
+      }
+      ExprNodeDesc thenExpr = newExprs.get(2);
+      if (thenExpr instanceof ExprNodeNullDesc && (newExprs.size() == 3 || newExprs.get(3) instanceof ExprNodeNullDesc)) {
+        return thenExpr;
+      }
+
+      ExprNodeDesc elseExpr = newExprs.size() == 4 ? newExprs.get(3) :
+        new ExprNodeConstantDesc(newExprs.get(2).getTypeInfo(), null);
+
+      if (thenExpr instanceof ExprNodeConstantDesc && elseExpr instanceof ExprNodeConstantDesc) {
+        ExprNodeConstantDesc constThen = (ExprNodeConstantDesc) thenExpr;
+        ExprNodeConstantDesc constElse = (ExprNodeConstantDesc) elseExpr;
+        Object thenVal = constThen.getValue();
+        Object elseVal = constElse.getValue();
+        if (thenVal == null) {
+          return elseVal == null ? thenExpr : null;
+        } else if (thenVal.equals(elseVal)) {
+          return thenExpr;
+        } else if (thenVal instanceof Boolean && elseVal instanceof Boolean) {
+          return Boolean.TRUE.equals(thenVal) ? ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPEqual(), newExprs.subList(0, 2)) :
+            ExprNodeGenericFuncDesc.newInstance(new GenericUDFOPNotEqual(), newExprs.subList(0, 2));
+        } else {
+          return null;
+        }
+      }
+    }
+
     return null;
   }
 

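The two blocks added above implement the same fold table, once for WHEN and once for CASE: equal constant THEN/ELSE branches collapse to that constant; a constant boolean condition (WHEN only) picks a branch outright; and opposite boolean-constant branches rotate into the condition itself or its negation (GenericUDFOPNot for WHEN, GenericUDFOPEqual / GenericUDFOPNotEqual for CASE). Sketched in HiveQL, under the 1-or-2-branch limitation the comments note:

    -- WHEN: boolean-constant branches rotate into the condition
    --   case when (key = '238') then 1=1 else 2=1 end  ==>  (key = '238')
    --   case when (key = '238') then 1=3 else 1=1 end  ==>  (not (key = '238'))
    -- CASE: boolean-constant branches rotate into (in)equality on the key
    --   case key when '238' then true else false end   ==>  (key = '238')
    --   case key when '238' then 1=2 else 1=1 end      ==>  (key <> '238')
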
http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/queries/clientpositive/fold_case.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/fold_case.q b/ql/src/test/queries/clientpositive/fold_case.q
new file mode 100644
index 0000000..3f9e3a3
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/fold_case.q
@@ -0,0 +1,12 @@
+explain
+select count(1) from src where (case key when '238' then true else false end);
+explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end);
+explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end);
+explain 
+select count(1) from src where (case key when '238' then true else 1=1 end);
+explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end);
+explain 
+select count(1) from src where (case key when '238' then null else 1=1 end);
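
Each query above exercises one row of that fold table; the expected predicates are spelled out in fold_case.q.out below and boil down to:

    -- then true else false   ==>  predicate: (key = '238')
    -- then 1=2  else 1=1     ==>  predicate: (key <> '238')
    -- then 1=2  else 1=31    ==>  predicate: false   (both branches false)
    -- then true else 1=1     ==>  filter removed     (both branches true)
    -- then 1=1  else 1=null  ==>  CASE kept with ELSE (null)
    -- then null else 1=1     ==>  CASE kept with THEN (null)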

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/queries/clientpositive/fold_when.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/fold_when.q b/ql/src/test/queries/clientpositive/fold_when.q
new file mode 100644
index 0000000..e827a5c
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/fold_when.q
@@ -0,0 +1,31 @@
+explain
+select key from src where ((case when (key = '238') then null     end) = 1);
+explain
+select key from src where ((case when (key = '238') then null else null end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2);
+explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1);
+explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end));
+explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end));
+explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end));
+explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1);
+explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2);
+explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1);
+explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end));
+explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end));
+
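
The WHEN variants above follow the same pattern, plus the constant-condition cases; reading ahead in fold_when.q.out, the visible plans fold as:

    -- (case when (key='238') then 1 else 1 end) = 1        ==>  filter removed
    -- (case when (key='238') then 1 else 1 end) = 2        ==>  predicate: false
    -- (case when (key='238') then null [else null] end)=1  ==>  predicate: false
    -- case when (key='238') then 1=1 else 2=1 end          ==>  predicate: (key = '238')
    -- case when (key='238') then 1=3 else 1=1 end          ==>  predicate: (not (key = '238'))
    -- when ('23'='23') / ('2'='238') / (true=null):        the constant condition picks a
    --                                                      branch, and the filter is removed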

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/fold_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_case.q.out b/ql/src/test/results/clientpositive/fold_case.q.out
new file mode 100644
index 0000000..de6c43e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/fold_case.q.out
@@ -0,0 +1,301 @@
+PREHOOK: query: explain
+select count(1) from src where (case key when '238' then true else false end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(1) from src where (case key when '238' then true else false end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = '238') (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key <> '238') (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then 1=2 else 1=31 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Group By Operator
+                aggregations: count(1)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then true else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then true else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: count(1)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(1) from src where (case key when '238' then 1=1 else 1=null end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE (key) WHEN ('238') THEN (true) ELSE (null) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain 
+select count(1) from src where (case key when '238' then null else 1=1 end)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain 
+select count(1) from src where (case key when '238' then null else 1=1 end)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE (key) WHEN ('238') THEN (null) ELSE (true) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
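
The two plans above that keep an explicit CASE in the predicate mark where the fold deliberately stops: with THEN true / ELSE null (or the reverse), rotating to key = '238' / key <> '238' would turn a NULL result into false. A WHERE clause happens to filter NULL and false identically, but constant propagation folds expressions independent of filter context, so it only rotates when both branches are non-NULL boolean constants:

    select count(1) from src where (case key when '238' then 1=1 else 1=null end);
    -- kept as:  CASE (key) WHEN ('238') THEN (true) ELSE (null) END

    select count(1) from src where (case key when '238' then null else 1=1 end);
    -- kept as:  CASE (key) WHEN ('238') THEN (null) ELSE (true) END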

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/fold_when.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_when.q.out b/ql/src/test/results/clientpositive/fold_when.q.out
new file mode 100644
index 0000000..37803e0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/fold_when.q.out
@@ -0,0 +1,480 @@
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then null     end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then null     end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then null else null end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then null else null end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else 1 end) = 2)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1 else null end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (CASE WHEN ((key = '238')) THEN (1) ELSE (null) END = 1) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else null=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: CASE WHEN ((key = '238')) THEN (true) ELSE (null) END (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=2 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 2=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=1 else 2=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = '238') (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: '238' (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = '238') then 1=3 else 1=1 end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (not (key = '238')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when ('23' = '23') then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when ('2' = '238') then 1 else 2 end) = 2)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (true=null) then 1 else 1 end) = 1)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '11' else '11'  end)) then false else true end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (not (key = '11')) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select key from src where ((case when (key = (case when (key = '238') then '12' else '11'  end)) then 2=2   else true end))
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
+
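
For reference, the plans above exercise Hive's constant folding of CASE predicates.
A minimal sketch of the equivalences being tested (against the standard
src(key string, value string) test table):

  -- both branches true: the filter folds away, leaving a fetch-only plan
  select key from src where ((case when (key = '238') then 1=1 else 2=2 end));

  -- both branches false: the predicate folds to the constant false
  select key from src where ((case when (key = '238') then 1=3 else 2=1 end));

  -- mixed branches: the CASE collapses to the WHEN condition or its negation
  select key from src where ((case when (key = '238') then 1=1 else 2=1 end));  -- predicate: (key = '238')
  select key from src where ((case when (key = '238') then 1=3 else 1=1 end));  -- predicate: (not (key = '238'))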

http://git-wip-us.apache.org/repos/asf/hive/blob/48a243ef/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
index 6340a75..381e58f 100644
--- a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
+++ b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
@@ -3814,22 +3814,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: lineitem_ix
-            Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 0 Data size: 12099 Basic stats: PARTIAL Column stats: COMPLETE
             Select Operator
-              expressions: CASE (l_orderkey) WHEN (null) THEN (1) ELSE (1) END (type: int)
+              expressions: 1 (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 0 Data size: 12099 Basic stats: PARTIAL Column stats: COMPLETE
               Group By Operator
                 aggregations: count(_col0)
                 keys: _col0 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -3837,10 +3837,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
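
The rewritten expression in this hunk is the same folding at work: both branches
of CASE (l_orderkey) WHEN (null) THEN (1) ELSE (1) END yield 1, and a WHEN null
comparison can never match anyway, so the planner emits the literal instead:

  -- hypothetical query of this shape
  select case l_orderkey when null then 1 else 1 end from lineitem_ix;  -- the CASE plans as: 1 (type: int)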


[31/39] hive git commit: HIVE-10568 : Select count(distinct()) can have a more optimal execution plan (Ashutosh Chauhan via John Pullokkaran)

Posted by pr...@apache.org.
HIVE-10568 : Select count(distinct()) can have a more optimal execution plan (Ashutosh Chauhan via John Pullokkaran)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1538a71e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1538a71e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1538a71e

Branch: refs/heads/llap
Commit: 1538a71ef55c315b9d5e620f63be5895ac2dbb76
Parents: 08b0708
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Apr 30 18:33:09 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sun May 10 10:37:02 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   5 +-
 .../rules/HiveExpandDistinctAggregatesRule.java | 278 +++++++++++++++++++
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  13 +-
 .../clientpositive/tez/limit_pushdown.q.out     |  31 ++-
 .../test/results/clientpositive/tez/mrr.q.out   |  52 ++--
 .../tez/vector_count_distinct.q.out             |  28 +-
 .../tez/vectorization_limit.q.out               |  31 ++-
 .../tez/vectorized_distinct_gby.q.out           |  51 +++-
 8 files changed, 417 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 5d5a928..115a5d0 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1214,7 +1214,10 @@ public class HiveConf extends Configuration {
     HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."),
     HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(),
         "Probability with which a row will be chosen."),
-
+    HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable, this "
+        + "optimization rewrites distinct aggregates from a single stage to multi-stage "
+        + "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it "
+        + "should be a cost-based decision; until Hive formalizes a cost model for this, it is config driven."),
     // whether to optimize union followed by select followed by filesink
     // It creates sub-directories in the final output, so should not be turned on in systems
     // where MAPREDUCE-1501 is not present
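
The new flag can be toggled per session; a minimal sketch (property name and
default taken from the hunk above):

  SET hive.optimize.distinct.rewrite=true;   -- default per this patch
  SET hive.optimize.distinct.rewrite=false;  -- fall back to single-stage distinct aggregation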

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
new file mode 100644
index 0000000..73c7cac
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveExpandDistinctAggregatesRule.java
@@ -0,0 +1,278 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.metadata.RelColumnOrigin;
+import org.apache.calcite.rel.metadata.RelMetadataQuery;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.calcite.util.Pair;
+import org.apache.calcite.util.Util;
+import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Planner rule that expands distinct aggregates
+ * (such as {@code COUNT(DISTINCT x)}) from a
+ * {@link org.apache.calcite.rel.core.Aggregate}.
+ *
+ * <p>How this is done depends upon the arguments to the function. If all
+ * functions have the same argument
+ * (e.g. {@code COUNT(DISTINCT x), SUM(DISTINCT x)} both have the argument
+ * {@code x}) then one extra {@link org.apache.calcite.rel.core.Aggregate} is
+ * sufficient.
+ *
+ * <p>If there are multiple arguments
+ * (e.g. {@code COUNT(DISTINCT x), COUNT(DISTINCT y)})
+ * the rule creates separate {@code Aggregate}s and combines using a
+ * {@link org.apache.calcite.rel.core.Join}.
+ */
+
+// Stripped down version of org.apache.calcite.rel.rules.AggregateExpandDistinctAggregatesRule
+// This is adapted for Hive, but should eventually be deleted from Hive in favor of the Calcite rule above.
+
+public final class HiveExpandDistinctAggregatesRule extends RelOptRule {
+  //~ Static fields/initializers ---------------------------------------------
+
+  /** The default instance of the rule; operates only on logical expressions. */
+  public static final HiveExpandDistinctAggregatesRule INSTANCE =
+      new HiveExpandDistinctAggregatesRule(HiveAggregate.class,
+          HiveProject.DEFAULT_PROJECT_FACTORY);
+
+  private static RelFactories.ProjectFactory projFactory;
+
+  //~ Constructors -----------------------------------------------------------
+
+  public HiveExpandDistinctAggregatesRule(
+      Class<? extends Aggregate> clazz, RelFactories.ProjectFactory projectFactory) {
+    super(operand(clazz, any()));
+    projFactory = projectFactory;
+  }
+
+  //~ Methods ----------------------------------------------------------------
+
+  @Override
+  public void onMatch(RelOptRuleCall call) {
+    final Aggregate aggregate = call.rel(0);
+    if (!aggregate.containsDistinctCall()) {
+      return;
+    }
+
+    // Find all of the agg expressions. We use a LinkedHashSet to ensure
+    // determinism.
+    int nonDistinctCount = 0;
+    Set<List<Integer>> argListSets = new LinkedHashSet<List<Integer>>();
+    for (AggregateCall aggCall : aggregate.getAggCallList()) {
+      if (!aggCall.isDistinct()) {
+        ++nonDistinctCount;
+        continue;
+      }
+      ArrayList<Integer> argList = new ArrayList<Integer>();
+      for (Integer arg : aggCall.getArgList()) {
+        argList.add(arg);
+      }
+      argListSets.add(argList);
+    }
+    Util.permAssert(argListSets.size() > 0, "containsDistinctCall lied");
+
+    // If all of the agg expressions are distinct and have the same
+    // arguments then we can use a more efficient form.
+    if ((nonDistinctCount == 0) && (argListSets.size() == 1)) {
+      for (Integer arg : argListSets.iterator().next()) {
+        Set<RelColumnOrigin> colOrigs = RelMetadataQuery.getColumnOrigins(aggregate, arg);
+        if (null != colOrigs) {
+          for (RelColumnOrigin colOrig : colOrigs) {
+            RelOptHiveTable hiveTbl = (RelOptHiveTable)colOrig.getOriginTable();
+            if(hiveTbl.getPartColInfoMap().containsKey(colOrig.getOriginColumnOrdinal())) {
+              // Encountered a partitioning column; this is better handled by the MetadataOnly optimizer.
+              return;
+            }
+          }
+        }
+      }
+      RelNode converted =
+          convertMonopole(
+              aggregate,
+              argListSets.iterator().next());
+      call.transformTo(converted);
+      return;
+    }
+  }
+
+  /**
+   * Converts an aggregate relational expression that contains just one
+   * distinct aggregate function (or perhaps several over the same arguments)
+   * and no non-distinct aggregate functions.
+   */
+  private RelNode convertMonopole(
+      Aggregate aggregate,
+      List<Integer> argList) {
+    // For example,
+    //    SELECT deptno, COUNT(DISTINCT sal), SUM(DISTINCT sal)
+    //    FROM emp
+    //    GROUP BY deptno
+    //
+    // becomes
+    //
+    //    SELECT deptno, COUNT(distinct_sal), SUM(distinct_sal)
+    //    FROM (
+    //      SELECT DISTINCT deptno, sal AS distinct_sal
+    //      FROM EMP GROUP BY deptno)
+    //    GROUP BY deptno
+
+    // Project the columns of the GROUP BY plus the arguments
+    // to the agg function.
+    Map<Integer, Integer> sourceOf = new HashMap<Integer, Integer>();
+    final Aggregate distinct =
+        createSelectDistinct(aggregate, argList, sourceOf);
+
+    // Create an aggregate on top, with the new aggregate list.
+    final List<AggregateCall> newAggCalls =
+        Lists.newArrayList(aggregate.getAggCallList());
+    rewriteAggCalls(newAggCalls, argList, sourceOf);
+    final int cardinality = aggregate.getGroupSet().cardinality();
+    return aggregate.copy(aggregate.getTraitSet(), distinct,
+        aggregate.indicator, ImmutableBitSet.range(cardinality), null,
+        newAggCalls);
+  }
+
+  private static void rewriteAggCalls(
+      List<AggregateCall> newAggCalls,
+      List<Integer> argList,
+      Map<Integer, Integer> sourceOf) {
+    // Rewrite the agg calls. Each distinct agg becomes a non-distinct call
+    // to the corresponding field from the right; for example,
+    // "COUNT(DISTINCT e.sal)" becomes   "COUNT(distinct_e.sal)".
+    for (int i = 0; i < newAggCalls.size(); i++) {
+      final AggregateCall aggCall = newAggCalls.get(i);
+
+      // Ignore agg calls which are not distinct or have the wrong set of
+      // arguments. If we're rewriting aggs whose args are {sal}, we will
+      // rewrite COUNT(DISTINCT sal) and SUM(DISTINCT sal) but ignore
+      // COUNT(DISTINCT gender) or SUM(sal).
+      if (!aggCall.isDistinct()) {
+        continue;
+      }
+      if (!aggCall.getArgList().equals(argList)) {
+        continue;
+      }
+
+      // Re-map arguments.
+      final int argCount = aggCall.getArgList().size();
+      final List<Integer> newArgs = new ArrayList<Integer>(argCount);
+      for (int j = 0; j < argCount; j++) {
+        final Integer arg = aggCall.getArgList().get(j);
+        newArgs.add(sourceOf.get(arg));
+      }
+      final AggregateCall newAggCall =
+          new AggregateCall(
+              aggCall.getAggregation(),
+              false,
+              newArgs,
+              aggCall.getType(),
+              aggCall.getName());
+      newAggCalls.set(i, newAggCall);
+    }
+  }
+
+  /**
+   * Given an {@link org.apache.calcite.rel.logical.LogicalAggregate}
+   * and the ordinals of the arguments to a
+   * particular call to an aggregate function, creates a 'select distinct'
+   * relational expression which projects the group columns and those
+   * arguments but nothing else.
+   *
+   * <p>For example, given
+   *
+   * <blockquote>
+   * <pre>select f0, count(distinct f1), count(distinct f2)
+   * from t group by f0</pre>
+   * </blockquote>
+   *
+   * and the arglist
+   *
+   * <blockquote>{2}</blockquote>
+   *
+   * returns
+   *
+   * <blockquote>
+   * <pre>select distinct f0, f2 from t</pre>
+   * </blockquote>
+   *
+   *
+   * <p>The <code>sourceOf</code> map is populated with the source of each
+   * column; in this case sourceOf.get(0) = 0, and sourceOf.get(1) = 2.</p>
+   *
+   * @param aggregate Aggregate relational expression
+   * @param argList   Ordinals of columns to make distinct
+   * @param sourceOf  Out parameter, is populated with a map of where each
+   *                  output field came from
+   * @return Aggregate relational expression which projects the required
+   * columns
+   */
+  private static Aggregate createSelectDistinct(
+      Aggregate aggregate,
+      List<Integer> argList,
+      Map<Integer, Integer> sourceOf) {
+    final List<Pair<RexNode, String>> projects =
+        new ArrayList<Pair<RexNode, String>>();
+    final RelNode child = aggregate.getInput();
+    final List<RelDataTypeField> childFields =
+        child.getRowType().getFieldList();
+    for (int i : aggregate.getGroupSet()) {
+      sourceOf.put(i, projects.size());
+      projects.add(RexInputRef.of2(i, childFields));
+    }
+    for (Integer arg : argList) {
+      if (sourceOf.get(arg) != null) {
+        continue;
+      }
+      sourceOf.put(arg, projects.size());
+      projects.add(RexInputRef.of2(arg, childFields));
+    }
+    final RelNode project =
+        projFactory.createProject(child, Pair.left(projects), Pair.right(projects));
+
+    // Get the distinct values of the GROUP BY fields and the arguments
+    // to the agg functions.
+    return aggregate.copy(aggregate.getTraitSet(), project, false,
+        ImmutableBitSet.range(projects.size()),
+        null, ImmutableList.<AggregateCall>of());
+  }
+}
+
+// End HiveExpandDistinctAggregatesRule.java
\ No newline at end of file
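
In SQL terms, the rewrite this rule performs is the one spelled out in the
convertMonopole comment (emp(deptno, sal) is the comment's illustrative table,
not part of the patch):

  -- before: a single aggregate with distinct calls
  SELECT deptno, COUNT(DISTINCT sal), SUM(DISTINCT sal)
  FROM emp
  GROUP BY deptno;

  -- after: an inner aggregate deduplicates (deptno, sal), then an outer
  -- aggregate applies the now non-distinct COUNT/SUM
  SELECT deptno, COUNT(distinct_sal), SUM(distinct_sal)
  FROM (SELECT deptno, sal AS distinct_sal
        FROM emp
        GROUP BY deptno, sal) t
  GROUP BY deptno;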

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 48f488f..6e6923c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSort;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterJoinRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterProjectTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterSetOpTransposeRule;
@@ -267,7 +268,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
           this.ctx.setCboInfo("Plan optimized by CBO.");
           this.ctx.setCboSucceeded(true);
           LOG.debug(newAST.dump());
-          } 
+          }
         } catch (Exception e) {
           boolean isMissingStats = noColsMissingStats.get() > 0;
           if (isMissingStats) {
@@ -901,6 +902,16 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // Partition Pruning; otherwise Expression evaluation may try to execute
       // corelated sub query.
 
+      // 0. Distinct aggregate rewrite
+      // Run this optimization early, since it is expanding the operator pipeline.
+      if (conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") &&
+          conf.getBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEDISTINCTREWRITE)) {
+        // It's not clear if this rewrite is always performant on MR, since the extra map
+        // phase introduced for the 2nd MR job may offset the gains of this multi-stage aggregation.
+        // We need a cost model for MR to enable this on MR.
+        basePlan = hepPlan(basePlan, true, mdProvider, HiveExpandDistinctAggregatesRule.INSTANCE);
+      }
+
       // 1. Push Down Semi Joins
       basePlan = hepPlan(basePlan, true, mdProvider, SemiJoinJoinTransposeRule.INSTANCE,
           SemiJoinFilterTransposeRule.INSTANCE, SemiJoinProjectTransposeRule.INSTANCE);
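
Per the guard above, both conditions must hold for the rewrite to fire; from a
session that means (hive.execution.engine is the property behind
HIVE_EXECUTION_ENGINE):

  SET hive.execution.engine=tez;             -- rewrite is Tez-only until MR has a cost model
  SET hive.optimize.distinct.rewrite=true;   -- HIVEOPTIMIZEDISTINCTREWRITE, default true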

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
index 7038b4d..2a41aae 100644
--- a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
@@ -476,35 +476,38 @@ STAGE PLANS:
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
                       keys: _col0 (type: tinyint), _col1 (type: double)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: tinyint), _col1 (type: double)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint)
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.3
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: count(DISTINCT KEY._col1:0._col0)
-                keys: KEY._col0 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 20
-                  Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 20
                     Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
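
Reading this diff: the single count(DISTINCT _col1) reducer becomes a two-level
group-by (dedup on (_col0, _col1), then count(_col1) per _col0), and the
map-side TopN Hash Memory Usage hint disappears because the limit can no longer
be pushed below the multi-stage aggregation. A hypothetical query of this shape
(table and column names assumed from the tinyint/double plan types):

  SELECT ctinyint, count(DISTINCT cdouble)
  FROM alltypesorc
  GROUP BY ctinyint
  LIMIT 20;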

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/test/results/clientpositive/tez/mrr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mrr.q.out b/ql/src/test/results/clientpositive/tez/mrr.q.out
index ac5ac5c..d90b27f 100644
--- a/ql/src/test/results/clientpositive/tez/mrr.q.out
+++ b/ql/src/test/results/clientpositive/tez/mrr.q.out
@@ -452,10 +452,9 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(DISTINCT _col1)
                   keys: _col0 (type: string), _col1 (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string), _col1 (type: string)
@@ -465,25 +464,30 @@ STAGE PLANS:
         Reducer 3 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: count(DISTINCT KEY._col1:0._col0)
-                keys: KEY._col0 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: bigint)
-                  sort order: +
-                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string)
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col1 (type: bigint)
+                    sort order: +
+                    Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: string)
         Reducer 4 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -855,10 +859,9 @@ STAGE PLANS:
                         Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                         HybridGraceHashJoin: true
                         Group By Operator
-                          aggregations: count(DISTINCT _col1)
                           keys: _col0 (type: string), _col1 (type: string)
                           mode: hash
-                          outputColumnNames: _col0, _col1, _col2
+                          outputColumnNames: _col0, _col1
                           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                           Reduce Output Operator
                             key expressions: _col0 (type: string), _col1 (type: string)
@@ -885,25 +888,30 @@ STAGE PLANS:
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: count(DISTINCT KEY._col1:0._col0)
-                keys: KEY._col0 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: bigint)
-                  sort order: +
-                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string)
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: string)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col1 (type: bigint)
+                    sort order: +
+                    Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: string)
         Reducer 3 
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 68 Data size: 722 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
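
The mrr plans show the same expansion downstream of a join: the map-side
group-by keeps both columns as keys without the DISTINCT aggregation, and the
order-by reducer inherits the smaller row estimate (137 -> 68). A hypothetical
query of this shape (names assumed from the string/string plan types):

  SELECT s1.key, count(DISTINCT s2.value) AS cnt
  FROM src s1 JOIN src s2 ON (s1.key = s2.key)
  GROUP BY s1.key
  ORDER BY cnt;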

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
index f1da471..e6d34ff 100644
--- a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
@@ -1248,6 +1248,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1260,30 +1261,47 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col0)
                       keys: _col0 (type: int)
                       mode: hash
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: count(DISTINCT KEY._col0:0._col0)
+                keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1000 Data size: 1752000 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count(_col0)
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
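
Here a global count(DISTINCT ...) gains a third vertex: Reducer 2 now only
deduplicates the key, and the new Reducer 3 performs the final count; note the
added "Execution mode: vectorized" line on that reducer. A hypothetical query
of this shape (names assumed):

  SELECT count(DISTINCT some_int_col) FROM some_orc_table;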

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
index d815938..1c5b51f 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
@@ -345,36 +345,39 @@ STAGE PLANS:
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
                       keys: _col0 (type: tinyint), _col1 (type: double)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: tinyint), _col1 (type: double)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: tinyint)
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
-                        TopN Hash Memory Usage: 0.3
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: count(DISTINCT KEY._col1:0._col0)
-                keys: KEY._col0 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: double)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 20
-                  Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
+                Group By Operator
+                  aggregations: count(_col1)
+                  keys: _col0 (type: tinyint)
+                  mode: complete
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 20
                     Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 20 Data size: 4300 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
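
vectorization_limit shows the same plan change as limit_pushdown above (the
identical tinyint/double aggregation and the same loss of the TopN hint), with
the map side additionally running vectorized; the sketch given after the
limit_pushdown diff applies here unchanged.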

http://git-wip-us.apache.org/repos/asf/hive/blob/1538a71e/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
index 90c9934..cf95c8a 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
@@ -41,31 +41,35 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(DISTINCT _col0), count(DISTINCT _col0)
-                      bucketGroup: true
                       keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
+                      mode: final
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col0), count(_col0)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: sum(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0)
+                aggregations: sum(VALUE._col0), count(VALUE._col1)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -95,6 +99,7 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -107,20 +112,36 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(DISTINCT _col0), count(DISTINCT _col0), avg(DISTINCT _col0), std(DISTINCT _col0)
                       keys: _col0 (type: int)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: _col0
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: sum(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), avg(DISTINCT KEY._col0:2._col0), std(DISTINCT KEY._col0:3._col0)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col0), count(_col0), avg(_col0), std(_col0)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: struct<count:bigint,sum:double,variance:double>)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0), count(VALUE._col1), avg(VALUE._col2), std(VALUE._col3)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE
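
Same rewrite with several distinct aggregates over one column: the map side now
groups only by the column, Reducer 2 deduplicates and computes partial
sum/count/avg/std over the distinct values, and Reducer 3 merges the partials.
A hypothetical query of this shape (names assumed; the int input column matches
the struct<count:bigint,sum:double,input:int> value type above):

  SELECT sum(DISTINCT cint), count(DISTINCT cint), avg(DISTINCT cint), std(DISTINCT cint)
  FROM alltypesorc;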


[08/39] hive git commit: HIVE-10506: CBO (Calcite Return Path): Disallow return path to be enabled if CBO is off (Jesus Camacho Rodriguez via Laljo John Pullokkaran)

Posted by pr...@apache.org.
HIVE-10506: CBO (Calcite Return Path): Disallow return path to be enabled if CBO is off (Jesus Camacho Rodriguez via Laljo John Pullokkaran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/93995c8b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/93995c8b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/93995c8b

Branch: refs/heads/llap
Commit: 93995c8be3dedc8785ced64939c608ae2433d4af
Parents: ecde4ae
Author: John Pullokkaran <jp...@hortonworks.com>
Authored: Wed May 6 18:15:33 2015 -0700
Committer: jpullokk <jp...@apache.org>
Committed: Wed May 6 18:21:19 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/Context.java      | 10 +++++++++-
 .../hive/ql/optimizer/IdentityProjectRemover.java       | 12 ++++++++++++
 .../hive/ql/optimizer/NonBlockingOpDeDupProc.java       | 11 +++++++++++
 .../org/apache/hadoop/hive/ql/optimizer/Optimizer.java  |  8 +++-----
 .../calcite/translator/HiveOpConverterPostProc.java     | 10 ++++++++++
 .../org/apache/hadoop/hive/ql/parse/CalcitePlanner.java |  1 +
 6 files changed, 46 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 9692738..a74bbbe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -23,7 +23,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.text.SimpleDateFormat;
-import java.util.ArrayList;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
@@ -86,6 +85,7 @@ public class Context {
   protected int pathid = 10000;
   protected boolean explain = false;
   protected String cboInfo;
+  protected boolean cboSucceeded;
   protected boolean explainLogical = false;
   protected String cmd = "";
   // number of previous attempts
@@ -706,4 +706,12 @@ public class Context {
     this.cboInfo = cboInfo;
   }
 
+  public boolean isCboSucceeded() {
+    return cboSucceeded;
+  }
+
+  public void setCboSucceeded(boolean cboSucceeded) {
+    this.cboSucceeded = cboSucceeded;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
index 433699b..e3d3ce6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IdentityProjectRemover.java
@@ -26,8 +26,10 @@ import java.util.Stack;
 
 import com.google.common.base.Predicates;
 import com.google.common.collect.Iterators;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.LateralViewForwardOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -69,6 +71,16 @@ public class IdentityProjectRemover implements Transform {
   private static final Log LOG = LogFactory.getLog(IdentityProjectRemover.class);
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them, we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(cboEnabled && returnPathEnabled && cboSucceeded) {
+      return pctx;
+    }
+
+    // 1. We apply the transformation
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(new RuleRegExp("R1",
       "(" + SelectOperator.getOperatorName() + "%)"), new ProjectRemover());

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
index 95c2b0b..3006a6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
@@ -28,6 +28,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -57,6 +58,16 @@ public class NonBlockingOpDeDupProc implements Transform {
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them, we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(cboEnabled && returnPathEnabled && cboSucceeded) {
+      return pctx;
+    }
+
+    // 1. We apply the transformation
     String SEL = SelectOperator.getOperatorName();
     String FIL = FilterOperator.getOperatorName();
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index 58f8afe..a7cf8b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -62,11 +62,9 @@ public class Optimizer {
 
     transformations = new ArrayList<Transform>();
 
-    // If we are translating Calcite operators into Hive operators, we need
-    // additional postprocessing
-    if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
-      transformations.add(new HiveOpConverterPostProc());
-    }
+    // Add the additional postprocessing transformations needed if
+    // we are translating Calcite operators into Hive operators.
+    transformations.add(new HiveOpConverterPostProc());
 
     // Add the transformation that computes the lineage information.
     transformations.add(new Generator());

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
index cdd7c7e..e7c8342 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
@@ -26,6 +26,7 @@ import java.util.Stack;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -55,6 +56,15 @@ public class HiveOpConverterPostProc implements Transform {
 
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // 0. We check the conditions to apply this transformation,
+    //    if we do not meet them we bail out
+    final boolean cboEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_ENABLED);
+    final boolean returnPathEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP);
+    final boolean cboSucceeded = pctx.getContext().isCboSucceeded();
+    if(!(cboEnabled && returnPathEnabled && cboSucceeded)) {
+      return pctx;
+    }
+
     // 1. Initialize aux data structures
     this.pctx = pctx;
     this.aliasToOpInfo = new HashMap<String, Operator<? extends OperatorDesc>>();

http://git-wip-us.apache.org/repos/asf/hive/blob/93995c8b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 49ad6ad..48f488f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -265,6 +265,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
           sinkOp = genPlan(getQB());
           LOG.info("CBO Succeeded; optimized logical plan.");
           this.ctx.setCboInfo("Plan optimized by CBO.");
+          this.ctx.setCboSucceeded(true);
           LOG.debug(newAST.dump());
           } 
         } catch (Exception e) {


[16/39] hive git commit: HIVE-10634: The HMS upgrade test script on LXC is exiting with error even if the test was run successfully (Sergio Pena, reviewed by Szehon Ho)

Posted by pr...@apache.org.
HIVE-10634: The HMS upgrade test script on LXC is exiting with error even if the test was run successfully (Sergio Pena, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e24662c3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e24662c3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e24662c3

Branch: refs/heads/llap
Commit: e24662c3d80138cdc4ee102a2da5e9a81c0bce7d
Parents: c156b32
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu May 7 09:43:54 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 7 09:43:54 2015 -0500

----------------------------------------------------------------------
 testutils/metastore/execute-test-on-lxc.sh | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e24662c3/testutils/metastore/execute-test-on-lxc.sh
----------------------------------------------------------------------
diff --git a/testutils/metastore/execute-test-on-lxc.sh b/testutils/metastore/execute-test-on-lxc.sh
index 033c445..592e696 100644
--- a/testutils/metastore/execute-test-on-lxc.sh
+++ b/testutils/metastore/execute-test-on-lxc.sh
@@ -97,7 +97,7 @@ lxc_prepare() {
 	cat>$tmpfile<<EOF
 rm -rf hive
 mkdir hive
-git clone --depth 1 -b $BRANCH https://git-wip-us.apache.org/repos/asf/hive.git >/dev/null
+git clone --depth 1 -b $BRANCH https://github.com/apache/hive.git >/dev/null
 cd hive
 wget $PATCH_URL -O hms.patch
 bash -x testutils/ptest2/src/main/resources/smart-apply-patch.sh hms.patch
@@ -153,5 +153,8 @@ do
 	log "$(lxc_print_metastore_log $name)"
 	lxc_stop $name
 
-	[ $rc != 0 ] && exit 1
+	if [[ $rc != 0 ]]; then
+		log "Tests failed. Exiting with error code (1)."
+		exit 1
+	fi
 done

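The behavior being fixed is a shell subtlety: a script's exit status is the status of the last command it ran, and the one-liner "[ $rc != 0 ] && exit 1" itself returns 1 whenever rc is 0 (the test fails and && short-circuits). On a fully successful run, that line was the last command of the final loop iteration, so the script exited non-zero despite all tests passing. A minimal illustration, not taken from the repo; run each variant as its own script:

#!/bin/bash
# Variant 1 (pre-patch): when rc is 0, the test [ $rc != 0 ] fails with
# status 1 and && short-circuits. As the last command executed, it makes
# the whole script exit 1 even though nothing failed.
rc=0
[ $rc != 0 ] && exit 1

#!/bin/bash
# Variant 2 (post-patch): an if-statement whose condition is false returns
# status 0, so the script exits 0 when all tests passed.
rc=0
if [[ $rc != 0 ]]; then
	echo "Tests failed. Exiting with error code (1)."
	exit 1
fi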

[27/39] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran, reviewed by Ashutosh Chauhan)

Posted by pr...@apache.org.
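For context on the title: an epsilon-based cost equality that looks only at CPU and I/O can declare two plans "equal" even when their cardinalities differ, making the planner's choice between them arbitrary. The sketch below is a hypothetical illustration of folding row count into such a comparison; the class, field names, epsilon value, and method shape are all assumptions, not the actual HiveCost code.

// Hypothetical sketch only; not the actual HiveCost implementation.
final class CostSketch {
  private static final double EPSILON = 1e-5;  // assumed tolerance
  final double rowCount;
  final double cpu;
  final double io;

  CostSketch(double rowCount, double cpu, double io) {
    this.rowCount = rowCount;
    this.cpu = cpu;
    this.io = io;
  }

  // Taking row count into account means two costs are only "equal within
  // epsilon" when their cardinalities also agree, which changes how ties
  // between candidate join orders are broken.
  boolean isEqWithEpsilon(CostSketch that) {
    return Math.abs(this.rowCount - that.rowCount) < EPSILON
        && Math.abs(this.cpu - that.cpu) < EPSILON
        && Math.abs(this.io - that.io) < EPSILON;
  }
}

The large .q.out diffs that follow are consistent with this: once ties are broken differently, CBO picks different join orders, so stage numbering, join keys, and statistics in the expected plans all shift.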
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
index ff92d9f..98008ad 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out
@@ -141,13 +141,13 @@ from part p1 join part p2 join part p3 on p2.p_name = p1.p_name join part p4 on
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -161,36 +161,36 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -202,44 +202,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col9 (type: int), _col10 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col9 (type: int), _col10 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -256,29 +218,20 @@ STAGE PLANS:
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            Reduce Output Operator
+              key expressions: _col10 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col10 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
             0 _col1 (type: string)
-            1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+            1 _col10 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -287,6 +240,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col9 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col9 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col9 (type: int)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
index f608cfd..a1dd24e 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out
@@ -145,13 +145,13 @@ where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
             and p1.p_partkey = p2.p_partkey
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-3 is a root stage
+  Stage-2 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
-  Stage: Stage-1
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
@@ -165,36 +165,36 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                 Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                   Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: p_partkey is not null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  key expressions: _col0 (type: int), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: int)
-            1 _col0 (type: int)
+            0 _col0 (type: int), _col1 (type: string)
+            1 _col0 (type: int), _col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -206,44 +206,6 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col9 (type: int), _col10 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 _col0 (type: int), _col1 (type: string)
-            1 _col9 (type: int), _col10 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
-            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
             alias: p1
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
@@ -260,29 +222,20 @@ STAGE PLANS:
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
           TableScan
-            alias: p1
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (p_name is not null and p_partkey is not null) (type: boolean)
-              Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col1 (type: string)
-                  Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            Reduce Output Operator
+              key expressions: _col10 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col10 (type: string)
+              Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
             0 _col1 (type: string)
-            1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+            1 _col10 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
           Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -291,6 +244,53 @@ STAGE PLANS:
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: p1
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_partkey is not null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col9 (type: int)
+              sort order: +
+              Map-reduce partition columns: _col9 (type: int)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: int)
+            1 _col9 (type: int)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
+            Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out
index 0f16678..a9d50b4 100644
--- a/ql/src/test/results/clientpositive/spark/join32.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32.q.out
@@ -113,16 +113,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -136,12 +136,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -149,13 +146,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -165,23 +160,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -190,7 +188,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -199,7 +197,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -260,24 +258,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -288,13 +286,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -330,9 +328,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -340,11 +341,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -354,26 +357,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -422,8 +422,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
index 54f47f9..dac9610 100644
--- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
@@ -121,16 +121,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -144,12 +144,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -157,13 +154,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -173,23 +168,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -198,7 +196,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -207,7 +205,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -268,24 +266,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -296,13 +294,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -338,9 +336,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -348,11 +349,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -362,26 +365,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -430,8 +430,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1
@@ -613,34 +613,35 @@ TOK_QUERY
 
 
 STAGE DEPENDENCIES:
-  Stage-3 is a root stage
+  Stage-4 is a root stage
+  Stage-3 depends on stages: Stage-4
   Stage-1 depends on stages: Stage-3
   Stage-0 depends on stages: Stage-1
   Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
-  Stage: Stage-3
+  Stage: Stage-4
     Spark
 #### A masked pattern was here ####
       Vertices:
-        Map 1 
+        Map 3 
             Map Operator Tree:
                 TableScan
-                  alias: w
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  alias: x
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col1 (type: string)
+                          1 _col0 (type: string)
                         Position of Big Table: 1
             Local Work:
               Map Reduce Local Work
@@ -649,7 +650,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: src1
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -659,14 +660,14 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.src1
                     numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    numRows 25
+                    rawDataSize 191
+                    serialization.ddl struct src1 { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
+                    totalSize 216
 #### A masked pattern was here ####
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 
@@ -679,39 +680,44 @@ STAGE PLANS:
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
+                      name default.src1
                       numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      numRows 25
+                      rawDataSize 191
+                      serialization.ddl struct src1 { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
+                      totalSize 216
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.src1
+                  name: default.src1
             Truncated Path -> Alias:
-              /src [w]
-        Map 3 
+              /src1 [x]
+
+  Stage: Stage-3
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: x
-                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  alias: w
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (value is not null and key is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
                           1 _col1 (type: string)
-                        Position of Big Table: 0
+                        Position of Big Table: 1
             Local Work:
               Map Reduce Local Work
             Path -> Alias:
@@ -719,7 +725,7 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src1
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
@@ -729,14 +735,14 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src1
+                    name default.src
                     numFiles 1
-                    numRows 25
-                    rawDataSize 191
-                    serialization.ddl struct src1 { string key, string value}
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 216
+                    totalSize 5812
 #### A masked pattern was here ####
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                 
@@ -749,20 +755,20 @@ STAGE PLANS:
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src1
+                      name default.src
                       numFiles 1
-                      numRows 25
-                      rawDataSize 191
-                      serialization.ddl struct src1 { string key, string value}
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 216
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src1
-                  name: default.src1
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /src1 [x]
+              /src [w]
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -777,11 +783,22 @@ STAGE PLANS:
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
+                      Map Join Operator
+                        condition map:
+                             Inner Join 0 to 1
                         keys:
-                          0 _col1 (type: string)
+                          0 _col0 (type: string)
                           1 _col0 (type: string)
-                        Position of Big Table: 0
+                        outputColumnNames: _col0, _col1, _col3
+                        input vertices:
+                          0 Map 3
+                        Position of Big Table: 1
+                        Statistics: Num rows: 14 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+                        Spark HashTable Sink Operator
+                          keys:
+                            0 _col0 (type: string)
+                            1 _col1 (type: string)
+                          Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
             Path -> Alias:
@@ -858,68 +875,57 @@ STAGE PLANS:
                         keys:
                           0 _col0 (type: string)
                           1 _col1 (type: string)
-                        outputColumnNames: _col1
+                        outputColumnNames: _col1, _col4
                         input vertices:
-                          1 Map 3
+                          1 Map 4
                         Position of Big Table: 0
                         Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                         Map Join Operator
                           condition map:
                                Inner Join 0 to 1
                           keys:
-                            0 _col1 (type: string)
-                            1 _col0 (type: string)
-                          outputColumnNames: _col1, _col4
+                            0 _col0 (type: string)
+                            1 _col1 (type: string)
+                          outputColumnNames: _col1, _col3, _col6
                           input vertices:
-                            1 Map 4
-                          Position of Big Table: 0
+                            0 Map 1
+                          Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-                          Map Join Operator
-                            condition map:
-                                 Inner Join 0 to 1
-                            keys:
-                              0 _col0 (type: string)
-                              1 _col1 (type: string)
-                            outputColumnNames: _col1, _col3, _col6
-                            input vertices:
-                              0 Map 1
-                            Position of Big Table: 1
-                            Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
-                            Select Operator
-                              expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
-                              outputColumnNames: _col0, _col1, _col2
-                              Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
-                              File Output Operator
-                                compressed: false
-                                GlobalTableId: 1
+                          Select Operator
+                            expressions: _col3 (type: string), _col6 (type: string), _col1 (type: string)
+                            outputColumnNames: _col0, _col1, _col2
+                            Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+                            File Output Operator
+                              compressed: false
+                              GlobalTableId: 1
 #### A masked pattern was here ####
-                                NumFilesPerFileSink: 1
-                                Statistics: Num rows: 332 Data size: 3534 Basic stats: COMPLETE Column stats: NONE
+                              NumFilesPerFileSink: 1
+                              Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
-                                table:
-                                    input format: org.apache.hadoop.mapred.TextInputFormat
-                                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                    properties:
-                                      COLUMN_STATS_ACCURATE true
-                                      bucket_count -1
-                                      columns key,value,val2
-                                      columns.comments 
-                                      columns.types string:string:string
+                              table:
+                                  input format: org.apache.hadoop.mapred.TextInputFormat
+                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                  properties:
+                                    COLUMN_STATS_ACCURATE true
+                                    bucket_count -1
+                                    columns key,value,val2
+                                    columns.comments 
+                                    columns.types string:string:string
 #### A masked pattern was here ####
-                                      name default.dest_j1
-                                      numFiles 1
-                                      numRows 85
-                                      rawDataSize 1600
-                                      serialization.ddl struct dest_j1 { string key, string value, string val2}
-                                      serialization.format 1
-                                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                      totalSize 1685
+                                    name default.dest_j1
+                                    numFiles 1
+                                    numRows 85
+                                    rawDataSize 1600
+                                    serialization.ddl struct dest_j1 { string key, string value, string val2}
+                                    serialization.format 1
+                                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                    totalSize 1685
 #### A masked pattern was here ####
-                                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                                    name: default.dest_j1
-                                TotalFiles: 1
-                                GatherStats: true
-                                MultiFileSpray: false
+                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                                  name: default.dest_j1
+                              TotalFiles: 1
+                              GatherStats: true
+                              MultiFileSpray: false
             Local Work:
               Map Reduce Local Work
             Path -> Alias:

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out
index 0f16678..a9d50b4 100644
--- a/ql/src/test/results/clientpositive/spark/join33.q.out
+++ b/ql/src/test/results/clientpositive/spark/join33.q.out
@@ -113,16 +113,16 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: z
+                  alias: y
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
+                    predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: value (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
@@ -136,12 +136,9 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: hr=11
+                  base file name: src
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -149,13 +146,11 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.srcpart
+                    name default.src
                     numFiles 1
                     numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
+                    serialization.ddl struct src { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -165,23 +160,26 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
+                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
+                    name: default.src
+                  name: default.src
             Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [z]
+              /src [y]
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -190,7 +188,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (key is not null and value is not null) (type: boolean)
+                    predicate: (value is not null and key is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 53 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -199,7 +197,7 @@ STAGE PLANS:
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
+                          1 _col1 (type: string)
                         Position of Big Table: 0
             Local Work:
               Map Reduce Local Work
@@ -260,24 +258,24 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: y
+                  alias: z
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: key is not null (type: boolean)
+                    predicate: ((11.0 = 11.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: value (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: string)
-                          1 _col0 (type: string)
-                        outputColumnNames: _col1, _col2, _col3
+                          1 _col1 (type: string)
+                        outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
                         Position of Big Table: 0
@@ -288,13 +286,13 @@ STAGE PLANS:
                           keys:
                             0 _col0 (type: string)
                             1 _col3 (type: string)
-                          outputColumnNames: _col0, _col4, _col5
+                          outputColumnNames: _col1, _col2, _col5
                           input vertices:
                             0 Map 1
                           Position of Big Table: 1
                           Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                           Select Operator
-                            expressions: _col5 (type: string), _col0 (type: string), _col4 (type: string)
+                            expressions: _col5 (type: string), _col2 (type: string), _col1 (type: string)
                             outputColumnNames: _col0, _col1, _col2
                             Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
                             File Output Operator
@@ -330,9 +328,12 @@ STAGE PLANS:
             Path -> Partition:
 #### A masked pattern was here ####
                 Partition
-                  base file name: src
+                  base file name: hr=11
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  partition values:
+                    ds 2008-04-08
+                    hr 11
                   properties:
                     COLUMN_STATS_ACCURATE true
                     bucket_count -1
@@ -340,11 +341,13 @@ STAGE PLANS:
                     columns.comments 'default','default'
                     columns.types string:string
 #### A masked pattern was here ####
-                    name default.src
+                    name default.srcpart
                     numFiles 1
                     numRows 500
+                    partition_columns ds/hr
+                    partition_columns.types string:string
                     rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
+                    serialization.ddl struct srcpart { string key, string value}
                     serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     totalSize 5812
@@ -354,26 +357,23 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
                       columns.types string:string
 #### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
+                      name default.srcpart
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct srcpart { string key, string value}
                       serialization.format 1
                       serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
 #### A masked pattern was here ####
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
+                    name: default.srcpart
+                  name: default.srcpart
             Truncated Path -> Alias:
-              /src [y]
+              /srcpart/ds=2008-04-08/hr=11 [z]
 
   Stage: Stage-0
     Move Operator
@@ -422,8 +422,8 @@ POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Output: default@dest_j1
 POSTHOOK: Lineage: dest_j1.key SIMPLE [(src1)x.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.val2 EXPRESSION [(src)y.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest_j1.value SIMPLE [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.val2 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_j1.value EXPRESSION [(srcpart)z.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from dest_j1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_j1


[10/39] hive git commit: HIVE-10521 - TxnHandler.timeOutTxns only times out some of the expired transactions (Alan Gates via Eugene Koifman)

Posted by pr...@apache.org.
HIVE-10521 - TxnHandler.timeOutTxns only times out some of the expired transactions (Alan Gates via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4b444082
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4b444082
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4b444082

Branch: refs/heads/llap
Commit: 4b444082fcae9eb8ea60ec160723a0337ead1852
Parents: 80fb891
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Wed May 6 19:36:48 2015 -0700
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Wed May 6 19:36:48 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 35 ++++++++++++------
 .../hive/metastore/txn/TestTxnHandler.java      | 39 +++++++++++++++-----
 2 files changed, 53 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4b444082/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 704c3ed..7c3b55c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -75,6 +75,7 @@ public class TxnHandler {
   static final protected char LOCK_SEMI_SHARED = 'w';
 
   static final private int ALLOWED_REPEATED_DEADLOCKS = 10;
+  static final private int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 100;
   static final private Log LOG = LogFactory.getLog(TxnHandler.class.getName());
 
   static private DataSource connPool;
@@ -130,7 +131,8 @@ public class TxnHandler {
     timeout = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS);
     deadlockCnt = 0;
     buildJumpTable();
-    retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
+    retryInterval = HiveConf.getTimeVar(conf, HiveConf.ConfVars.HMSHANDLERINTERVAL,
+        TimeUnit.MILLISECONDS);
     retryLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HMSHANDLERATTEMPTS);
     deadlockRetryInterval = retryInterval / 10;
 
@@ -334,9 +336,7 @@ public class TxnHandler {
       Connection dbConn = null;
       try {
         dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
-        List<Long> txnids = new ArrayList<Long>(1);
-        txnids.add(txnid);
-        if (abortTxns(dbConn, txnids) != 1) {
+        if (abortTxns(dbConn, Collections.singletonList(txnid)) != 1) {
           LOG.debug("Going to rollback");
           dbConn.rollback();
           throw new NoSuchTxnException("No such transaction: " + txnid);
@@ -1321,8 +1321,6 @@ public class TxnHandler {
       LOG.debug("Going to execute update <" + buf.toString() + ">");
       updateCnt = stmt.executeUpdate(buf.toString());
 
-      LOG.debug("Going to commit");
-      dbConn.commit();
     } finally {
       closeStmt(stmt);
     }
@@ -1818,10 +1816,10 @@ public class TxnHandler {
     }
   }
 
-  // Abort timed out transactions.  This calls abortTxn(), which does a commit,
+  // Abort timed out transactions.  This does a commit,
   // and thus should be done before any calls to heartbeat that will leave
   // open transactions on the underlying database.
-  private void timeOutTxns(Connection dbConn) throws SQLException, MetaException {
+  private void timeOutTxns(Connection dbConn) throws SQLException, MetaException, RetryException {
     long now = getDbTime(dbConn);
     Statement stmt = null;
     try {
@@ -1834,10 +1832,23 @@ public class TxnHandler {
       List<Long> deadTxns = new ArrayList<Long>();
       // Limit the number of timed out transactions we do in one pass to keep from generating a
       // huge delete statement
-      for (int i = 0; i < 20 && rs.next(); i++) deadTxns.add(rs.getLong(1));
-      // We don't care whether all of the transactions get deleted or not,
-      // if some didn't it most likely means someone else deleted them in the interim
-      if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+      do {
+        deadTxns.clear();
+        for (int i = 0; i < TIMED_OUT_TXN_ABORT_BATCH_SIZE && rs.next(); i++) {
+          deadTxns.add(rs.getLong(1));
+        }
+        // We don't care whether all of the transactions get deleted or not,
+        // if some didn't it most likely means someone else deleted them in the interim
+        if (deadTxns.size() > 0) abortTxns(dbConn, deadTxns);
+      } while (deadTxns.size() > 0);
+      LOG.debug("Going to commit");
+      dbConn.commit();
+    } catch (SQLException e) {
+      LOG.debug("Going to rollback");
+      rollbackDBConn(dbConn);
+      checkRetryable(dbConn, e, "abortTxn");
+      throw new MetaException("Unable to update transaction database "
+        + StringUtils.stringifyException(e));
     } finally {
       closeStmt(stmt);
     }

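Editor's note: the heart of this fix is the new do/while loop. Instead of aborting at most 20 expired transactions per call, timeOutTxns() now drains the result set in batches of TIMED_OUT_TXN_ABORT_BATCH_SIZE (100) until none remain, then commits once. The testRecoverManyTimeouts test below opens 503 transactions, enough to force six passes through the loop. A minimal, self-contained sketch of the same drain-in-batches pattern follows; BatchedAbortSketch, BATCH_SIZE, and abortBatch() are hypothetical stand-ins for illustration, not Hive APIs.

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class BatchedAbortSketch {
  // Bounded batch size keeps each generated DELETE statement small,
  // mirroring TIMED_OUT_TXN_ABORT_BATCH_SIZE above.
  private static final int BATCH_SIZE = 100;

  static void drainExpired(ResultSet rs) throws SQLException {
    List<Long> batch = new ArrayList<Long>(BATCH_SIZE);
    do {
      batch.clear();
      // Pull up to BATCH_SIZE ids from the (already executed) query.
      for (int i = 0; i < BATCH_SIZE && rs.next(); i++) {
        batch.add(rs.getLong(1));
      }
      if (!batch.isEmpty()) {
        abortBatch(batch); // hypothetical stand-in for abortTxns(dbConn, deadTxns)
      }
    } while (!batch.isEmpty()); // loop until the result set is exhausted
  }

  private static void abortBatch(List<Long> txnIds) {
    // placeholder: the real code issues one bounded update per batch
  }
}

The loop-until-empty shape is what distinguishes this from the old code: the previous version took a single slice of 20 ids and stopped, so any remaining expired transactions stayed open until a later heartbeat happened to trigger another pass.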
http://git-wip-us.apache.org/repos/asf/hive/blob/4b444082/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
index d4266e1..f478184 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
@@ -937,16 +937,16 @@ public class TestTxnHandler {
   @Test
   public void testLockTimeout() throws Exception {
     long timeout = txnHandler.setTimeout(1);
-    LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
-    comp.setTablename("mytable");
-    comp.setPartitionname("mypartition");
-    List<LockComponent> components = new ArrayList<LockComponent>(1);
-    components.add(comp);
-    LockRequest req = new LockRequest(components, "me", "localhost");
-    LockResponse res = txnHandler.lock(req);
-    assertTrue(res.getState() == LockState.ACQUIRED);
-    Thread.currentThread().sleep(10);
     try {
+      LockComponent comp = new LockComponent(LockType.EXCLUSIVE, LockLevel.DB, "mydb");
+      comp.setTablename("mytable");
+      comp.setPartitionname("mypartition");
+      List<LockComponent> components = new ArrayList<LockComponent>(1);
+      components.add(comp);
+      LockRequest req = new LockRequest(components, "me", "localhost");
+      LockResponse res = txnHandler.lock(req);
+      assertTrue(res.getState() == LockState.ACQUIRED);
+      Thread.currentThread().sleep(10);
       txnHandler.checkLock(new CheckLockRequest(res.getLockid()));
       fail("Told there was a lock, when it should have timed out.");
     } catch (NoSuchLockException e) {
@@ -956,6 +956,27 @@ public class TestTxnHandler {
   }
 
   @Test
+  public void testRecoverManyTimeouts() throws Exception {
+    long timeout = txnHandler.setTimeout(1);
+    try {
+      txnHandler.openTxns(new OpenTxnRequest(503, "me", "localhost"));
+      Thread.currentThread().sleep(10);
+      txnHandler.getOpenTxns();
+      GetOpenTxnsInfoResponse rsp = txnHandler.getOpenTxnsInfo();
+      int numAborted = 0;
+      for (TxnInfo txnInfo : rsp.getOpen_txns()) {
+        assertEquals(TxnState.ABORTED, txnInfo.getState());
+        numAborted++;
+      }
+      assertEquals(503, numAborted);
+    } finally {
+      txnHandler.setTimeout(timeout);
+    }
+
+
+  }
+
+  @Test
   public void testHeartbeatNoLock() throws Exception {
     HeartbeatRequest h = new HeartbeatRequest();
     h.setLockid(29389839L);


[04/39] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by pr...@apache.org.
HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/25310407
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/25310407
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/25310407

Branch: refs/heads/llap
Commit: 2531040758e796c5cc469a893c50a8f5a388ded6
Parents: 632a309
Author: Jason Dere <jd...@hortonworks.com>
Authored: Wed May 6 16:19:59 2015 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Wed May 6 16:19:59 2015 -0700

----------------------------------------------------------------------
 data/files/tjoin1.txt                           |     3 +
 data/files/tjoin2.txt                           |     4 +
 .../test/resources/testconfiguration.properties |     2 +
 .../exec/vector/VectorMapJoinBaseOperator.java  |   185 +
 .../ql/exec/vector/VectorMapJoinOperator.java   |   129 +-
 .../VectorMapJoinOuterFilteredOperator.java     |   120 +
 .../VectorMapJoinGenerateResultOperator.java    |     5 +
 .../hive/ql/optimizer/physical/Vectorizer.java  |    23 +-
 .../clientpositive/vector_left_outer_join2.q    |    62 +
 .../clientpositive/vector_leftsemi_mapjoin.q    |   403 +
 .../tez/vector_left_outer_join2.q.out           |   553 +
 .../tez/vector_leftsemi_mapjoin.q.out           | 13807 +++++++++++++++++
 .../vector_left_outer_join2.q.out               |   568 +
 .../vector_leftsemi_mapjoin.q.out               | 13572 ++++++++++++++++
 14 files changed, 29317 insertions(+), 119 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin1.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin1.txt b/data/files/tjoin1.txt
new file mode 100644
index 0000000..897e0c5
--- /dev/null
+++ b/data/files/tjoin1.txt
@@ -0,0 +1,3 @@
+0|10|15
+1|20|25
+2|\N|50
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/data/files/tjoin2.txt
----------------------------------------------------------------------
diff --git a/data/files/tjoin2.txt b/data/files/tjoin2.txt
new file mode 100644
index 0000000..24820e9
--- /dev/null
+++ b/data/files/tjoin2.txt
@@ -0,0 +1,4 @@
+0|10|BB
+1|15|DD
+2|\N|EE
+3|10|FF
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 8e9984a..134fded 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -222,6 +222,8 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   vector_interval_1.q,\
   vector_interval_2.q,\
   vector_left_outer_join.q,\
+  vector_left_outer_join2.q,\
+  vector_leftsemi_mapjoin.q,\
   vector_mapjoin_reduce.q,\
   vector_mr_diff_schema_alias.q,\
   vector_multi_insert.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
new file mode 100644
index 0000000..0baec2c
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinBaseOperator.java
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
+import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.io.DataOutputBuffer;
+
+/**
+ * The *NON-NATIVE* base vector map join operator class used by VectorMapJoinOperator and
+ * VectorMapJoinOuterFilteredOperator.
+ *
+ * It has common variables and code for the output batch, Hybrid Grace spill batch, and more.
+ */
+public class VectorMapJoinBaseOperator extends MapJoinOperator implements VectorizationContextRegion {
+
+  private static final Log LOG = LogFactory.getLog(VectorMapJoinBaseOperator.class.getName());
+
+  private static final long serialVersionUID = 1L;
+
+  protected VectorizationContext vOutContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  protected transient VectorizedRowBatch outputBatch;
+  protected transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
+  protected transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
+
+  protected transient VectorizedRowBatchCtx vrbCtx = null;
+
+  protected transient int tag;  // big table alias
+
+  public VectorMapJoinBaseOperator() {
+    super();
+  }
+
+  public VectorMapJoinBaseOperator (VectorizationContext vContext, OperatorDesc conf)
+    throws HiveException {
+    super();
+
+    MapJoinDesc desc = (MapJoinDesc) conf;
+    this.conf = desc;
+
+    order = desc.getTagOrder();
+    numAliases = desc.getExprs().size();
+    posBigTable = (byte) desc.getPosBigTable();
+    filterMaps = desc.getFilterMap();
+    noOuterJoin = desc.isNoOuterJoin();
+
+    // We are making a new output vectorized row batch.
+    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    vrbCtx = new VectorizedRowBatchCtx();
+    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
+
+    outputBatch = vrbCtx.createVectorizedRowBatch();
+
+    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
+
+    return result;
+  }
+
+  /**
+   * 'forwards' the (row-mode) record into the (vectorized) output batch
+   */
+  @Override
+  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
+    Object[] values = (Object[]) row;
+    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
+    if (va == null) {
+      va = new VectorAssignRowSameBatch();
+      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
+      va.setOneBatch(outputBatch);
+      outputVectorAssignRowMap.put(outputOI, va);
+    }
+
+    va.assignRow(outputBatch.size, values);
+
+    ++outputBatch.size;
+    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+      flushOutput();
+    }
+  }
+
+  private void flushOutput() throws HiveException {
+    forward(outputBatch, null);
+    outputBatch.reset();
+  }
+
+  @Override
+  public void closeOp(boolean aborted) throws HiveException {
+    super.closeOp(aborted);
+    for (MapJoinTableContainer tableContainer : mapJoinTables) {
+      if (tableContainer != null) {
+        tableContainer.dumpMetrics();
+      }
+    }
+    if (!aborted && 0 < outputBatch.size) {
+      flushOutput();
+    }
+  }
+
+  /**
+   * Re-processes the big-table rows fed back from the super MapJoinOperator,
+   * assembling them into vectorized row batches.
+   */
+  @Override
+  protected void reProcessBigTable(int partitionId)
+      throws HiveException {
+
+    if (scratchBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
+    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
+    ObjectContainer bigTable = partition.getMatchfileObjContainer();
+
+    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
+    while (bigTable.hasNext()) {
+      Object row = bigTable.next();
+      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
+          (StructObjectInspector) inputObjInspectors[posBigTable],
+          scratchBatch.size, scratchBatch, dataOutputBuffer);
+      scratchBatch.size++;
+
+      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+        process(scratchBatch, tag); // call process once we have a full batch
+        scratchBatch.reset();
+        dataOutputBuffer.reset();
+      }
+    }
+    // Process the row batch that has less than DEFAULT_SIZE rows
+    if (scratchBatch.size > 0) {
+      process(scratchBatch, tag);
+      scratchBatch.reset();
+      dataOutputBuffer.reset();
+    }
+    bigTable.clear();
+  }
+
+  @Override
+  public VectorizationContext getOuputVectorizationContext() {
+    return vOutContext;
+  }
+}
\ No newline at end of file

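Editor's note: the internalForward()/flushOutput() pair in the new base class is a plain accumulate-and-flush pattern: each row-mode join result is assigned into a shared output batch, the batch is forwarded downstream once it reaches VectorizedRowBatch.DEFAULT_SIZE, and closeOp() flushes any final partial batch. A generic sketch of that pattern, under assumed names (BatchForwarder, Row, sink, and BATCH_SIZE are illustrative, not Hive APIs):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class BatchForwarder<Row> {
  // Assumed batch capacity; VectorizedRowBatch.DEFAULT_SIZE plays this role above.
  private static final int BATCH_SIZE = 1024;
  private final List<Row> batch = new ArrayList<Row>(BATCH_SIZE);
  private final Consumer<List<Row>> sink; // downstream operator, like forward()

  public BatchForwarder(Consumer<List<Row>> sink) {
    this.sink = sink;
  }

  public void add(Row row) {
    batch.add(row);            // assign the row into the shared batch
    if (batch.size() == BATCH_SIZE) {
      flush();                 // forward a full batch downstream
    }
  }

  public void close() {
    if (!batch.isEmpty()) {
      flush();                 // final partial batch, as in closeOp()
    }
  }

  private void flush() {
    sink.accept(new ArrayList<Row>(batch));
    batch.clear();             // reuse the buffer, like outputBatch.reset()
  }
}

reProcessBigTable() above uses the same idea in the other direction: it refills a scratch batch from spilled big-table rows and calls process() each time the batch fills, then once more for the final partial batch.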
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
index 0547346..15c747e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOperator.java
@@ -18,10 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Future;
@@ -31,11 +29,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
 import org.apache.hadoop.hive.ql.exec.JoinUtil;
-import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
-import org.apache.hadoop.hive.ql.exec.persistence.HybridHashTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
 import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor;
-import org.apache.hadoop.hive.ql.exec.persistence.ObjectContainer;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory;
@@ -45,36 +40,28 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.io.DataOutputBuffer;
 
 /**
  * The vectorized version of the MapJoinOperator.
  */
-public class VectorMapJoinOperator extends MapJoinOperator implements VectorizationContextRegion {
+public class VectorMapJoinOperator extends VectorMapJoinBaseOperator {
 
-  private static final Log LOG = LogFactory.getLog(
-      VectorMapJoinOperator.class.getName());
-
-   /**
-   *
-   */
   private static final long serialVersionUID = 1L;
 
-  private VectorExpression[] keyExpressions;
+  private static final Log LOG = LogFactory.getLog(
+      VectorMapJoinOperator.class.getName());
 
-  private VectorExpression[] bigTableFilterExpressions;
-  private VectorExpression[] bigTableValueExpressions;
+  protected VectorExpression[] keyExpressions;
 
-  private VectorizationContext vOutContext;
+  protected VectorExpression[] bigTableFilterExpressions;
+  protected VectorExpression[] bigTableValueExpressions;
 
   // The above members are initialized by the constructor and must not be
   // transient.
   //---------------------------------------------------------------------------
 
-  private transient VectorizedRowBatch outputBatch;
-  private transient VectorizedRowBatch scratchBatch;  // holds restored (from disk) big table rows
+
   private transient VectorExpressionWriter[] valueWriters;
-  private transient Map<ObjectInspector, VectorAssignRowSameBatch> outputVectorAssignRowMap;
 
   // These members are used as out-of-band params
  // for the inner-loop super.processOp callbacks
@@ -84,9 +71,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   private transient VectorHashKeyWrapperBatch keyWrapperBatch;
   private transient VectorExpressionWriter[] keyOutputWriters;
 
-  private transient VectorizedRowBatchCtx vrbCtx = null;
-
-  private transient int tag;  // big table alias
   private VectorExpressionWriter[] rowWriters;  // Writer for producing row from input batch
   protected transient Object[] singleRow;
 
@@ -97,16 +81,10 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   public VectorMapJoinOperator (VectorizationContext vContext, OperatorDesc conf)
     throws HiveException {
-    this();
 
-    MapJoinDesc desc = (MapJoinDesc) conf;
-    this.conf = desc;
+    super(vContext, conf);
 
-    order = desc.getTagOrder();
-    numAliases = desc.getExprs().size();
-    posBigTable = (byte) desc.getPosBigTable();
-    filterMaps = desc.getFilterMap();
-    noOuterJoin = desc.isNoOuterJoin();
+    MapJoinDesc desc = (MapJoinDesc) conf;
 
     Map<Byte, List<ExprNodeDesc>> filterExpressions = desc.getFilters();
     bigTableFilterExpressions = vContext.getVectorExpressions(filterExpressions.get(posBigTable),
@@ -118,9 +96,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // We're only going to evaluate the big table vectorized expressions.
     Map<Byte, List<ExprNodeDesc>> exprs = desc.getExprs();
     bigTableValueExpressions = vContext.getVectorExpressions(exprs.get(posBigTable));
-
-    // We are making a new output vectorized row batch.
-    vOutContext = new VectorizationContext(getName(), desc.getOutputColumnNames());
   }
 
   @Override
@@ -144,12 +119,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     List<ExprNodeDesc> keyDesc = conf.getKeys().get(posBigTable);
     keyOutputWriters = VectorExpressionWriterFactory.getExpressionWriters(keyDesc);
 
-    vrbCtx = new VectorizedRowBatchCtx();
-    vrbCtx.init(vOutContext.getScratchColumnTypeMap(), (StructObjectInspector) this.outputObjInspector);
-
-    outputBatch = vrbCtx.createVectorizedRowBatch();
-
-    keyWrapperBatch =VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
+    keyWrapperBatch = VectorHashKeyWrapperBatch.compileKeyWrapperBatch(keyExpressions);
 
     Map<Byte, List<ExprNodeDesc>> valueExpressions = conf.getExprs();
     List<ExprNodeDesc> bigTableExpressions = valueExpressions.get(posBigTable);
@@ -203,51 +173,9 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     // Filtering is handled in the input batch processing
     filterMaps[posBigTable] = null;
 
-    outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRowSameBatch>();
-
     return result;
   }
 
-  /**
-   * 'forwards' the (row-mode) record into the (vectorized) output batch
-   */
-  @Override
-  protected void internalForward(Object row, ObjectInspector outputOI) throws HiveException {
-    Object[] values = (Object[]) row;
-    VectorAssignRowSameBatch va = outputVectorAssignRowMap.get(outputOI);
-    if (va == null) {
-      va = new VectorAssignRowSameBatch();
-      va.init((StructObjectInspector) outputOI, vOutContext.getProjectedColumns());
-      va.setOneBatch(outputBatch);
-      outputVectorAssignRowMap.put(outputOI, va);
-    }
-
-    va.assignRow(outputBatch.size, values);
-
-    ++outputBatch.size;
-    if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-      flushOutput();
-    }
-  }
-
-  private void flushOutput() throws HiveException {
-    forward(outputBatch, null);
-    outputBatch.reset();
-  }
-
-  @Override
-  public void closeOp(boolean aborted) throws HiveException {
-    super.closeOp(aborted);
-    for (MapJoinTableContainer tableContainer : mapJoinTables) {
-      if (tableContainer != null) {
-        tableContainer.dumpMetrics();
-      }
-    }
-    if (!aborted && 0 < outputBatch.size) {
-      flushOutput();
-    }
-  }
-
   @Override
   protected JoinUtil.JoinResult setMapJoinKey(ReusableGetAdaptor dest, Object row, byte alias)
       throws HiveException {
@@ -256,7 +184,7 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
 
   @Override
   public void process(Object row, int tag) throws HiveException {
-    byte alias = (byte) tag;
+
     VectorizedRowBatch inBatch = (VectorizedRowBatch) row;
 
     // Preparation for hybrid grace hash join
@@ -297,11 +225,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
   }
 
   @Override
-  public VectorizationContext getOuputVectorizationContext() {
-    return vOutContext;
-  }
-
-  @Override
   protected void spillBigTableRow(MapJoinTableContainer hybridHtContainer, Object row)
       throws HiveException {
     // Extract the actual row from row batch
@@ -310,36 +233,6 @@ public class VectorMapJoinOperator extends MapJoinOperator implements Vectorizat
     super.spillBigTableRow(hybridHtContainer, actualRow);
   }
 
-  @Override
-  protected void reProcessBigTable(int partitionId)
-      throws HiveException {
-
-    HybridHashTableContainer.HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
-    ObjectContainer bigTable = partition.getMatchfileObjContainer();
-
-    DataOutputBuffer dataOutputBuffer = new DataOutputBuffer();
-    while (bigTable.hasNext()) {
-      Object row = bigTable.next();
-      VectorizedBatchUtil.addProjectedRowToBatchFrom(row,
-          (StructObjectInspector) inputObjInspectors[posBigTable],
-          scratchBatch.size, scratchBatch, dataOutputBuffer);
-      scratchBatch.size++;
-
-      if (scratchBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
-        process(scratchBatch, tag); // call process once we have a full batch
-        scratchBatch.reset();
-        dataOutputBuffer.reset();
-      }
-    }
-    // Process the row batch that has less than DEFAULT_SIZE rows
-    if (scratchBatch.size > 0) {
-      process(scratchBatch, tag);
-      scratchBatch.reset();
-      dataOutputBuffer.reset();
-    }
-    bigTable.clear();
-  }
-
   // Code borrowed from VectorReduceSinkOperator
   private Object[] getRowObject(VectorizedRowBatch vrb, int rowIndex) throws HiveException {
     int batchIndex = rowIndex;

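For readers tracking the refactor above: the row-forwarding and flush logic deleted from this class (internalForward, flushOutput, closeOp) moves into the shared VectorMapJoinBaseOperator that VectorMapJoinOperator now extends. A minimal sketch of that accumulate-and-flush pattern follows, using simplified stand-in types rather than Hive's actual classes:

import java.util.ArrayList;
import java.util.List;

final class BatchingForwarder {
  // Stand-in for VectorizedRowBatch.DEFAULT_SIZE.
  static final int DEFAULT_SIZE = 1024;
  private final List<Object[]> batch = new ArrayList<>(DEFAULT_SIZE);

  // Mirrors internalForward(): append one row, flush when the batch fills.
  void internalForward(Object[] row) {
    batch.add(row);
    if (batch.size() == DEFAULT_SIZE) {
      flushOutput();
    }
  }

  // Mirrors flushOutput(): forward the batch downstream, then reuse it.
  private void flushOutput() {
    System.out.println("forwarding " + batch.size() + " rows");
    batch.clear();
  }

  // Mirrors closeOp(): emit any final partial batch unless aborted.
  void closeOp(boolean aborted) {
    if (!aborted && !batch.isEmpty()) {
      flushOutput();
    }
  }
}
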
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
new file mode 100644
index 0000000..5aecfcc
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapJoinOuterFilteredOperator.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector;
+
+import java.util.Collection;
+import java.util.concurrent.Future;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+
+/**
+ * This is the *NON-NATIVE* vector map join operator, used only when a LEFT OUTER JOIN has filter predicates.
+ *
+ * It passes each row through individually so that the superclass MapJoinOperator can apply the outer join filtering properly.
+ *
+ */
+public class VectorMapJoinOuterFilteredOperator extends VectorMapJoinBaseOperator {
+
+  private static final long serialVersionUID = 1L;
+
+  private VectorizationContext vContext;
+
+  // The above members are initialized by the constructor and must not be
+  // transient.
+  //---------------------------------------------------------------------------
+
+  private transient boolean firstBatch;
+
+  private transient VectorExtractRowDynBatch vectorExtractRowDynBatch;
+
+  protected transient Object[] singleRow;
+
+  public VectorMapJoinOuterFilteredOperator() {
+    super();
+  }
+
+  public VectorMapJoinOuterFilteredOperator(VectorizationContext vContext, OperatorDesc conf)
+      throws HiveException {
+    super(vContext, conf);
+
+    this.vContext = vContext;
+  }
+
+  @Override
+  public Collection<Future<?>> initializeOp(Configuration hconf) throws HiveException {
+
+    // We need an input object inspector for the row we will extract from the
+    // vectorized row batch, not, for example, an original inspector for an ORC table, etc.
+    inputObjInspectors[0] =
+        VectorizedBatchUtil.convertToStandardStructObjectInspector((StructObjectInspector) inputObjInspectors[0]);
+
+    // Call the superclass initializeOp (VectorMapJoinBaseOperator, then MapJoinOperator)
+    // with the new input inspector.
+    Collection<Future<?>> result = super.initializeOp(hconf);
+
+    firstBatch = true;
+
+    return result;
+  }
+
+  @Override
+  public void process(Object data, int tag) throws HiveException {
+
+    VectorizedRowBatch batch = (VectorizedRowBatch) data;
+
+    // Preparation for hybrid grace hash join
+    this.tag = tag;
+    if (scratchBatch == null) {
+      scratchBatch = VectorizedBatchUtil.makeLike(batch);
+    }
+
+    if (firstBatch) {
+      vectorExtractRowDynBatch = new VectorExtractRowDynBatch();
+      vectorExtractRowDynBatch.init((StructObjectInspector) inputObjInspectors[0], vContext.getProjectedColumns());
+
+      singleRow = new Object[vectorExtractRowDynBatch.getCount()];
+
+      firstBatch = false;
+    }
+
+
+    vectorExtractRowDynBatch.setBatchOnEntry(batch);
+
+    // VectorizedBatchUtil.debugDisplayBatch( batch, "VectorReduceSinkOperator processOp ");
+
+    if (batch.selectedInUse) {
+      int selected[] = batch.selected;
+      for (int logical = 0 ; logical < batch.size; logical++) {
+        int batchIndex = selected[logical];
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    } else {
+      for (int batchIndex = 0 ; batchIndex < batch.size; batchIndex++) {
+        vectorExtractRowDynBatch.extractRow(batchIndex, singleRow);
+        super.process(singleRow, tag);
+      }
+    }
+
+    vectorExtractRowDynBatch.forgetBatchOnExit();
+  }
+}

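The process() loop above walks the batch row by row, honoring the selection vector when it is in use. A minimal sketch of the same iteration shape, with simplified types in place of VectorizedRowBatch and VectorExtractRowDynBatch:

import java.util.function.IntConsumer;

final class BatchWalker {
  // Visit each live row of a batch in order, mapping logical positions
  // through the selection vector when one is active.
  static void forEachRow(int size, boolean selectedInUse, int[] selected,
                         IntConsumer rowHandler) {
    if (selectedInUse) {
      for (int logical = 0; logical < size; logical++) {
        rowHandler.accept(selected[logical]); // logical -> physical index
      }
    } else {
      for (int batchIndex = 0; batchIndex < size; batchIndex++) {
        rowHandler.accept(batchIndex);        // dense batch: indexes coincide
      }
    }
  }
}

Each visited index stands where extractRow(batchIndex, singleRow) followed by super.process(singleRow, tag) runs in the operator above.
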
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
index 0f1c7a8..860ebb5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinGenerateResultOperator.java
@@ -523,6 +523,11 @@ public abstract class VectorMapJoinGenerateResultOperator extends VectorMapJoinC
 
     LOG.info(CLASS_NAME + " reProcessBigTable enter...");
 
+    if (spillReplayBatch == null) {
+      // The process method was not called -- no big table rows.
+      return;
+    }
+
     HashPartition partition = firstSmallTable.getHashPartitions()[partitionId];
 
     int rowCount = 0;

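The null check added above covers a partition that received no big-table rows, so process() never created spillReplayBatch. A minimal sketch of that lazy-init/guard pairing, with an assumed stand-in field type:

final class SpillReplay {
  private int[] spillReplayBatch; // stand-in for the real replay batch type

  // The replay batch is allocated lazily, on the first big-table row seen.
  void process(int row) {
    if (spillReplayBatch == null) {
      spillReplayBatch = new int[1024];
    }
    // ... join the row against the small-table hash table ...
  }

  // Replay must tolerate process() never having run for this partition.
  void reProcessBigTable() {
    if (spillReplayBatch == null) {
      return; // no big-table rows were seen; nothing to replay
    }
    // ... replay spilled big-table rows ...
  }
}
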
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 64d7c3e..096239e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -54,6 +54,9 @@ import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinLeftSemiString
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterLongOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterMultiKeyOperator;
 import org.apache.hadoop.hive.ql.exec.vector.mapjoin.VectorMapJoinOuterStringOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorMapJoinOuterFilteredOperator;
+import org.apache.hadoop.hive.ql.exec.vector.VectorSMBMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizationContextRegion;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
@@ -1596,7 +1599,25 @@ public class Vectorizer implements PhysicalPlanResolver {
           boolean specialize = canSpecializeMapJoin(op, desc, isTez);
 
           if (!specialize) {
-            vectorOp = OperatorFactory.getVectorOperator(desc, vContext);
+
+            Class<? extends Operator<?>> opClass = null;
+            if (op instanceof MapJoinOperator) {
+
+              // *NON-NATIVE* vector map join variants: LEFT OUTER JOIN with filters needs special handling...
+
+              List<ExprNodeDesc> bigTableFilters = desc.getFilters().get((byte) desc.getPosBigTable());
+              boolean isOuterAndFiltered = (!desc.isNoOuterJoin() && bigTableFilters.size() > 0);
+              if (!isOuterAndFiltered) {
+                opClass = VectorMapJoinOperator.class;
+              } else {
+                opClass = VectorMapJoinOuterFilteredOperator.class;
+              }
+            } else if (op instanceof SMBMapJoinOperator) {
+              opClass = VectorSMBMapJoinOperator.class;
+            }
+
+            vectorOp = OperatorFactory.getVectorOperator(opClass, op.getConf(), vContext);
+
           } else {
 
             // TEMPORARY Until Native Vector Map Join with Hybrid passes tests...

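A minimal sketch of the MapJoinOperator branch of the class selection added above, reduced to a standalone method with the operator classes replaced by strings for illustration:

final class NonNativeMapJoinChooser {
  // An outer join with filter predicates on the big table cannot use the
  // plain non-native vectorized map join: the ON-clause filters must be
  // applied row by row, so the pass-through variant is chosen instead.
  static String chooseOperatorClass(boolean noOuterJoin, int bigTableFilterCount) {
    boolean isOuterAndFiltered = !noOuterJoin && bigTableFilterCount > 0;
    return isOuterAndFiltered
        ? "VectorMapJoinOuterFilteredOperator"
        : "VectorMapJoinOperator";
  }
}
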
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_left_outer_join2.q b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
new file mode 100644
index 0000000..098d002
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_left_outer_join2.q
@@ -0,0 +1,62 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+drop table if exists TJOIN1;
+drop table if exists TJOIN2;
+create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc;
+create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc;
+create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE ;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE;
+LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE;
+INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE;
+INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
+
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 );
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
new file mode 100644
index 0000000..522ab12
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_leftsemi_mapjoin.q
@@ -0,0 +1,403 @@
+set hive.fetch.task.conversion=none;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+-- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10;
+
+select * from t1 sort by key;
+
+create table t2 stored as orc as select cast(2*key as int) key, value from t1;
+
+select * from t2 sort by key;
+
+create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b;
+select * from t3 sort by key, value;
+
+create table t4 (key int, value string) stored as orc;
+select * from t4;
+
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=false;
+set hive.mapjoin.hybridgrace.hashtable=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=false;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=false;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+set hive.vectorized.execution.enabled=true;
+set hive.mapjoin.hybridgrace.hashtable=true;
+SET hive.vectorized.execution.mapjoin.native.enabled=true;
+
+explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value;
+
+explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value;
+
+explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value;
+
+explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value;
+
+explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value;
+
+explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value ;
+
+explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value;
+
+explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key;
+
+explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value;
+
+explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value;
+ 
+explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value;
+
+explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key;
+
+explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
new file mode 100644
index 0000000..929194e
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_left_outer_join2.q.out
@@ -0,0 +1,553 @@
+PREHOOK: query: drop table if exists TJOIN1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists TJOIN2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists TJOIN2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1
+POSTHOOK: query: create table if not exists TJOIN1 (RNUM int , C1 int, C2 int) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1
+PREHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2
+POSTHOOK: query: create table if not exists TJOIN2 (RNUM int , C1 int, C2 char(2)) STORED AS orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2
+PREHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN1STAGE
+POSTHOOK: query: create table if not exists TJOIN1STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN1STAGE
+PREHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@TJOIN2STAGE
+POSTHOOK: query: create table if not exists TJOIN2STAGE (RNUM int , C1 int, C2 char(2)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@TJOIN2STAGE
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin1stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin1.txt' OVERWRITE INTO TABLE TJOIN1STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin1stage
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@tjoin2stage
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/tjoin2.txt' OVERWRITE INTO TABLE TJOIN2STAGE
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@tjoin2stage
+PREHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1stage
+PREHOOK: Output: default@tjoin1
+POSTHOOK: query: INSERT INTO TABLE TJOIN1 SELECT * from TJOIN1STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1stage
+POSTHOOK: Output: default@tjoin1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 EXPRESSION [(tjoin1stage)tjoin1stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1stage)tjoin1stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin2stage
+PREHOOK: Output: default@tjoin2
+POSTHOOK: query: INSERT INTO TABLE TJOIN2 SELECT * from TJOIN2STAGE
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin2stage
+POSTHOOK: Output: default@tjoin2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2stage)tjoin2stage.FieldSchema(name:rnum, type:int, comment:null), ]
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+0	10	15	NULL
+1	20	25	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+0	10	15	NULL
+2	NULL	50	NULL
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+1	20	25	NULL
+2	NULL	50	NULL
+0	10	15	BB
+0	10	15	FF
+PREHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 2 (BROADCAST_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin1
+                  Statistics: Num rows: 3 Data size: 32 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                    filter predicates:
+                      0 {(c2 > 15)}
+                      1 
+                    keys:
+                      0 c1 (type: int)
+                      1 c1 (type: int)
+                    outputColumnNames: _col0, _col1, _col2, _col8
+                    input vertices:
+                      1 Map 2
+                    Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                    HybridGraceHashJoin: true
+                    Select Operator
+                      expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col8 (type: char(2))
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 4 Data size: 409 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: tjoin2
+                  Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: c1 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: c1 (type: int)
+                    Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: c2 (type: char(2))
+            Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tjoin1
+PREHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+POSTHOOK: query: select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2 from tjoin1 left outer join tjoin2 on ( tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15 )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tjoin1
+POSTHOOK: Input: default@tjoin2
+#### A masked pattern was here ####
+2	NULL	50	NULL
+1	20	25	NULL
+0	10	15	BB
+0	10	15	FF


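The plans above are the same left outer join compiled under several combinations of HybridGraceHashJoin and vectorized execution ("Execution mode: vectorized"); the row-preserving semantics must be identical in every variant. The detail HIVE-9743 pins down is the "filter predicates: 0 {(c2 > 15)}" entry: a predicate on the preserved side inside the ON clause only gates whether a tjoin2 row is allowed to match, so a tjoin1 row that fails it is still emitted, padded with NULLs. A minimal sketch of the contrast, reusing the tjoin1/tjoin2 tables from the test above (the WHERE-clause variant is a hypothetical comparison, not part of the test file):

-- ON-clause predicate: every tjoin1 row survives; c2 > 15 only
-- controls whether a matching tjoin2 row may be joined to it
select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2
from tjoin1 left outer join tjoin2
on (tjoin1.c1 = tjoin2.c1 and tjoin1.c2 > 15);

-- WHERE-clause predicate: applied after the join, so tjoin1 rows
-- with c2 <= 15 are dropped from the result entirely
select tjoin1.rnum, tjoin1.c1, tjoin1.c2, tjoin2.c2 as c2j2
from tjoin1 left outer join tjoin2 on (tjoin1.c1 = tjoin2.c1)
where tjoin1.c2 > 15;
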
[29/39] hive git commit: HIVE-9828: Semantic analyzer does not capture view parent entity for tables referred in view with union all (Prasad via Xuefu)

Posted by pr...@apache.org.
HIVE-9828: Semantic analyzer does not capture view parent entity for tables referred in view with union all (Prasad via Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e713bcc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e713bcc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e713bcc

Branch: refs/heads/llap
Commit: 3e713bcc1f74c90aba1da654b63b85878ab23768
Parents: 809fcb0
Author: Xuefu Zhang <xz...@Cloudera.com>
Authored: Sat May 9 02:32:13 2015 -0700
Committer: Xuefu Zhang <xz...@Cloudera.com>
Committed: Sat May 9 02:32:13 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  15 +--
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |   4 +
 .../hadoop/hive/ql/plan/TestViewEntity.java     | 108 +++++++++++++++++++
 3 files changed, 121 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index cbc5466..2993539 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -222,6 +222,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   public static final String DUMMY_DATABASE = "_dummy_database";
   public static final String DUMMY_TABLE = "_dummy_table";
+  public static final String SUBQUERY_TAG_1 = "-subquery1";
+  public static final String SUBQUERY_TAG_2 = "-subquery2";
+
   // Max characters when auto generating the column name with func name
   private static final int AUTOGEN_COLALIAS_PRFX_MAXLENGTH = 20;
 
@@ -429,16 +432,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       qbexpr.setOpcode(QBExpr.Opcode.UNION);
       // query 1
       assert (ast.getChild(0) != null);
-      QBExpr qbexpr1 = new QBExpr(alias + "-subquery1");
-      doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + "-subquery1",
-          alias + "-subquery1");
+      QBExpr qbexpr1 = new QBExpr(alias + SUBQUERY_TAG_1);
+      doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + SUBQUERY_TAG_1,
+          alias + SUBQUERY_TAG_1);
       qbexpr.setQBExpr1(qbexpr1);
 
       // query 2
       assert (ast.getChild(1) != null);
-      QBExpr qbexpr2 = new QBExpr(alias + "-subquery2");
-      doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + "-subquery2",
-          alias + "-subquery2");
+      QBExpr qbexpr2 = new QBExpr(alias + SUBQUERY_TAG_2);
+      doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + SUBQUERY_TAG_2,
+          alias + SUBQUERY_TAG_2);
       qbexpr.setQBExpr2(qbexpr2);
     }
       break;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index 80e11a3..87a2548 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -975,6 +976,9 @@ public final class PlanUtils {
     // T's parent would be V1
     for (int pos = 0; pos < aliases.length; pos++) {
       currentAlias = currentAlias == null ? aliases[pos] : currentAlias + ":" + aliases[pos];
+
+      currentAlias = currentAlias.replace(SemanticAnalyzer.SUBQUERY_TAG_1, "")
+          .replace(SemanticAnalyzer.SUBQUERY_TAG_2, "");
       ReadEntity input = viewAliasToInput.get(currentAlias);
       if (input == null) {
         return currentInput;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e713bcc/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
new file mode 100644
index 0000000..17a4e06
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestViewEntity.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.plan;
+
+import static org.junit.Assert.*;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+import org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
+import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.TestReadEntityDirect.CheckInputReadEntityDirect;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestViewEntity {
+  /**
+   * Hook used in the test to capture the set of ReadEntities
+   */
+  public static class CheckInputReadEntity extends
+      AbstractSemanticAnalyzerHook {
+    public static ReadEntity[] readEntities;
+
+    @Override
+    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
+        List<Task<? extends Serializable>> rootTasks) throws SemanticException {
+      readEntities = context.getInputs().toArray(new ReadEntity[0]);
+    }
+
+  }
+
+  private static Driver driver;
+
+  @BeforeClass
+  public static void onetimeSetup() throws Exception {
+    HiveConf conf = new HiveConf(Driver.class);
+    conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
+        CheckInputReadEntity.class.getName());
+    HiveConf
+        .setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    SessionState.start(conf);
+    driver = new Driver(conf);
+    driver.init();
+  }
+
+  @AfterClass
+  public static void cleanUp() throws Exception {
+    driver.close();
+    driver.destroy();
+  }
+
+  /**
+   * Verify that the parent entities are captured correctly for union views
+   * @throws Exception
+   */
+  @Test
+  public void testUnionView() throws Exception {
+    int ret = driver.run("create table t1(id int)").getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+    ret = driver.run("create table t2(id int)").getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+    ret = driver.run("create view v1 as select t.id from "
+            + "(select t1.id from t1 union all select t2.id from t2) as t")
+        .getResponseCode();
+    assertEquals("Checking command success", 0, ret);
+
+    driver.compile("select * from v1");
+    // view entity
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[0].getName());
+
+    // first table in union query with view as parent
+    assertEquals("default@t1", CheckInputReadEntity.readEntities[1].getName());
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[1]
+        .getParents()
+        .iterator().next().getName());
+    // second table in union query with view as parent
+    assertEquals("default@t2", CheckInputReadEntity.readEntities[2].getName());
+    assertEquals("default@v1", CheckInputReadEntity.readEntities[2]
+        .getParents()
+        .iterator().next().getName());
+
+  }
+
+}


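The fix tags union branches with the shared SUBQUERY_TAG_1/SUBQUERY_TAG_2 constants and strips those tags in PlanUtils before looking up the view alias, so tables under either branch of the UNION ALL resolve to the enclosing view as their parent ReadEntity. A minimal HiveQL sketch of the scenario the new test drives (same t1/t2/v1 names as TestViewEntity.testUnionView):

create table t1(id int);
create table t2(id int);
create view v1 as
select t.id from (select t1.id from t1 union all select t2.id from t2) as t;

-- compiling this should record default@t1 and default@t2 as inputs,
-- each with default@v1 as its parent entity
select * from v1;
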
[23/39] hive git commit: HIVE-10538: Fix NPE in FileSinkOperator from hashcode mismatch (Peter Slawski reviewed by Prasanth Jayachandran)

Posted by pr...@apache.org.
HIVE-10538: Fix NPE in FileSinkOperator from hashcode mismatch (Peter Slawski reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3633db25
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3633db25
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3633db25

Branch: refs/heads/llap
Commit: 3633db25fadd39fdcad15d95af1cf69cc6e2429e
Parents: 0af1d6e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu May 7 13:40:45 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu May 7 13:40:45 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |   3 +-
 .../test/queries/clientpositive/bucket_many.q   |  16 ++
 .../results/clientpositive/bucket_many.q.out    | 230 +++++++++++++++++++
 .../results/clientpositive/spark/cbo_gby.q.out  |   4 +-
 .../clientpositive/spark/cbo_udf_udaf.q.out     |   2 +-
 ...pby_complex_types_multi_single_reducer.q.out |  38 +--
 .../spark/lateral_view_explode2.q.out           |   4 +-
 .../clientpositive/spark/union_remove_25.q.out  |   2 +-
 .../clientpositive/spark/union_top_level.q.out  |  16 +-
 .../spark/vector_cast_constant.q.java1.7.out    |  16 +-
 .../spark/vector_cast_constant.q.java1.8.out    |  16 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   4 +-
 13 files changed, 300 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 3eff7d0..eeb46cc 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -3,6 +3,7 @@ minimr.query.files=auto_sortmerge_join_16.q,\
   bucket4.q,\
   bucket5.q,\
   bucket6.q,\
+  bucket_many.q,\
   bucket_num_reducers.q,\
   bucket_num_reducers2.q,\
   bucketizedhiveinputformat.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 468d87f..859a28f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -125,7 +125,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
   protected transient Object[] cachedValues;
   protected transient List<List<Integer>> distinctColIndices;
   protected transient Random random;
-  protected transient int bucketNumber;
+  protected transient int bucketNumber = -1;
 
   /**
    * This two dimensional array holds key data and a corresponding Union object
@@ -552,6 +552,7 @@ public class ReduceSinkOperator extends TerminalOperator<ReduceSinkDesc>
     // in case of bucketed table, insert the bucket number as the last column in value
     if (bucketEval != null) {
       length -= 1;
+      assert bucketNumber >= 0;
       cachedValues[length] = new Text(String.valueOf(bucketNumber));
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/queries/clientpositive/bucket_many.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/bucket_many.q b/ql/src/test/queries/clientpositive/bucket_many.q
new file mode 100644
index 0000000..1f0b795
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/bucket_many.q
@@ -0,0 +1,16 @@
+set hive.enforce.bucketing = true;
+set mapred.reduce.tasks = 16;
+
+create table bucket_many(key int, value string) clustered by (key) into 256 buckets;
+
+explain extended
+insert overwrite table bucket_many
+select * from src;
+
+insert overwrite table bucket_many
+select * from src;
+
+explain
+select * from bucket_many tablesample (bucket 1 out of 256) s;
+
+select * from bucket_many tablesample (bucket 1 out of 256) s;

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/bucket_many.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_many.q.out b/ql/src/test/results/clientpositive/bucket_many.q.out
new file mode 100644
index 0000000..9f09163
--- /dev/null
+++ b/ql/src/test/results/clientpositive/bucket_many.q.out
@@ -0,0 +1,230 @@
+PREHOOK: query: create table bucket_many(key int, value string) clustered by (key) into 256 buckets
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_many
+POSTHOOK: query: create table bucket_many(key int, value string) clustered by (key) into 256 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_many
+PREHOOK: query: explain extended
+insert overwrite table bucket_many
+select * from src
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+insert overwrite table bucket_many
+select * from src
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            src
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_TAB
+            TOK_TABNAME
+               bucket_many
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_ALLCOLREF
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                sort order: 
+                Map-reduce partition columns: UDFToInteger(_col0) (type: int)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: -1
+                value expressions: _col0 (type: string), _col1 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+      Needs Tagging: false
+      Reduce Operator Tree:
+        Select Operator
+          expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            GlobalTableId: 1
+#### A masked pattern was here ####
+            NumFilesPerFileSink: 16
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  bucket_count 256
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types int:string
+#### A masked pattern was here ####
+                  name default.bucket_many
+                  serialization.ddl struct bucket_many { i32 key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_many
+            TotalFiles: 256
+            GatherStats: true
+            MultiFileSpray: true
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count 256
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types int:string
+#### A masked pattern was here ####
+                name default.bucket_many
+                serialization.ddl struct bucket_many { i32 key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.bucket_many
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table bucket_many
+select * from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@bucket_many
+POSTHOOK: query: insert overwrite table bucket_many
+select * from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@bucket_many
+POSTHOOK: Lineage: bucket_many.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: bucket_many.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select * from bucket_many tablesample (bucket 1 out of 256) s
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select * from bucket_many tablesample (bucket 1 out of 256) s
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: s
+            Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((hash(key) & 2147483647) % 256) = 0) (type: boolean)
+              Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from bucket_many tablesample (bucket 1 out of 256) s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_many
+#### A masked pattern was here ####
+POSTHOOK: query: select * from bucket_many tablesample (bucket 1 out of 256) s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_many
+#### A masked pattern was here ####
+256	val_256
+0	val_0
+0	val_0
+0	val_0
+256	val_256

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
index 67c7a63..9ca8a88 100644
--- a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
+++ b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out
@@ -11,10 +11,10 @@ POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 1	4	12
+ 1 	4	2
 NULL	NULL	NULL
  1	4	2
 1 	4	2
- 1 	4	2
 PREHOOK: query: select x, y, count(*) from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from cbo_t1 group by c_float, cbo_t1.c_int, key) R group by y, x
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1
@@ -25,9 +25,9 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
+5.0	2	3
 NULL	NULL	1
 5.0	12	1
-5.0	2	3
 PREHOOK: query: select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0) group by c_float, cbo_t1.c_int, key order by a) cbo_t1 join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key order by q/10 desc, r asc) cbo_t2 on cbo_t1.a=p join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c order by cbo_t3.c_int+c desc, c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cbo_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
index 932943d..ded043f 100644
--- a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
+++ b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out
@@ -79,9 +79,9 @@ POSTHOOK: Input: default@cbo_t1
 POSTHOOK: Input: default@cbo_t1@dt=2014
 #### A masked pattern was here ####
 NULL	0	NULL
+1 	2	1.0
  1 	2	1.0
  1	2	1.0
-1 	2	1.0
 1	12	1.0
 PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from cbo_t1 group by c_float order by a
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
index 9118845..9fe3b72 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out
@@ -204,16 +204,16 @@ POSTHOOK: query: SELECT DEST1.* FROM DEST1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest1
 #### A masked pattern was here ####
-["166"]	1
-["169"]	4
+["118"]	2
+["180"]	1
+["201"]	1
+["202"]	1
 ["238"]	2
-["258"]	1
-["306"]	1
-["384"]	3
-["392"]	1
-["435"]	1
-["455"]	1
-["468"]	4
+["273"]	3
+["282"]	2
+["419"]	1
+["432"]	1
+["467"]	1
 PREHOOK: query: SELECT DEST2.* FROM DEST2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest2
@@ -222,13 +222,13 @@ POSTHOOK: query: SELECT DEST2.* FROM DEST2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest2
 #### A masked pattern was here ####
-{"120":"val_120"}	2
-{"129":"val_129"}	2
-{"160":"val_160"}	1
-{"26":"val_26"}	2
-{"27":"val_27"}	1
-{"288":"val_288"}	2
-{"298":"val_298"}	3
-{"30":"val_30"}	1
-{"311":"val_311"}	3
-{"74":"val_74"}	1
+{"0":"val_0"}	3
+{"138":"val_138"}	4
+{"170":"val_170"}	1
+{"19":"val_19"}	1
+{"222":"val_222"}	1
+{"223":"val_223"}	2
+{"226":"val_226"}	1
+{"489":"val_489"}	4
+{"8":"val_8"}	1
+{"80":"val_80"}	1

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
index a5c95b5..41d60f5 100644
--- a/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
+++ b/ql/src/test/results/clientpositive/spark/lateral_view_explode2.q.out
@@ -93,9 +93,9 @@ POSTHOOK: query: SELECT col1, col2 FROM src LATERAL VIEW explode2(array(1,2,3))
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-2	2
-1	1
 3	3
+1	1
+2	2
 PREHOOK: query: DROP TEMPORARY FUNCTION explode2
 PREHOOK: type: DROPFUNCTION
 PREHOOK: Output: explode2

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
index f32aaea..5853cc0 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
@@ -424,7 +424,7 @@ Partition Parameters:
 	numFiles            	2                   
 	numRows             	-1                  
 	rawDataSize         	-1                  
-	totalSize           	6814                
+	totalSize           	6826                
 #### A masked pattern was here ####
 	 	 
 # Storage Information	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
index f57fc04..a64fc95 100644
--- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
@@ -348,18 +348,18 @@ POSTHOOK: Input: default@src
 0	val_0
 0	val_0
 0	val_0
-10	val_10
-10	val_10
+0	val_0
+0	val_0
+100	val_100
+100	val_100
+100	val_100
+100	val_100
+100	val_100
+100	val_100
 100	val_100
 100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
 104	val_104
 104	val_104
-111	val_111
-111	val_111
 PREHOOK: query: -- ctas
 explain
 create table union_top as

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
index e159c8b..aaac8aa 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
@@ -191,13 +191,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
+65636	50.0	50.0	50
+65550	50.0	50.0	50
+65592	50.0	50.0	50
+65744	50.0	50.0	50
+65668	50.0	50.0	50
+65722	50.0	50.0	50
 65598	50.0	50.0	50
-65694	50.0	50.0	50
-65678	50.0	50.0	50
-65684	50.0	50.0	50
+65568	50.0	50.0	50
 65596	50.0	50.0	50
-65692	50.0	50.0	50
-65630	50.0	50.0	50
-65674	50.0	50.0	50
-65628	50.0	50.0	50
-65776	50.0	50.0	50
+65738	50.0	50.0	50

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
index 43c07e6..44ecd09 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
@@ -191,13 +191,13 @@ POSTHOOK: query: SELECT
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65788	50.0	50.0	50
+65636	50.0	50.0	50
+65550	50.0	50.0	50
+65592	50.0	50.0	50
+65744	50.0	50.0	50
+65722	50.0	50.0	50
+65668	50.0	50.0	50
 65598	50.0	50.0	50
-65694	50.0	50.0	50
-65678	50.0	50.0	50
-65684	50.0	50.0	50
 65596	50.0	50.0	50
-65692	50.0	50.0	50
-65630	50.0	50.0	50
-65674	50.0	50.0	50
-65628	50.0	50.0	50
+65568	50.0	50.0	50
+65738	50.0	50.0	50

http://git-wip-us.apache.org/repos/asf/hive/blob/3633db25/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 3a0c3f1..3044582 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -768,7 +768,7 @@ FROM alltypesorc_string
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
-1123143.857
+1123143.8569999998
 PREHOOK: query: EXPLAIN SELECT
   avg(ctimestamp1),
   variance(ctimestamp1),
@@ -868,4 +868,4 @@ FROM alltypesorc_string
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc_string
 #### A masked pattern was here ####
-2.8798560435897438E13	8.970772952794215E19	8.970772952794215E19	9.206845925236167E19	9.471416447815086E9	9.471416447815086E9	9.471416447815086E9	9.595231068211004E9
+2.8798560435897438E13	8.970772952794214E19	8.970772952794214E19	9.206845925236167E19	9.471416447815086E9	9.471416447815086E9	9.471416447815086E9	9.595231068211004E9


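The bucket_many test exercises the contract the fix tightens: ReduceSinkOperator now initializes bucketNumber to -1 and asserts it was computed before writing it as the last value column, so a hashcode/bucket mismatch fails fast instead of surfacing later as an NPE in FileSinkOperator. As the explain output above shows, TABLESAMPLE on bucket 1 of 256 is rewritten into an explicit hash predicate; a minimal sketch of that equivalence, assuming the bucket_many table created in the test (for an int key, hash(key) is the value itself, which would explain why keys 0 and 256 both land in bucket 1):

-- bucket 1 of 256 via TABLESAMPLE
select * from bucket_many tablesample (bucket 1 out of 256) s;

-- the predicate the planner generates for the same sample
select * from bucket_many
where ((hash(key) & 2147483647) % 256) = 0;
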
[36/39] hive git commit: HIVE-10606: Divide by zero error in HybridHashTableContainer (Jason Dere, reviewed by Alex Pivovarov)

Posted by pr...@apache.org.
HIVE-10606: Divide by zero error in HybridHashTableContainer (Jason Dere, reviewed by Alex Pivovarov)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1c6f3456
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1c6f3456
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1c6f3456

Branch: refs/heads/llap
Commit: 1c6f345631203c2b7fe185e1dfcbdb94bebbcf26
Parents: 9e4803c
Author: Jason Dere <jd...@hortonworks.com>
Authored: Mon May 11 14:29:27 2015 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Mon May 11 14:29:27 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1c6f3456/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
index 412226e..f80ffc5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
@@ -238,7 +238,7 @@ public class HybridHashTableContainer
         keyCountAdj, threshold, loadFactor, keyCount);
 
     memoryThreshold = memoryAvailable;
-    tableRowSize = estimatedTableSize / keyCount;
+    tableRowSize = estimatedTableSize / (keyCount != 0 ? keyCount : 1);
     memoryCheckFrequency = memCheckFreq;
 
     this.nwayConf = nwayConf;


[07/39] hive git commit: Revert "HIVE-9736 : StorageBasedAuthProvider should batch namenode-calls where possible (Mithun Radhakrishnan, reviewed by Chris Nauroth, Sushanth Sowmyan)"

Posted by pr...@apache.org.
Revert "HIVE-9736 : StorageBasedAuthProvider should batch namenode-calls where possible (Mithun Radhakrishnan, reviewed by Chris Nauroth, Sushanth Sowmyan)"

This reverts commit 19886150121b6081127bf1e581b24d8dcc12f1df.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ecde4ae9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ecde4ae9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ecde4ae9

Branch: refs/heads/llap
Commit: ecde4ae96f88ed88141a6593e1f935126d6466f3
Parents: 306e61a
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Wed May 6 17:53:00 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Wed May 6 17:53:00 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/common/FileUtils.java    | 155 ++++++-------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   7 -
 .../StorageBasedAuthorizationProvider.java      | 114 +-------------
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |  29 +---
 .../org/apache/hadoop/fs/DefaultFileAccess.java |  65 +++-----
 .../apache/hadoop/hive/shims/HadoopShims.java   |  24 +--
 .../hadoop/hive/shims/HadoopShimsSecure.java    |   8 -
 7 files changed, 84 insertions(+), 318 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 536fe11..c2c54bc 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -25,16 +25,12 @@ import java.net.URISyntaxException;
 import java.security.AccessControlException;
 import java.security.PrivilegedExceptionAction;
 import java.util.BitSet;
-import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
-import com.google.common.base.Function;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DefaultFileAccess;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -373,54 +369,26 @@ public final class FileUtils {
   public static void checkFileAccessWithImpersonation(final FileSystem fs,
       final FileStatus stat, final FsAction action, final String user)
           throws IOException, AccessControlException, InterruptedException, Exception {
-    checkFileAccessWithImpersonation(fs,
-                                     Iterators.singletonIterator(stat),
-                                     EnumSet.of(action),
-                                     user);
-  }
-
-  /**
-   * Perform a check to determine if the user is able to access the file passed in.
-   * If the user name passed in is different from the current user, this method will
-   * attempt to do impersonate the user to do the check; the current user should be
-   * able to create proxy users in this case.
-   * @param fs   FileSystem of the path to check
-   * @param statuses FileStatus instances representing the file
-   * @param actions The FsActions that will be checked
-   * @param user User name of the user that will be checked for access.  If the user name
-   *             is null or the same as the current user, no user impersonation will be done
-   *             and the check will be done as the current user. Otherwise the file access
-   *             check will be performed within a doAs() block to use the access privileges
-   *             of this user. In this case the user must be configured to impersonate other
-   *             users, otherwise this check will fail with error.
-   * @throws IOException
-   * @throws AccessControlException
-   * @throws InterruptedException
-   * @throws Exception
-   */
-  public static void checkFileAccessWithImpersonation(final FileSystem fs,
-      final Iterator<FileStatus> statuses, final EnumSet<FsAction> actions, final String user)
-          throws IOException, AccessControlException, InterruptedException, Exception {
     UserGroupInformation ugi = Utils.getUGI();
     String currentUser = ugi.getShortUserName();
 
     if (user == null || currentUser.equals(user)) {
       // No need to impersonate user, do the checks as the currently configured user.
-      ShimLoader.getHadoopShims().checkFileAccess(fs, statuses, actions);
-    }
-    else {
-      // Otherwise, try user impersonation. Current user must be configured to do user impersonation.
-      UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
-          user, UserGroupInformation.getLoginUser());
-      proxyUser.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
-          ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, statuses, actions);
-          return null;
-        }
-      });
+      ShimLoader.getHadoopShims().checkFileAccess(fs, stat, action);
+      return;
     }
+
+    // Otherwise, try user impersonation. Current user must be configured to do user impersonation.
+    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
+        user, UserGroupInformation.getLoginUser());
+    proxyUser.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
+        ShimLoader.getHadoopShims().checkFileAccess(fsAsUser, stat, action);
+        return null;
+      }
+    });
   }
 
   /**
@@ -709,91 +677,70 @@ public final class FileUtils {
    * @param path
    * @param conf
    * @param user
+   * @throws AccessControlException
+   * @throws InterruptedException
    * @throws Exception
    */
-  public static void checkDeletePermission(Path path, Configuration conf, String user) throws  Exception {
+  public static void checkDeletePermission(Path path, Configuration conf, String user)
+      throws AccessControlException, InterruptedException, Exception {
+    // This requires the ability to delete the given path.
+    // The following 2 conditions should be satisfied:
+    // 1. Write permission on the parent dir
+    // 2. If the sticky bit is set on the parent dir, then one of the following
+    //    should be true:
+    //   a. The user is the owner of the current dir/file
+    //   b. The user is the owner of the parent dir
+    //   Super-users are also allowed to drop the file, but there is no good way of
+    //   checking whether a user is a super-user, and super-users running Hive queries
+    //   is not a common use case. A super-user can also chown the file in order to drop it.
 
     if(path == null) {
       // no file/dir to be deleted
       return;
     }
 
-    // check user has write permissions on the parent dir
     final FileSystem fs = path.getFileSystem(conf);
+    // check user has write permissions on the parent dir
     FileStatus stat = null;
     try {
       stat = fs.getFileStatus(path);
     } catch (FileNotFoundException e) {
       // ignore
     }
-
     if (stat == null) {
       // no file/dir to be deleted
       return;
     }
-
-    checkDeletePermission(fs, Lists.newArrayList(stat), conf, user);
-  }
-
-  /**
-   * Checks if delete can be performed on given path by given user.
-   * If file does not exist it just returns without throwing an Exception
-   * @param fs The FileSystem instance
-   * @param fileStatuses The FileStatus instances for the paths being checked.
-   * @param conf Configuration, corresponding to the FileSystem.
-   * @param user The user, whose permission is to be checked.
-   * @throws Exception
-   */
-  public static void checkDeletePermission(FileSystem fs, Iterable<FileStatus> fileStatuses,
-                                           Configuration conf, String user) throws Exception {
-
-    // This requires ability to delete the given path.
-    // The following 2 conditions should be satisfied for this-
-    // 1. Write permissions on parent dir
-    // 2. If sticky bit is set on parent dir then one of following should be
-    // true
-    //   a. User is owner of the current dir/file
-    //   b. User is owner of the parent dir
-    FileUtils.checkFileAccessWithImpersonation(fs, fileStatuses.iterator(), EnumSet.of(FsAction.WRITE), user);
+    FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.WRITE, user);
 
     HadoopShims shims = ShimLoader.getHadoopShims();
     if (!shims.supportStickyBit()) {
-      // No support for sticky-bit.
+      // sticky bit not supported
       return;
     }
 
-    List<Path> allParentPaths =
-        Lists.newArrayList(
-            Iterators.transform(fileStatuses.iterator(), new Function<FileStatus, Path>() {
-              @Override
-              public Path apply(FileStatus input) {
-                return input.getPath().getParent();
-              }
-            })
-        );
-
-    Iterator<FileStatus> childStatusIterator = fileStatuses.iterator();
-    for (List<Path> parentPaths : Lists.partition(allParentPaths, getListStatusBatchSize(conf))) {
-      for (FileStatus parentFileStatus : fs.listStatus(parentPaths.toArray(new Path[parentPaths.size()]))) {
-        assert childStatusIterator.hasNext() : "Number of parent-file-statuses doesn't match children.";
-        FileStatus childFileStatus = childStatusIterator.next();
-        // Check sticky-bits on parent-dirs.
-        if (shims.hasStickyBit(parentFileStatus.getPermission())
-            && !parentFileStatus.getOwner().equals(user)
-            && !childFileStatus.getOwner().equals(user)) {
-          throw new IOException(String.format("Permission Denied: User %s can't delete %s because sticky bit is\""
-              + " set on the parent dir and user does not own this file or its parent\"", user, childFileStatus.getPath()));
-        }
-      } // for_each( parent_path );
-    } // for_each( batch_of_parentPaths );
+    // check if sticky bit is set on the parent dir
+    FileStatus parStatus = fs.getFileStatus(path.getParent());
+    if (!shims.hasStickyBit(parStatus.getPermission())) {
+      // no sticky bit, so write permission on parent dir is sufficient
+      // no further checks needed
+      return;
+    }
 
-    assert !childStatusIterator.hasNext() : "Did not process all file-statuses.";
+    // check if user is owner of parent dir
+    if (parStatus.getOwner().equals(user)) {
+      return;
+    }
 
-  } // static void checkDeletePermission();
+    // check if user is owner of current dir/file
+    FileStatus childStatus = fs.getFileStatus(path);
+    if (childStatus.getOwner().equals(user)) {
+      return;
+    }
+    String msg = String.format("Permission Denied: User %s can't delete %s because sticky bit is"
+        + " set on the parent dir and user does not own this file or its parent", user, path);
+    throw new IOException(msg);
 
-  private static int getListStatusBatchSize(Configuration configuration) {
-    return HiveConf.getIntVar(configuration,
-        HiveConf.ConfVars.HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE);
   }
 
   /**

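The revert above restores the single-FileStatus flow in checkFileAccessWithImpersonation. Condensed to its essentials, and using only the public Hadoop UserGroupInformation APIs, the pattern is the one sketched below; the class name and doCheck are illustrative stand-ins, not Hive code.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.UserGroupInformation;

public class ImpersonatedAccessCheckSketch {
  static void check(final FileSystem fs, final FileStatus stat,
      final FsAction action, String user) throws Exception {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    if (user == null || current.getShortUserName().equals(user)) {
      doCheck(fs, stat, action);  // check as the currently configured user
      return;
    }
    // Impersonate: the login user must be allowed to proxy 'user'.
    UserGroupInformation proxy =
        UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
    proxy.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // Re-acquire the FileSystem inside doAs so it is bound to the proxy UGI.
        FileSystem fsAsUser = FileSystem.get(fs.getUri(), fs.getConf());
        doCheck(fsAsUser, stat, action);
        return null;
      }
    });
  }
  static void doCheck(FileSystem fs, FileStatus stat, FsAction action) { /* shim call */ }
}

Re-fetching the FileSystem inside doAs matters: cached FileSystem handles are keyed per UGI, so reusing the caller's handle would evaluate permissions as the wrong user.
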
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index db17f0f..54e154c 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1629,13 +1629,6 @@ public class HiveConf extends Configuration {
         "of updating the original list means that you can append to the defaults\n" +
         "set by SQL standard authorization instead of replacing it entirely."),
 
-    HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE(
-      "hive.authprovider.hdfs.liststatus.batch.size", 1000,
-      "Number of FileStatus objects to be queried for when listing files, for HDFS-based authorization.\n" +
-          "Note: If this exceeds dfs.ls.limit (as set in hdfs-site.xml), DFSClient might use the smaller value as \n" +
-          "the batch-size, internally."
-    ),
-
     HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."),
 
     HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false,

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
index 6a5c510..8f81ef9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/StorageBasedAuthorizationProvider.java
@@ -18,20 +18,15 @@
 
 package org.apache.hadoop.hive.ql.security.authorization;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.AccessControlException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
-import com.google.common.base.Function;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,7 +35,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.Warehouse;
@@ -69,7 +63,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
  * out to the parent directory recursively to determine its permissions till
  * it finds a parent that does exist.
  */
-public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthorizationProviderBase
+public class StorageBasedAuthorizationProvider extends HiveAuthorizationProviderBase
     implements HiveMetastoreAuthorizationProvider {
 
   private Warehouse wh;
@@ -248,89 +242,6 @@ public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthori
     }
   }
 
-  @Override
-  public void authorize(Table table, Iterable<Partition> partitions,
-                        Privilege[] requiredReadPrivileges, Privilege[] requiredWritePrivileges)
-       throws HiveException, AuthorizationException {
-
-    try {
-      class MustCheckTablePermissions { // For closure.
-        public boolean value = false;
-      }
-
-      final MustCheckTablePermissions mustCheckTablePermissions = new MustCheckTablePermissions();
-      final FileSystem fs = table.getDataLocation().getFileSystem(getConf());
-
-      // Get partition paths. Filter out null-partitions, and partitions without data-locations.
-      Iterator<Partition> nonNullPartitions
-               = Iterators.filter(partitions.iterator(), new Predicate<Partition>() {
-        @Override
-        public boolean apply(Partition partition) {
-          try {
-            boolean isValidPartitionPath = partition != null
-                                           && partition.getDataLocation() != null
-                                           && fs.exists(partition.getDataLocation());
-            mustCheckTablePermissions.value |= isValidPartitionPath;
-            return isValidPartitionPath;
-          }
-          catch (IOException exception){
-            throw new RuntimeException("Could not find location for partition: " + partition, exception);
-          }
-        }
-      });
-
-      if (mustCheckTablePermissions.value) {
-        // At least one partition was null, or had a non-existent path. So check table-permissions, once.
-        // Partition path can be null in the case of a new create partition - in this case,
-        // we try to default to checking the permissions of the parent table.
-        // Partition itself can also be null, in cases where this gets called as a generic
-        // catch-all call in cases like those with CTAS onto an unpartitioned table (see HIVE-1887)
-
-        // this should be the case only if this is a create partition.
-        // The privilege needed on the table should be ALTER_DATA, and not CREATE
-        authorize(table, new Privilege[]{}, new Privilege[]{Privilege.ALTER_DATA});
-      }
-
-
-      // authorize drops if there was a drop privilege requirement
-      // extract drop privileges
-      DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(requiredReadPrivileges, requiredWritePrivileges);
-      requiredReadPrivileges = privExtractor.getReadReqPriv();
-      requiredWritePrivileges = privExtractor.getWriteReqPriv();
-      EnumSet<FsAction> actions = getFsActions(requiredReadPrivileges);
-      actions.addAll(getFsActions(requiredWritePrivileges));
-
-      ArrayList<Path> allPartitionPaths
-                = Lists.newArrayList(Iterators.transform(nonNullPartitions, new Function<Partition, Path>() {
-        @Override
-        public Path apply(Partition input) {
-          return input.getDataLocation();
-        }
-      }));
-
-      for (List<Path> partitionPaths : Lists.partition(allPartitionPaths, getListStatusBatchSize(getConf()))) {
-
-        List<FileStatus> fileStatuses = Arrays.asList(
-            fs.listStatus(partitionPaths.toArray(new Path[partitionPaths.size()])));
-
-        if (privExtractor.hasDropPrivilege) {
-          FileUtils.checkDeletePermission(fs, fileStatuses, getConf(), authenticator.getUserName());
-        }
-
-        checkPermissions(fs, fileStatuses.iterator(), actions, authenticator.getUserName());
-      }
-
-    }
-    catch (Exception exception) {
-      throw hiveException(exception);
-    }
-  }
-
-  private static int getListStatusBatchSize(Configuration configuration) {
-    return HiveConf.getIntVar(configuration,
-                              HiveConf.ConfVars.HIVE_AUTHORIZATION_HDFS_LIST_STATUS_BATCH_SIZE);
-  }
-
   private void checkDeletePermission(Path dataLocation, Configuration conf, String userName)
       throws HiveException {
     try {
@@ -477,28 +388,17 @@ public class StorageBasedAuthorizationProvider extends HiveMultiPartitionAuthori
   protected static void checkPermissions(final FileSystem fs, final FileStatus stat,
       final EnumSet<FsAction> actions, String user) throws IOException,
       AccessControlException, HiveException {
-    checkPermissions(fs, Iterators.singletonIterator(stat), actions, user);
-  }
-
-  @SuppressWarnings("deprecation")
-  protected static void checkPermissions(final FileSystem fs, Iterator<FileStatus> fileStatuses,
-                                         final EnumSet<FsAction> actions, String user)
-      throws IOException, AccessControlException, HiveException {
 
+    if (stat == null) {
+      // File named by path doesn't exist; nothing to validate.
+      return;
+    }
     FsAction checkActions = FsAction.NONE;
     for (FsAction action : actions) {
       checkActions = checkActions.or(action);
     }
-
-    Iterator<FileStatus> nonNullFileStatuses = Iterators.filter(fileStatuses, new Predicate<FileStatus>() {
-      @Override
-      public boolean apply(FileStatus fileStatus) {
-        return fileStatus != null;
-      }
-    });
-
     try {
-      FileUtils.checkFileAccessWithImpersonation(fs, nonNullFileStatuses, EnumSet.of(checkActions), user);
+      FileUtils.checkFileAccessWithImpersonation(fs, stat, checkActions, user);
     } catch (Exception err) {
       // fs.permission.AccessControlException removed by HADOOP-11356, but Hive users on older
      // Hadoop versions may still see this exception; it has to be referenced by name.

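Both the retained checkPermissions overload above and the shims fold an EnumSet<FsAction> into a single FsAction before checking. A minimal sketch of that fold (the class name is illustrative):

import java.util.EnumSet;
import org.apache.hadoop.fs.permission.FsAction;

public class FsActionFoldSketch {
  static FsAction fold(EnumSet<FsAction> actions) {
    FsAction combined = FsAction.NONE;
    for (FsAction a : actions) {
      combined = combined.or(a);  // FsAction behaves like a small bitmask enum
    }
    return combined;
  }

  public static void main(String[] args) {
    // READ or WRITE folds to READ_WRITE.
    System.out.println(fold(EnumSet.of(FsAction.READ, FsAction.WRITE)));
  }
}
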
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
----------------------------------------------------------------------
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 4547baa..d349068 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -29,11 +29,11 @@ import java.security.AccessControlException;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Comparator;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
 import org.apache.commons.lang.StringUtils;
@@ -986,33 +986,6 @@ public class Hadoop23Shims extends HadoopShimsSecure {
     }
   }
 
-  @Override
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-      throws IOException, AccessControlException, Exception {
-    try {
-      if (accessMethod == null) {
-        // Have to rely on Hive implementation of filesystem permission checks.
-        DefaultFileAccess.checkFileAccess(fs, statuses, actions);
-      }
-      else {
-        while (statuses.hasNext()) {
-          accessMethod.invoke(fs, statuses.next(), combine(actions));
-        }
-      }
-
-    } catch (Exception err) {
-      throw wrapAccessException(err);
-    }
-  }
-
-  private static FsAction combine(EnumSet<FsAction> actions) {
-    FsAction resultantAction = FsAction.NONE;
-    for (FsAction action : actions) {
-      resultantAction = resultantAction.or(action);
-    }
-    return resultantAction;
-  }
-
   /**
    * If there is an AccessException buried somewhere in the chain of failures, wrap the original
   * exception in an AccessException. Otherwise just return the original exception.

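The removed shim method relied on the accessMethod handle, which is resolved reflectively so the shim still compiles and runs on Hadoop versions that predate FileSystem.access(). A sketch of the usual probe, assuming the access(Path, FsAction) signature added around Hadoop 2.6:

import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;

public class AccessMethodProbeSketch {
  static Method resolveAccessMethod() {
    try {
      // Present in newer Hadoop; asks the NameNode to evaluate permissions.
      return FileSystem.class.getMethod("access", Path.class, FsAction.class);
    } catch (NoSuchMethodException e) {
      return null;  // older Hadoop: fall back to client-side permission checks
    }
  }
}
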
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java b/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
index c4261cb..45ca210 100644
--- a/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
+++ b/shims/common/src/main/java/org/apache/hadoop/fs/DefaultFileAccess.java
@@ -18,22 +18,23 @@
 
 package org.apache.hadoop.fs;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.AccessControlException;
+import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 
 import javax.security.auth.login.LoginException;
 
-import com.google.common.collect.Iterators;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -46,7 +47,7 @@ public class DefaultFileAccess {
 
   private static Log LOG = LogFactory.getLog(DefaultFileAccess.class);
 
-  private static List<String> emptyGroups = Collections.emptyList();
+  private static List<String> emptyGroups = new ArrayList<String>(0);
 
   public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
       throws IOException, AccessControlException, LoginException {
@@ -59,62 +60,34 @@ public class DefaultFileAccess {
 
   public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action,
       String user, List<String> groups) throws IOException, AccessControlException {
-    checkFileAccess(fs, Iterators.singletonIterator(stat), EnumSet.of(action), user, groups);
-  }
-
-  public static void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions,
-                                     String user, List<String> groups)
-    throws IOException, AccessControlException {
 
     if (groups == null) {
       groups = emptyGroups;
     }
 
-    // Short-circuit for super-users.
     String superGroupName = getSuperGroupName(fs.getConf());
     if (userBelongsToSuperGroup(superGroupName, groups)) {
       LOG.info("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". " +
-          "Permission granted for actions: " + actions + ".");
+          "Permission granted for action: " + action + ".");
       return;
     }
 
-    while (statuses.hasNext()) {
-
-      FileStatus stat = statuses.next();
-      final FsPermission dirPerms = stat.getPermission();
-      final String grp = stat.getGroup();
+    final FsPermission dirPerms = stat.getPermission();
+    final String grp = stat.getGroup();
 
-      FsAction combinedAction = combine(actions);
-      if (user.equals(stat.getOwner())) {
-        if (dirPerms.getUserAction().implies(combinedAction)) {
-          continue;
-        }
-      } else if (groups.contains(grp)) {
-        if (dirPerms.getGroupAction().implies(combinedAction)) {
-          continue;
-        }
-      } else if (dirPerms.getOtherAction().implies(combinedAction)) {
-        continue;
+    if (user.equals(stat.getOwner())) {
+      if (dirPerms.getUserAction().implies(action)) {
+        return;
       }
-
-      throw new AccessControlException("action " + combinedAction + " not permitted on path "
-          + stat.getPath() + " for user " + user);
-
-    } // for_each(fileStatus);
-  }
-
-  private static FsAction combine(EnumSet<FsAction> actions) {
-    FsAction resultantAction = FsAction.NONE;
-    for (FsAction action : actions) {
-      resultantAction = resultantAction.or(action);
+    } else if (groups.contains(grp)) {
+      if (dirPerms.getGroupAction().implies(action)) {
+        return;
+      }
+    } else if (dirPerms.getOtherAction().implies(action)) {
+      return;
     }
-    return resultantAction;
-  }
-
-  public static void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-    throws IOException, AccessControlException, LoginException {
-    UserGroupInformation ugi = Utils.getUGI();
-    checkFileAccess(fs, statuses, actions, ugi.getShortUserName(), Arrays.asList(ugi.getGroupNames()));
+    throw new AccessControlException("action " + action + " not permitted on path "
+        + stat.getPath() + " for user " + user);
   }
 
   private static String getSuperGroupName(Configuration configuration) {

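The restored DefaultFileAccess.checkFileAccess walks the standard owner/group/other bits client-side. Stripped of the super-group short-circuit, the check reduces to the following standalone sketch (class name illustrative):

import java.security.AccessControlException;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class PosixCheckSketch {
  static void check(FileStatus stat, FsAction action, String user, List<String> groups)
      throws AccessControlException {
    FsPermission perms = stat.getPermission();
    if (user.equals(stat.getOwner())) {
      if (perms.getUserAction().implies(action)) return;   // owner bits win first
    } else if (groups.contains(stat.getGroup())) {
      if (perms.getGroupAction().implies(action)) return;  // then group bits
    } else if (perms.getOtherAction().implies(action)) {
      return;                                              // finally 'other' bits
    }
    throw new AccessControlException(
        "action " + action + " not permitted on path " + stat.getPath() + " for user " + user);
  }
}
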
http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 4b79d95..5a6bc44 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -24,17 +24,19 @@ import java.net.URI;
 import java.nio.ByteBuffer;
 import java.security.AccessControlException;
 import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Comparator;
-import java.util.EnumSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
 
+import javax.security.auth.login.LoginException;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,6 +47,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.shims.HadoopShims.StoragePolicyValue;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobConf;
@@ -527,21 +530,6 @@ public interface HadoopShims {
       throws IOException, AccessControlException, Exception;
 
   /**
-   * Check if the configured UGI has access to the path for the given file system action.
-   * Method will return successfully if action is permitted. AccessControlExceptoin will
-   * be thrown if user does not have access to perform the action. Other exceptions may
-   * be thrown for non-access related errors.
-   * @param fs The FileSystem instance
-   * @param statuses The FileStatuses for the paths being checked
-   * @param actions The FsActions being checked
-   * @throws IOException
-   * @throws AccessControlException
-   * @throws Exception
-   */
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> actions)
-      throws Exception;
-
-  /**
    * Use password API (if available) to fetch credentials/password
    * @param conf
    * @param name

http://git-wip-us.apache.org/repos/asf/hive/blob/ecde4ae9/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
index 8e51c02..89d7798 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
@@ -25,9 +25,7 @@ import java.net.URI;
 import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.EnumSet;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Set;
 
 import org.apache.commons.lang.ArrayUtils;
@@ -393,11 +391,5 @@ public abstract class HadoopShimsSecure implements HadoopShims {
   }
 
   @Override
-  public void checkFileAccess(FileSystem fs, Iterator<FileStatus> statuses, EnumSet<FsAction> action)
-      throws IOException, AccessControlException, Exception {
-    DefaultFileAccess.checkFileAccess(fs, statuses, action);
-  }
-
-  @Override
   abstract public void addDelegationTokens(FileSystem fs, Credentials cred, String uname) throws IOException;
 }

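Taken together with the FileUtils change at the top of this commit, the sticky-bit delete rule reads as follows. This is a condensed sketch, assuming Hadoop 2.x's FsPermission.getStickyBit() where the committed code goes through HadoopShims.hasStickyBit():

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StickyBitDeleteSketch {
  static void check(FileSystem fs, Path path, String user) throws IOException {
    FileStatus parent = fs.getFileStatus(path.getParent());
    if (!parent.getPermission().getStickyBit()) {
      return;  // no sticky bit: write permission on the parent dir is enough
    }
    if (parent.getOwner().equals(user)) {
      return;  // user owns the parent dir
    }
    if (fs.getFileStatus(path).getOwner().equals(user)) {
      return;  // user owns the file/dir itself
    }
    throw new IOException("Permission denied: sticky bit set on parent of " + path
        + " and user " + user + " owns neither the path nor its parent");
  }
}
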

[34/39] hive git commit: Updating RELEASE_NOTES for 1.2.0

Posted by pr...@apache.org.
Updating RELEASE_NOTES for 1.2.0


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8141a4e6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8141a4e6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8141a4e6

Branch: refs/heads/llap
Commit: 8141a4e63c24d63c1d330698ffee0d85d4ca9f31
Parents: 588d145
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Mon May 11 09:13:11 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Mon May 11 09:13:43 2015 -0700

----------------------------------------------------------------------
 RELEASE_NOTES.txt | 459 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 459 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8141a4e6/RELEASE_NOTES.txt
----------------------------------------------------------------------
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index f31fe4c..28272a9 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,3 +1,462 @@
+
+
+Release Notes - Hive - Version 1.2.0
+
+** Sub-task
+    * [HIVE-8119] - Implement Date in ParquetSerde
+    * [HIVE-8164] - Adding in a ReplicationTask that converts a Notification Event to actionable tasks
+    * [HIVE-8165] - Annotation changes for replication
+    * [HIVE-8379] - NanoTimeUtils performs some work needlessly
+    * [HIVE-8696] - HCatClientHMSImpl doesn't use a Retrying-HiveMetastoreClient.
+    * [HIVE-8817] - Create unit test where we insert into an encrypted table and then read from it with pig
+    * [HIVE-8818] - Create unit test where we insert into an encrypted table and then read from it with hcatalog mapreduce
+    * [HIVE-9009] - order by (limit) meaning for the last subquery of union in Hive is different from other main stream RDBMS
+    * [HIVE-9253] - MetaStore server should support timeout for long running requests
+    * [HIVE-9271] - Add ability for client to request metastore to fire an event
+    * [HIVE-9273] - Add option to fire metastore event on insert
+    * [HIVE-9327] - CBO (Calcite Return Path): Removing Row Resolvers from ParseContext
+    * [HIVE-9333] - Move parquet serialize implementation to DataWritableWriter to improve write speeds
+    * [HIVE-9432] - CBO (Calcite Return Path): Removing QB from ParseContext
+    * [HIVE-9501] - DbNotificationListener doesn't include dbname in create database notification and does not include tablename in create table notification
+    * [HIVE-9508] - MetaStore client socket connection should have a lifetime
+    * [HIVE-9550] - ObjectStore.getNextNotification() can return events inside NotificationEventResponse as null which conflicts with its thrift "required" tag
+    * [HIVE-9558] - [Parquet] support HiveDecimalWritable, HiveCharWritable, HiveVarcharWritable in vectorized mode
+    * [HIVE-9563] - CBO(Calcite Return Path): Translate GB to Hive OP [CBO branch]
+    * [HIVE-9571] - CBO (Calcite Return Path): Generate FileSink Op [CBO branch]
+    * [HIVE-9582] - HCatalog should use IMetaStoreClient interface
+    * [HIVE-9585] - AlterPartitionMessage should return getKeyValues instead of getValues
+    * [HIVE-9657] - Use new parquet Types API builder to construct data types
+    * [HIVE-9666] - Improve some qtests
+    * [HIVE-9690] - Refactoring for non-numeric arithmetic operations
+    * [HIVE-9750] - avoid log locks in operators
+    * [HIVE-9792] - Support interval type in expressions/predicates
+    * [HIVE-9810] - prep object registry for multi threading
+    * [HIVE-9819] - Add timeout check inside the HMS server
+    * [HIVE-9824] - LLAP: Native Vectorization of Map Join
+    * [HIVE-9894] - Use new parquet Types API builder to construct DATE data type
+    * [HIVE-9906] - Add timeout mechanism in RawStoreProxy
+    * [HIVE-9937] - LLAP: Vectorized Field-By-Field Serialize / Deserialize to support new Vectorized Map Join
+    * [HIVE-9982] - CBO (Calcite Return Path): Prune TS Relnode schema
+    * [HIVE-9998] - Vectorization support for interval types
+    * [HIVE-10037] - JDBC support for interval expressions
+    * [HIVE-10044] - Allow interval params for year/month/day/hour/minute/second functions
+    * [HIVE-10053] - Override new init API fom ReadSupport instead of the deprecated one
+    * [HIVE-10071] - CBO (Calcite Return Path): Join to MultiJoin rule
+    * [HIVE-10076] - Bump up parquet-hadoop-bundle and parquet-column to the version of 1.6.0rc6
+    * [HIVE-10131] - LLAP: BytesBytesMultiHashMap and mapjoin container should reuse refs
+    * [HIVE-10227] - Concrete implementation of Export/Import based ReplicationTaskFactory
+    * [HIVE-10228] - Changes to Hive Export/Import/DropTable/DropPartition to support replication semantics
+    * [HIVE-10243] - CBO (Calcite Return Path): Introduce JoinAlgorithm Interface
+    * [HIVE-10252] - Make PPD work for Parquet in row group level
+    * [HIVE-10262] - CBO (Calcite Return Path): Temporarily disable Aggregate check input for bucketing
+    * [HIVE-10263] - CBO (Calcite Return Path): Aggregate checking input for bucketing should be conditional
+    * [HIVE-10326] - CBO (Calcite Return Path): Invoke Hive's Cumulative Cost
+    * [HIVE-10329] - Hadoop reflectionutils has issues
+    * [HIVE-10343] - CBO (Calcite Return Path): Parameterize algorithm cost model
+    * [HIVE-10347] - Merge spark to trunk 4/15/2015
+    * [HIVE-10350] - CBO: Use total size instead of bucket count to determine number of splits & parallelism
+    * [HIVE-10369] - CBO: Don't use HiveDefaultCostModel when With Tez and hive.cbo.costmodel.extended enabled
+    * [HIVE-10375] - CBO (Calcite Return Path): disable the identity project remover for some union operators
+    * [HIVE-10386] - CBO (Calcite Return Path): Disable Trivial Project Removal on ret path
+    * [HIVE-10391] - CBO (Calcite Return Path): HiveOpConverter always assumes that HiveFilter does not include a partition column
+    * [HIVE-10400] - CBO (Calcite Return Path): Exception when column name contains dot or colon characters
+    * [HIVE-10413] - [CBO] Return path assumes distinct column cant be same as grouping column
+    * [HIVE-10416] - CBO (Calcite Return Path): Fix return columns if Sort operator is on top of plan returned by Calcite
+    * [HIVE-10426] - Rework/simplify ReplicationTaskFactory instantiation
+    * [HIVE-10455] - CBO (Calcite Return Path): Different data types at Reducer before JoinOp
+    * [HIVE-10462] - CBO (Calcite Return Path): MapJoin and SMBJoin conversion not triggered
+    * [HIVE-10493] - Merge multiple joins when join keys are the same
+    * [HIVE-10506] - CBO (Calcite Return Path): Disallow return path to be enable if CBO is off
+    * [HIVE-10512] - CBO (Calcite Return Path): SMBJoin conversion throws ClassCastException
+    * [HIVE-10520] - LLAP: Must reset small table result columns for Native Vectorization of Map Join
+    * [HIVE-10522] - CBO (Calcite Return Path): fix the wrong needed column names when TS is created
+    * [HIVE-10526] - CBO (Calcite Return Path): HiveCost epsilon comparison should take row count in to account
+    * [HIVE-10547] - CBO (Calcite Return Path) : genFileSinkPlan uses wrong partition col to create FS
+    * [HIVE-10549] - CBO (Calcite Return Path): Enable NonBlockingOpDeDupProc
+
+
+** Bug
+    * [HIVE-3454] - Problem with CAST(BIGINT as TIMESTAMP)
+    * [HIVE-4625] - HS2 should not attempt to get delegation token from metastore if using embedded metastore
+    * [HIVE-5545] - HCatRecord getInteger method returns String when used on Partition columns of type INT
+    * [HIVE-5672] - Insert with custom separator not supported for non-local directory
+    * [HIVE-6069] - Improve error message in GenericUDFRound
+    * [HIVE-6099] - Multi insert does not work properly with distinct count
+    * [HIVE-6950] - Parsing Error in GROUPING SETS
+    * [HIVE-7351] - ANALYZE TABLE statement fails on postgres metastore
+    * [HIVE-8524] - When table is renamed stats are lost as changes are not propagated to metastore tables TAB_COL_STATS and PART_COL_STATS
+    * [HIVE-8626] - Extend HDFS super-user checks to dropPartitions
+    * [HIVE-8746] - ORC timestamp columns are sensitive to daylight savings time
+    * [HIVE-8890] - HiveServer2 dynamic service discovery: use persistent ephemeral nodes curator recipe
+    * [HIVE-8915] - Log file explosion due to non-existence of COMPACTION_QUEUE table
+    * [HIVE-9002] - union all does not generate correct result for order by and limit
+    * [HIVE-9023] - HiveHistoryImpl relies on removed counters to print num rows
+    * [HIVE-9073] - NPE when using custom windowing UDAFs
+    * [HIVE-9083] - New metastore API to support to purge partition-data directly in dropPartitions().
+    * [HIVE-9086] - Add language support to PURGE data while dropping partitions.
+    * [HIVE-9115] - Hive build failure on hadoop-2.7 due to HADOOP-11356
+    * [HIVE-9118] - Support auto-purge for tables, when dropping tables/partitions.
+    * [HIVE-9151] - Checking s against null in TezJobMonitor#getNameWithProgress() should be done earlier
+    * [HIVE-9228] - Problem with subquery using windowing functions
+    * [HIVE-9303] - Parquet files are written with incorrect definition levels
+    * [HIVE-9322] - Make null-checks consistent for MapObjectInspector subclasses.
+    * [HIVE-9350] - Add ability for HiveAuthorizer implementations to filter out results of 'show tables', 'show databases'
+    * [HIVE-9397] - SELECT max(bar) FROM foo is broken after ANALYZE ... FOR COLUMNS
+    * [HIVE-9430] - NullPointerException on ALTER TABLE ADD PARTITION if no value given
+    * [HIVE-9438] - The standalone-jdbc jar missing some jars
+    * [HIVE-9456] - Make Hive support unicode with MSSQL as Metastore backend
+    * [HIVE-9468] - Test groupby3_map_skew.q fails due to decimal precision difference
+    * [HIVE-9471] - Bad seek in uncompressed ORC, at row-group boundary.
+    * [HIVE-9472] - Implement 7 simple UDFs added to Hive
+    * [HIVE-9474] - truncate table changes permissions on the target
+    * [HIVE-9481] - allow column list specification in INSERT statement
+    * [HIVE-9482] - Hive parquet timestamp compatibility
+    * [HIVE-9484] - ThriftCLIService#getDelegationToken does case sensitive comparison
+    * [HIVE-9486] - Use session classloader instead of application loader
+    * [HIVE-9489] - add javadoc for UDFType annotation
+    * [HIVE-9496] - Slf4j warning in hive command
+    * [HIVE-9507] - Make "LATERAL VIEW inline(expression) mytable" tolerant to nulls
+    * [HIVE-9509] - Restore partition spec validation removed by HIVE-9445
+    * [HIVE-9512] - HIVE-9327 causing regression in stats annotation
+    * [HIVE-9513] - NULL POINTER EXCEPTION
+    * [HIVE-9526] - ClassCastException thrown by HiveStatement
+    * [HIVE-9529] - "alter table .. concatenate" under Tez mode should create TezTask
+    * [HIVE-9539] - Wrong check of version format in TestWebHCatE2e.getHiveVersion()
+    * [HIVE-9553] - Fix log-line in Partition Pruner
+    * [HIVE-9555] - assorted ORC refactorings for LLAP on trunk
+    * [HIVE-9560] - When hive.stats.collect.rawdatasize=true, 'rawDataSize' for an ORC table will result in value '0' after running 'analyze table TABLE_NAME compute statistics;'
+    * [HIVE-9565] - Minor cleanup in TestMetastoreExpr.
+    * [HIVE-9567] - JSON SerDe not escaping special chars when writing char/varchar data
+    * [HIVE-9580] - Server returns incorrect result from JOIN ON VARCHAR columns
+    * [HIVE-9587] - UDF decode should accept STRING_GROUP types for the second parameter
+    * [HIVE-9588] - Reimplement HCatClientHMSImpl.dropPartitions() with HMSC.dropPartitions()
+    * [HIVE-9592] - fix ArrayIndexOutOfBoundsException in date_add and date_sub initialize
+    * [HIVE-9609] - AddPartitionMessage.getPartitions() can return null
+    * [HIVE-9612] - Turn off DEBUG logging for Lazy Objects for tests
+    * [HIVE-9613] - Left join query plan outputs  wrong column when using subquery
+    * [HIVE-9617] - UDF from_utc_timestamp throws NPE if the second argument is null
+    * [HIVE-9619] - Uninitialized read of numBitVectors in NumDistinctValueEstimator
+    * [HIVE-9620] - Cannot retrieve column statistics using HMS API if column name contains uppercase characters
+    * [HIVE-9622] - Getting NPE when trying to restart HS2 when metastore is configured to use org.apache.hadoop.hive.thrift.DBTokenStore
+    * [HIVE-9623] - NullPointerException in MapJoinOperator.processOp(MapJoinOperator.java:253) for TPC-DS Q75 against un-partitioned schema
+    * [HIVE-9624] - NullPointerException in MapJoinOperator.processOp(MapJoinOperator.java:253) for TPC-DS Q75 against un-partitioned schema
+    * [HIVE-9628] - HiveMetaStoreClient.dropPartitions(...List<ObjectPair<Integer,byte[]>>...) doesn't take (boolean needResult)
+    * [HIVE-9633] - Add HCatClient.dropPartitions() overload to skip deletion of partition-directories.
+    * [HIVE-9644] - Fold case & when udfs
+    * [HIVE-9645] - Constant folding case NULL equality
+    * [HIVE-9647] - Discrepancy in cardinality estimates between partitioned and un-partitioned tables
+    * [HIVE-9648] - Null check key provider before doing set
+    * [HIVE-9652] - Tez in place updates should detect redirection of STDERR
+    * [HIVE-9655] - Dynamic partition table insertion error
+    * [HIVE-9665] - Parallel move task optimization causes race condition
+    * [HIVE-9667] - Disable ORC bloom filters for ORC v11 output-format
+    * [HIVE-9674] - *DropPartitionEvent should handle partition-sets.
+    * [HIVE-9679] - Remove redundant null-checks from DbNotificationListener.
+    * [HIVE-9680] - GlobalLimitOptimizer is not checking filters correctly
+    * [HIVE-9681] - Extend HiveAuthorizationProvider to support partition-sets.
+    * [HIVE-9684] - Incorrect disk range computation in ORC because of optional stream kind
+    * [HIVE-9706] - HBase handler support for snapshots should confirm properties before use
+    * [HIVE-9711] - ORC Vectorization DoubleColumnVector.isRepeating=false if all entries are NaN
+    * [HIVE-9716] - Map job fails when table's LOCATION does not have scheme
+    * [HIVE-9717] - The max/min function used by AggrStats for decimal type is not what we expected
+    * [HIVE-9720] - Metastore does not properly migrate column stats when renaming a table across databases.
+    * [HIVE-9721] - Hadoop23Shims.setFullFileStatus should check for null
+    * [HIVE-9727] - GroupingID translation from Calcite
+    * [HIVE-9731] - WebHCat MapReduce Streaming Job does not allow StreamXmlRecordReader to be specified
+    * [HIVE-9734] - Correlating expression cannot contain unqualified column references
+    * [HIVE-9735] - aggregate ( smalllint ) fails when ORC file used ava.lang.ClassCastException: java.lang.Long cannot be cast to java.lang.Short
+    * [HIVE-9743] - Incorrect result set for vectorized left outer join
+    * [HIVE-9749] - ObjectStore schema verification logic is incorrect
+    * [HIVE-9754] - rename GenericUDFLevenstein to GenericUDFLevenshtein
+    * [HIVE-9755] - Hive built-in "ngram" UDAF fails when a mapper has no matches.
+    * [HIVE-9767] - Fixes in Hive UDF to be usable in Pig
+    * [HIVE-9770] - Beeline ignores --showHeader for non-tablular output formats i.e csv,tsv,dsv
+    * [HIVE-9772] - Hive parquet timestamp conversion doesn't work with new Parquet
+    * [HIVE-9779] - ATSHook does not log the end user if doAs=false (it logs the hs2 server user)
+    * [HIVE-9791] - insert into table throws NPE
+    * [HIVE-9797] - Need update some spark tests for java 8
+    * [HIVE-9813] - Hive JDBC - DatabaseMetaData.getColumns method cannot find classes added with "add jar" command
+    * [HIVE-9817] - fix DateFormat pattern in hive-exec
+    * [HIVE-9826] - Firing insert event fails on temporary table
+    * [HIVE-9831] - HiveServer2 should use ConcurrentHashMap in ThreadFactory
+    * [HIVE-9832] - Merge join followed by union and a map join in hive on tez fails.
+    * [HIVE-9834] - VectorGroupByOperator logs too much
+    * [HIVE-9836] - Hive on tez: fails when virtual columns are present in the join conditions (for e.g. partition columns)
+    * [HIVE-9839] - HiveServer2 leaks OperationHandle on async queries which fail at compile phase
+    * [HIVE-9841] - IOException thrown by ORC should include the path of processing file
+    * [HIVE-9845] - HCatSplit repeats information making input split data size huge
+    * [HIVE-9848] - readlink -f is GNU coreutils only (used in bin/hive)
+    * [HIVE-9851] - org.apache.hadoop.hive.serde2.avro.AvroSerializer should use org.apache.avro.generic.GenericData.Array when serializing a list
+    * [HIVE-9855] - Runtime skew join doesn't work when skewed data only exists in big table
+    * [HIVE-9860] - MapredLocalTask/SecureCmdDoAs leaks local files
+    * [HIVE-9866] - Changing a column's type doesn't change column stats type in metastore
+    * [HIVE-9869] - Trunk doesn't build with hadoop-1
+    * [HIVE-9873] - Hive on MR throws DeprecatedParquetHiveInput exception
+    * [HIVE-9877] - Beeline cannot run multiple statements in the same row
+    * [HIVE-9886] - Hive on tez: NPE when converting join to SMB in sub-query
+    * [HIVE-9892] - various MSSQL upgrade scripts don't work
+    * [HIVE-9908] - vectorization error binary type not supported, group by with binary columns
+    * [HIVE-9915] - Allow specifying file format for managed tables
+    * [HIVE-9919] - upgrade scripts don't work on some auto-created DBs due to absence of tables
+    * [HIVE-9920] - DROP DATABASE IF EXISTS throws exception if database does not exist
+    * [HIVE-9923] - No clear message when "from" is missing
+    * [HIVE-9929] - StatsUtil#getAvailableMemory could return negative value
+    * [HIVE-9930] - fix QueryPlan.makeQueryId time format
+    * [HIVE-9932] - DDLTask.conf hides base class Task.conf
+    * [HIVE-9934] - Vulnerability in LdapAuthenticationProviderImpl enables HiveServer2 client to degrade the authentication mechanism to "none", allowing authentication without password
+    * [HIVE-9936] - fix potential NPE in DefaultUDAFEvaluatorResolver
+    * [HIVE-9944] - Convert array[] to string properly in log messages
+    * [HIVE-9945] - FunctionTask.conf hides Task.conf field
+    * [HIVE-9947] - ScriptOperator replaceAll uses unescaped dot and result is not assigned
+    * [HIVE-9948] - SparkUtilities.getFileName passes File.separator to String.split() method
+    * [HIVE-9950] - fix rehash in CuckooSetBytes and CuckooSetLong
+    * [HIVE-9951] - VectorizedRCFileRecordReader creates Exception but does not throw it
+    * [HIVE-9952] - fix NPE in CorrelationUtilities
+    * [HIVE-9953] - fix NPE in WindowingTableFunction
+    * [HIVE-9954] - UDFJson uses the == operator to compare Strings
+    * [HIVE-9955] - TestVectorizedRowBatchCtx compares byte[] using equals() method
+    * [HIVE-9956] - use BigDecimal.valueOf instead of new in TestFileDump
+    * [HIVE-9957] - Hive 1.1.0 not compatible with Hadoop 2.4.0
+    * [HIVE-9961] - HookContext for view should return a table type of VIRTUAL_VIEW
+    * [HIVE-9971] - Clean up operator class
+    * [HIVE-9975] - Renaming a nonexisting partition should not throw out NullPointerException
+    * [HIVE-9976] - Possible race condition in DynamicPartitionPruner for <200ms tasks
+    * [HIVE-9977] - Compactor not running on partitions after dynamic partitioned insert
+    * [HIVE-9984] - JoinReorder's getOutputSize is exponential
+    * [HIVE-9991] - Cannot do a SELECT on external tables that are on S3 due to Encryption error
+    * [HIVE-9994] - Hive query plan returns sensitive data to external applications
+    * [HIVE-9997] - minor tweaks for bytes mapjoin hash table
+    * [HIVE-10001] - SMB join in reduce side
+    * [HIVE-10032] - Remove HCatalog broken java file from source code
+    * [HIVE-10042] - clean up TreeReaders - ORC refactoring for LLAP on trunk
+    * [HIVE-10050] - Support overriding memory configuration for AM launched for TempletonControllerJob
+    * [HIVE-10059] - Make udaf_percentile_approx_23.q test more stable
+    * [HIVE-10062] - HiveOnTez: Union followed by Multi-GB followed by Multi-insert loses data
+    * [HIVE-10066] - Hive on Tez job submission through WebHCat doesn't ship Tez artifacts
+    * [HIVE-10074] - Ability to run HCat Client Unit tests in a system test setting
+    * [HIVE-10078] - Optionally allow logging of records processed in fixed intervals
+    * [HIVE-10083] - SMBJoin fails in case one table is uninitialized
+    * [HIVE-10085] - Lateral view on top of a view throws RuntimeException
+    * [HIVE-10086] - Hive throws error when accessing Parquet file schema using field name match
+    * [HIVE-10087] - Beeline's --silent option should suppress query from being echoed when running with -f option
+    * [HIVE-10093] - Unnecessary HMSHandler initialization for default MemoryTokenStore on HS2
+    * [HIVE-10095] - format_number udf throws NPE
+    * [HIVE-10098] - HS2 local task for map join fails in KMS encrypted cluster
+    * [HIVE-10106] - Regression : Dynamic partition pruning not working after HIVE-9976
+    * [HIVE-10108] - Index#getIndexTableName() returns db.index_table_name
+    * [HIVE-10122] - Hive metastore filter-by-expression is broken for non-partition expressions
+    * [HIVE-10123] - Hybrid grace Hash join : Use estimate key count from stats to initialize BytesBytesMultiHashMap
+    * [HIVE-10128] - BytesBytesMultiHashMap does not allow concurrent read-only access
+    * [HIVE-10136] - BaseWork.vectorMode hides AbstractOperatorDesc.vectorMode
+    * [HIVE-10145] - set Tez ACLs appropriately in hive
+    * [HIVE-10148] - update of bucketing column should not be allowed
+    * [HIVE-10150] - delete from acidTbl where a in(select a from nonAcidOrcTbl) fails
+    * [HIVE-10151] - insert into A select from B is broken when both A and B are Acid tables and bucketed the same way
+    * [HIVE-10152] - ErrorMsg.formatToErrorMsgMap has bad regex
+    * [HIVE-10167] - HS2 logs the server started only before the server is shut down
+    * [HIVE-10172] - Fix performance regression caused by HIVE-8122 for ORC
+    * [HIVE-10178] - DateWritable incorrectly calculates daysSinceEpoch for negative Unix time
+    * [HIVE-10186] - Hive does not log Tez diagnostics on errors
+    * [HIVE-10197] - Add lockId to all ACID log statements
+    * [HIVE-10202] - Beeline outputs prompt+query on standard output when used in non-interactive mode
+    * [HIVE-10208] - templeton.hive.extra.files should be commented out in webhcat-default.xml
+    * [HIVE-10213] - MapReduce jobs using dynamic-partitioning fail on commit.
+    * [HIVE-10225] - CLI JLine does not flush history on quit/Ctrl-C
+    * [HIVE-10226] - Column stats for Date columns not supported
+    * [HIVE-10229] - Set conf and processor context in the constructor instead of init
+    * [HIVE-10231] - Compute partition column stats fails if partition col type is date
+    * [HIVE-10242] - ACID: insert overwrite prevents create table command
+    * [HIVE-10265] - Hive CLI crashes on != inequality
+    * [HIVE-10267] - HIVE-9664 makes hive depend on ivysettings.xml : trivial breakage fix
+    * [HIVE-10269] - HiveMetaStore.java:[6089,29] cannot find symbol class JvmPauseMonitor
+    * [HIVE-10270] - Cannot use Decimal constants less than 0.1BD
+    * [HIVE-10271] - remove hive.server2.thrift.http.min/max.worker.threads properties
+    * [HIVE-10272] - Some HCat tests fail under windows
+    * [HIVE-10273] - Union with partition tables which have no data fails with NPE
+    * [HIVE-10274] - Send context and description to tez via dag info
+    * [HIVE-10275] - GenericUDF getTimestampValue should return Timestamp instead of Date
+    * [HIVE-10284] - enable container reuse for grace hash join
+    * [HIVE-10285] - Incorrect endFunction call in HiveMetaStore
+    * [HIVE-10286] - SARGs: Type Safety via PredicateLeaf.type
+    * [HIVE-10288] - Cannot call permanent UDFs
+    * [HIVE-10290] - Add negative test case to modify a non-existent config value when hive security authorization is enabled.
+    * [HIVE-10303] - HIVE-9471 broke forward compatibility of ORC files
+    * [HIVE-10305] - TestOrcFile has a mistake that makes metadata test ineffective
+    * [HIVE-10306] - We need to print tez summary when hive.server2.logging.level >= PERFORMANCE.
+    * [HIVE-10309] - TestJdbcWithMiniHS2.java broken because of the removal of hive.server2.thrift.http.max.worker.threads
+    * [HIVE-10312] - SASL.QOP in JDBC URL is ignored for Delegation token Authentication
+    * [HIVE-10313] - Literal Decimal ExprNodeConstantDesc should contain value of HiveDecimal instead of String
+    * [HIVE-10318] - The HMS upgrade test does not test patches that affect the upgrade test scripts
+    * [HIVE-10323] - Tez merge join operator does not honor hive.join.emit.interval
+    * [HIVE-10324] - Hive metatool should take table_param_key to allow for changes to avro serde's schema url key
+    * [HIVE-10331] - ORC : Is null SARG filters out all row groups written in old ORC format
+    * [HIVE-10340] - Enable ORC test for timezone reading from old format
+    * [HIVE-10346] - Tez on HBase has problems with settings again
+    * [HIVE-10364] - The HMS upgrade script test does not publish results when prepare.sh fails.
+    * [HIVE-10367] - org.apache.hive.hcatalog.pig.TestHCatLoaderEncryption.* fails in Windows
+    * [HIVE-10370] - Hive does not compile with -Phadoop-1 option
+    * [HIVE-10372] - Update parquet version to 1.6.0
+    * [HIVE-10374] - Revert HIVE-10304 deprecation message to HiveCLI
+    * [HIVE-10384] - RetryingMetaStoreClient does not retry wrapped TTransportExceptions
+    * [HIVE-10407] - separate out the timestamp ranges for testing purposes
+    * [HIVE-10409] - Webhcat tests need to be updated, to accomodate HADOOP-10193
+    * [HIVE-10421] - DROP TABLE with qualified table name ignores database name when checking partitions
+    * [HIVE-10428] - NPE in RegexSerDe using HCat
+    * [HIVE-10431] - HIVE-9555 broke hadoop-1 build
+    * [HIVE-10437] - NullPointerException on queries where map/reduce is not involved on tables with partitions
+    * [HIVE-10441] - Fix confusing log statement in SessionState about hive.execution.engine setting
+    * [HIVE-10442] - HIVE-10098 broke hadoop-1 build
+    * [HIVE-10443] - HIVE-9870 broke hadoop-1 build
+    * [HIVE-10444] - HIVE-10223 breaks hadoop-1 build
+    * [HIVE-10446] - Hybrid Hybrid Grace Hash Join : java.lang.IllegalArgumentException in Kryo while spilling big table
+    * [HIVE-10447] - Beeline JDBC Driver to support 2 way SSL
+    * [HIVE-10450] - More than one TableScan in MapWork not supported in Vectorization -- causes  query to fail during vectorization
+    * [HIVE-10451] -  PTF deserializer fails if values are not used in reducer
+    * [HIVE-10456] - Grace Hash Join should not load spilled partitions on abort
+    * [HIVE-10465] - whitelist restrictions don't get initialized in new copy of HiveConf
+    * [HIVE-10472] - Jenkins HMS upgrade test is not publishing results because JIRAService class is not found.
+    * [HIVE-10477] - Provide option to disable Spark tests
+    * [HIVE-10481] - ACID table update finishes but values not really updated if column names are not all lower case
+    * [HIVE-10483] - insert overwrite partition deadlocks on itself with DbTxnManager
+    * [HIVE-10484] - Vectorization : RuntimeException "Big Table Retained Mapping duplicate column"
+    * [HIVE-10490] - HBase Snapshot IF fails at run time with missing dependency of MetricsRegistry
+    * [HIVE-10499] - Ensure Session/ZooKeeperClient instances are closed
+    * [HIVE-10500] - Repeated deadlocks in underlying RDBMS cause transaction or lock failure
+    * [HIVE-10507] - Expose  RetryingMetastoreClient to other external users of metastore client like Flume and Storm.
+    * [HIVE-10508] - Strip out password information from config passed to Tez/MR in cases where password encryption is not used
+    * [HIVE-10510] - Change 1.2.0-SNAPSHOT to 1.2.0 in branch-1.2
+    * [HIVE-10513] - [CBO] return path : Fix create_func1.q for return path
+    * [HIVE-10514] - Fix MiniCliDriver tests failure
+    * [HIVE-10517] - HCatPartition should not be created with "" as location in tests
+    * [HIVE-10521] - TxnHandler.timeOutTxns only times out some of the expired transactions
+    * [HIVE-10529] - Remove references to tez task context before storing operator plan in object cache
+    * [HIVE-10530] - Aggregate stats cache: bug fixes for RDBMS path
+    * [HIVE-10538] - Fix NPE in FileSinkOperator from hashcode mismatch
+    * [HIVE-10539] - set default value of hive.repl.task.factory
+    * [HIVE-10542] - Full outer joins in tez produce incorrect results in certain cases
+    * [HIVE-10543] - improve error message in MetaStoreAuthzAPIAuthorizerEmbedOnly
+    * [HIVE-10544] - Beeline/Hive JDBC Driver fails in HTTP mode on Windows with java.lang.NoSuchFieldError: INSTANCE
+    * [HIVE-10564] - webhcat should use webhcat-site.xml properties for controller job submission
+    * [HIVE-10572] - Improve Hive service test to check empty string
+    * [HIVE-10576] - add jar command does not work with Windows OS
+    * [HIVE-10578] - update sql standard authorization configuration whitelist
+    * [HIVE-10579] - Fix -Phadoop-1 build
+    * [HIVE-10595] - Dropping a table can cause NPEs in the compactor
+    * [HIVE-10603] - increase default permgen space for HS2 on windows
+    * [HIVE-10604] - update webhcat-default.xml with 1.2 version numbers
+    * [HIVE-10607] - Combination of ReducesinkDedup + TopN optimization yields incorrect result if there are multiple GBY in reducer
+    * [HIVE-10610] - hive command fails to get hadoop version
+    * [HIVE-10612] - HIVE-10578 broke TestSQLStdHiveAccessControllerHS2 tests
+    * [HIVE-10614] - schemaTool upgrade from 0.14.0 to 1.3.0 causes failure
+    * [HIVE-10638] - HIVE-9736 introduces issues with Hadoop23Shims.checkFileAccess
+
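HIVE-10481 above describes ACID UPDATE statements that report success without persisting the new values when a column is referenced with non-lowercase spelling. A minimal sketch of the affected statement shape, assuming a standard Hive 1.x ACID configuration; the table name acid_t and its columns are hypothetical:

    set hive.support.concurrency=true;
    set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
    create table acid_t (id int, name string)
      clustered by (id) into 2 buckets
      stored as orc tblproperties ('transactional'='true');
    -- per the issue title, spelling the column as NAME (not all lowercase)
    -- could complete without actually updating the stored value before the fix
    update acid_t set name = 'val_updated' where id = 1;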
+
+** Improvement
+    * [HIVE-1575] - get_json_object does not support JSON array at the root level
+    * [HIVE-2573] - Create per-session function registry
+    * [HIVE-3185] - allow union set operation without ALL keyword (align to ISO-SQL 2011 specification)
+    * [HIVE-5472] - support a simple scalar which returns the current timestamp
+    * [HIVE-7175] - Provide password file option to beeline
+    * [HIVE-8385] - UNION Operator in Hive
+    * [HIVE-9138] - Add some explain to PTF operator
+    * [HIVE-9143] - select user(), current_user()
+    * [HIVE-9298] - Support reading alternate timestamp formats
+    * [HIVE-9470] - Use a generic writable object to run ColumnarStorageBench write/read tests
+    * [HIVE-9480] - Build UDF TRUNC to implement FIRST_DAY as compared with LAST_DAY
+    * [HIVE-9500] - Support nested structs over 24 levels.
+    * [HIVE-9518] - Implement MONTHS_BETWEEN aligned with Oracle's
+    * [HIVE-9521] - Drop support for Java6
+    * [HIVE-9556] - create UDF to calculate the Levenshtein distance between two strings
+    * [HIVE-9564] - Extend HIVE-9298 for JsonSerDe
+    * [HIVE-9590] - add qtests for ADD_MONTHS UDF
+    * [HIVE-9594] - Add qtests for LAST_DAY udf
+    * [HIVE-9596] - move standard getDisplayString impl to GenericUDF
+    * [HIVE-9607] - Remove unnecessary attach-jdbc-driver execution from package/pom.xml
+    * [HIVE-9618] - Deduplicate RS keys for ptf/windowing
+    * [HIVE-9664] - Hive "add jar" command should be able to download and add jars from a repository
+    * [HIVE-9673] - Set operationhandle in ATS entities for lookups
+    * [HIVE-9699] - Extend PTFs to provide referenced columns for CP
+    * [HIVE-9709] - Hive should support replaying cookie from JDBC driver for beeline
+    * [HIVE-9710] - HiveServer2 should support cookie based authentication, when using HTTP transport.
+    * [HIVE-9738] - create SOUNDEX udf
+    * [HIVE-9744] - Move common arguments validation and value extraction code to GenericUDF
+    * [HIVE-9766] - Add JavaConstantXXXObjectInspector
+    * [HIVE-9788] - Make double quote optional in tsv/csv/dsv output
+    * [HIVE-9793] - Remove hard coded paths from cli driver tests
+    * [HIVE-9800] - Create scripts to do metastore upgrade tests on Jenkins
+    * [HIVE-9857] - Create Factorial UDF
+    * [HIVE-9858] - Create cbrt (cube root) UDF
+    * [HIVE-9859] - Create bitwise left/right shift UDFs
+    * [HIVE-9870] - Add JvmPauseMonitor threads to HMS and HS2 daemons
+    * [HIVE-9909] - Specify hive branch to use on jenkins hms tests
+    * [HIVE-9914] - Post success comments on Jira from Jenkins metastore upgrades scripts
+    * [HIVE-9917] - After HIVE-3454 is done, make int to timestamp conversion configurable
+    * [HIVE-9981] - Avoid throwing many exceptions when attempting to create new hdfs encryption shim
+    * [HIVE-10007] - Support qualified table name in analyze table compute statistics for columns
+    * [HIVE-10027] - Use descriptions from Avro schema files in column comments
+    * [HIVE-10072] - Add vectorization support for Hybrid Grace Hash Join
+    * [HIVE-10114] - Split strategies for ORC
+    * [HIVE-10119] - Allow Log verbosity to be set in hiveserver2 session
+    * [HIVE-10120] - Disallow create table with dot/colon in column name
+    * [HIVE-10146] - Do not count session as idle if query is running
+    * [HIVE-10160] - Give a warning when grouping or ordering by a constant column
+    * [HIVE-10177] - Enable constant folding for char & varchar
+    * [HIVE-10206] - Improve Alter Table to not initialize Serde unnecessarily
+    * [HIVE-10214] - log metastore call timing information aggregated at query level
+    * [HIVE-10216] - log hive cli classpath at debug level
+    * [HIVE-10223] - Consolidate several redundant FileSystem API calls.
+    * [HIVE-10276] - Implement date_format(timestamp, fmt) UDF (usage sketch after this list)
+    * [HIVE-10307] - Support to use number literals in partition column
+    * [HIVE-10339] - Allow JDBC Driver to pass HTTP header Key/Value pairs
+    * [HIVE-10382] - Aggregate stats cache for RDBMS based metastore codepath
+    * [HIVE-10403] - Add n-way join support for Hybrid Grace Hash Join
+    * [HIVE-10467] - Switch to GIT repository on Jenkins precommit tests
+    * [HIVE-10546] - genFileSinkPlan should use the generated SEL's RR for the partition col of FS
+    * [HIVE-10568] - Select count(distinct()) can have more optimal execution plan
+
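For reference, several of the new UDFs listed above (HIVE-10276 date_format, HIVE-9518 months_between, HIVE-9480 trunc, HIVE-9556 levenshtein) can be exercised together; a minimal sketch against Hive 1.2.0, with literal inputs chosen only for illustration:

    select date_format('2015-05-12 03:23:47', 'yyyy-MM'),  -- '2015-05'
           months_between('2015-05-12', '2015-02-12'),     -- 3.0
           trunc('2015-05-12', 'MM'),                      -- '2015-05-01' (first day of month)
           levenshtein('kitten', 'sitting');               -- 3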
+
+** New Feature
+    * [HIVE-7998] - Enhance JDBC Driver to not require class specification
+    * [HIVE-9039] - Support Union Distinct
+    * [HIVE-9188] - BloomFilter support in ORC (DDL sketch after this list)
+    * [HIVE-9277] - Hybrid Hybrid Grace Hash Join
+    * [HIVE-9302] - Beeline add commands to register local jdbc driver names and jars
+    * [HIVE-9780] - Add another level of explain for RDBMS audience
+    * [HIVE-10038] - Add Calcite's ProjectMergeRule.
+    * [HIVE-10099] - Enable constant folding for Decimal
+    * [HIVE-10591] - Support limited integer type promotion in ORC
+
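HIVE-9188 above adds Bloom filters to ORC files, enabled per column through table properties; a minimal DDL sketch, where the table and column names are hypothetical and only the two orc.bloom.filter.* properties come from the feature:

    create table orders_orc (order_id bigint, customer string)
      stored as orc
      tblproperties ('orc.bloom.filter.columns'='customer',
                     'orc.bloom.filter.fpp'='0.05');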
+
+** Task
+    * [HIVE-6617] - Reduce ambiguity in grammar
+    * [HIVE-8898] - Remove HIVE-8874 once HBASE-12493 is fixed
+    * [HIVE-9015] - Constant Folding optimizer doesn't handle expressions involving null
+    * [HIVE-9411] - Improve error messages in TestMultiOutputFormat
+    * [HIVE-9416] - Get rid of Extract Operator
+    * [HIVE-9485] - Update trunk to 1.2.0-SNAPSHOT
+    * [HIVE-9498] - Update golden files of join38 & subquery_in on trunk due to HIVE-9327
+    * [HIVE-9520] - Create NEXT_DAY UDF
+    * [HIVE-9572] - Merge from Spark branch to trunk 02/03/2015
+    * [HIVE-9703] - Merge from Spark branch to trunk 02/16/2015
+    * [HIVE-9867] - Migrate usage of deprecated Calcite methods
+    * [HIVE-9903] - Update calcite version
+    * [HIVE-9966] - Get rid of customBucketMapJoin field from MapJoinDesc
+    * [HIVE-10019] - Configure jenkins precommit jobs to run HMS upgrade tests
+    * [HIVE-10222] - Upgrade Calcite dependency to newest version
+    * [HIVE-10268] - Merge cbo branch into trunk
+
+
+** Test
+    * [HIVE-7948] - Add an E2E test to verify fix for HIVE-7155
+    * [HIVE-8949] - Test for PARQUET-26
+    * [HIVE-9399] - ppd_multi_insert.q generates same output in different order when mapred.reduce.tasks is set to larger than 1
+    * [HIVE-9525] - Enable constant propagation optimization in a few existing tests where it was disabled.
+    * [HIVE-10135] - Add qtest to access struct<> data type with parquet format after parquet column index access enabled
+    * [HIVE-10168] - make groupby3_map.q more stable
+    * [HIVE-10396] - decimal_precision2.q test is failing on trunk
+
+
+
 Release Notes - Hive - Version 1.1.0
 
 ** Sub-task


[26/39] hive git commit: HIVE-10526: CBO (Calcite Return Path): HiveCost epsilon comparison should take row count into account (Laljo John Pullokkaran, reviewed by Ashutosh Chauhan)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
index f0d96c4..dabdcb8 100644
--- a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
@@ -390,9 +390,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -401,71 +401,69 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_partkey (type: int), p_name (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: p_name (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col0 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_name (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      expressions: p_partkey (type: int), p_name (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -473,27 +471,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col0 (type: int), _col1 (type: string)
-                outputColumnNames: _col1, _col3, _col5, _col6
+                  1 _col1 (type: int)
+                outputColumnNames: _col1, _col2, _col4, _col6
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
+                  expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -503,22 +485,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                  0 _col0 (type: string)
+                  1 _col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col4
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col3 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col3 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string)
 
   Stage: Stage-0
     Fetch Operator
@@ -544,9 +542,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -555,36 +553,35 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_partkey (type: int), p_name (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: p_name (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col0 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -598,28 +595,27 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col1 (type: string), _col0 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: p_name (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      expressions: p_partkey (type: int), p_name (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                        key expressions: _col1 (type: string), _col0 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -627,27 +623,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col1 (type: string), _col0 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col1 (type: string), _col0 (type: int)
-                  1 _col1 (type: string), _col0 (type: int)
-                outputColumnNames: _col1, _col3, _col5, _col6
+                  1 _col1 (type: int)
+                outputColumnNames: _col1, _col2, _col4, _col6
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col3 (type: string)
+                  expressions: _col4 (type: string), _col6 (type: string), _col2 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -657,22 +637,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                  0 _col0 (type: string)
+                  1 _col3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col4
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col1 (type: string), _col0 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), _col0 (type: int)
+                  key expressions: _col1 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  value expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: string), _col0 (type: int)
+                  1 _col1 (type: string), _col0 (type: int)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col3 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col3 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string)
 
   Stage: Stage-0
     Fetch Operator
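
The reordered edges above (a left-deep chain Reducer 6 -> Reducer 4 -> Reducer 2 instead of two independent joins merged in a final reducer) are the effect of the HiveCost epsilon change on join-order selection. join_alt_syntax.q exercises multi-way self-joins of the part table written in the alternate comma syntax, with the join predicates in the WHERE clause; a sketch of that query shape, where the exact predicates in the test file may differ:

    select p1.p_name, p2.p_name, p3.p_name, p4.p_name
    from part p1, part p2, part p3, part p4
    where p1.p_partkey = p2.p_partkey
      and p2.p_name = p3.p_name
      and p3.p_partkey = p4.p_partkey;

The join_cond_pushdown_2.q.out and join_cond_pushdown_4.q.out diffs that follow show the same reshaping of the Spark plan for the join-condition-pushdown variants of these queries.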

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
index ff1626f..26e05ac 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_2.q.out
@@ -150,9 +150,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -161,72 +161,72 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col1 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col1 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -234,27 +234,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col9 (type: int), _col10 (type: string)
+                  1 _col9 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -264,22 +248,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
                   0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                  1 _col10 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col9 (type: int), _col10 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
+                  key expressions: _col9 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col9 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col10 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col10 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/809fcb01/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
index 417ba4f..c821fe4 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_4.q.out
@@ -154,9 +154,9 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 2), Map 7 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -165,72 +165,72 @@ STAGE PLANS:
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
-                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                    predicate: p_partkey is not null (type: boolean)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-        Map 4 
+        Map 3 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_partkey is not null (type: boolean)
+                    predicate: p_name is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: int)
+                        key expressions: _col1 (type: string)
                         sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
+                        Map-reduce partition columns: _col1 (type: string)
                         Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: p_name is not null (type: boolean)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
+                    Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
-                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
-                        Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                        Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Map 7 
             Map Operator Tree:
                 TableScan
                   alias: p1
                   Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (p_name is not null and p_partkey is not null) (type: boolean)
+                    predicate: (p_partkey is not null and p_name is not null) (type: boolean)
                     Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                       Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col1 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col1 (type: string)
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                         Statistics: Num rows: 7 Data size: 847 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: int), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                        value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -238,27 +238,11 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: int)
-                  1 _col0 (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
-                Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                  Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 _col0 (type: int), _col1 (type: string)
-                  1 _col9 (type: int), _col10 (type: string)
+                  1 _col9 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                 Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  expressions: _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: string), _col30 (type: string), _col31 (type: string), _col32 (type: int), _col33 (type: string), _col34 (type: double), _col35 (type: string), _col9 (type: int), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35
                   Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -268,22 +252,38 @@ STAGE PLANS:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 6 
+        Reducer 4 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
                   0 _col1 (type: string)
-                  1 _col1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                  1 _col10 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26
                 Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col9 (type: int), _col10 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: _col9 (type: int), _col10 (type: string)
+                  key expressions: _col9 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col9 (type: int)
                   Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string), _col18 (type: int), _col19 (type: string), _col20 (type: string), _col21 (type: string), _col22 (type: string), _col23 (type: int), _col24 (type: string), _col25 (type: double), _col26 (type: string)
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: int), _col1 (type: string)
+                  1 _col0 (type: int), _col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
+                Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col10 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col10 (type: string)
+                  Statistics: Num rows: 7 Data size: 931 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: int), _col11 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: int), _col15 (type: string), _col16 (type: double), _col17 (type: string)
 
   Stage: Stage-0
     Fetch Operator


[06/39] hive git commit: HIVE-10564 : webhcat should use webhcat-site.xml properties for controller job submission HIVE-10564.2.patch (Thejas Nair, reviewed by Eugene Koifman)



Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/306e61af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/306e61af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/306e61af

Branch: refs/heads/llap
Commit: 306e61afbab7b5aabc05f624f7ea4621e4fd9eb7
Parents: bd8d59e
Author: Thejas Nair <th...@hortonworks.com>
Authored: Wed May 6 17:50:07 2015 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Wed May 6 17:50:18 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/templeton/tool/TempletonControllerJob.java  | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/306e61af/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
index 349bd5c..5c7de80 100644
--- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
+++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
@@ -76,16 +76,11 @@ public class TempletonControllerJob extends Configured implements Tool, JobSubmi
    *                              and added to the job
    */
   public TempletonControllerJob(boolean secureMetastoreAccess, AppConfig conf) {
-    super();
+    super(new Configuration(conf));
     this.secureMetastoreAccess = secureMetastoreAccess;
     this.appConf = conf;
   }
 
-  @Override
-  public Configuration getConf() {
-    return appConf;
-  }
-
   private JobID submittedJobId;
 
   public String getSubmittedId() {
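
A note on the change above: TempletonControllerJob extends Configured, and
ToolRunner/JobClient obtain the job configuration through getConf(). Passing
a copy of the AppConfig to Configured's constructor, instead of overriding
getConf() to return the shared appConf, means webhcat-site.xml properties
still reach the controller job while submission-time changes no longer leak
into the server-wide configuration. A minimal sketch of the pattern (the
class name here is illustrative, not from the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.conf.Configured;
  import org.apache.hadoop.util.Tool;

  public abstract class ConfiguredTool extends Configured implements Tool {
    protected ConfiguredTool(Configuration appConf) {
      // Copy the app configuration so changes made while submitting the
      // controller job cannot mutate the long-lived server configuration.
      super(new Configuration(appConf));
    }
  }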


[32/39] hive git commit: HIVE-10646 : ColumnValue does not handle NULL_TYPE (Yongzhi Chen via Szehon)



Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/913229c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/913229c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/913229c1

Branch: refs/heads/llap
Commit: 913229c1b2ceb78e65e96533e983a2e1536911af
Parents: 1538a71
Author: Szehon Ho <sz...@cloudera.com>
Authored: Sun May 10 22:21:15 2015 -0500
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Sun May 10 22:21:49 2015 -0500

----------------------------------------------------------------------
 service/src/java/org/apache/hive/service/cli/ColumnValue.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/913229c1/service/src/java/org/apache/hive/service/cli/ColumnValue.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/ColumnValue.java b/service/src/java/org/apache/hive/service/cli/ColumnValue.java
index 40144cf..662bd93 100644
--- a/service/src/java/org/apache/hive/service/cli/ColumnValue.java
+++ b/service/src/java/org/apache/hive/service/cli/ColumnValue.java
@@ -202,6 +202,8 @@ public class ColumnValue {
     case UNION_TYPE:
     case USER_DEFINED_TYPE:
       return stringValue((String)value);
+    case NULL_TYPE:
+      return stringValue((String)value);
     default:
       return null;
     }
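
For a NULL_TYPE column the runtime value is always null, so the new case
routes it through the existing string wrapper rather than hitting the
default branch and returning nothing at all. A hedged sketch of the effect
(the cast mirrors the patched code; the surrounding method is abbreviated):

  // value is null for NULL_TYPE rows; stringValue((String) null) still
  // produces a well-formed, null-valued column value for the client.
  Object value = null;
  TColumnValue tv = stringValue((String) value);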


[33/39] hive git commit: HIVE-10655 : [PTest2] Propagate additionalProfiles flag to the source-prep.vm (Szehon, reviewed by Brock)



Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/588d145a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/588d145a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/588d145a

Branch: refs/heads/llap
Commit: 588d145ac9e37ad8ea29b9310af2c33d5d59d9b4
Parents: 913229c
Author: Szehon Ho <sz...@cloudera.com>
Authored: Mon May 11 01:39:40 2015 -0500
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Mon May 11 01:39:40 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hive/ptest/execution/PTest.java  | 12 ++++++-
 .../ptest/execution/conf/TestConfiguration.java | 12 ++++---
 .../execution/conf/TestTestConfiguration.java   | 38 +++++++++++++++-----
 .../resources/test-configuration.properties     |  2 ++
 4 files changed, 51 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/588d145a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
index 696b790..7217ef9 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/PTest.java
@@ -33,6 +33,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -80,6 +81,7 @@ public class PTest {
   private final String mBuildTag;
   private final SSHCommandExecutor mSshCommandExecutor;
   private final RSyncCommandExecutor mRsyncCommandExecutor;
+  private final ImmutableMap<String, String> templateDefaults;
 
   public PTest(final TestConfiguration configuration, final ExecutionContext executionContext,
       final String buildTag, final File logDir, final LocalCommandFactory localCommandFactory,
@@ -124,7 +126,10 @@ public class PTest {
     put("mavenArgs", configuration.getMavenArgs()).
     put("mavenBuildArgs", configuration.getMavenBuildArgs()).
     put("mavenTestArgs", configuration.getMavenTestArgs());
-    final ImmutableMap<String, String> templateDefaults = templateDefaultsBuilder.build();
+    if (!Strings.isNullOrEmpty(configuration.getAdditionalProfiles())) {
+      templateDefaultsBuilder.put("additionalProfiles", configuration.getAdditionalProfiles());
+    }
+    templateDefaults = templateDefaultsBuilder.build();
     TestParser testParser = new TestParser(configuration.getContext(), configuration.getTestCasePropertyName(),
         new File(mExecutionContext.getLocalWorkingDirectory(), configuration.getRepositoryName() + "-source"),
         logger);
@@ -360,4 +365,9 @@ public class PTest {
     }
     System.exit(exitCode);
   }
+
+  @VisibleForTesting
+  public Map<String, String> getTemplateDefaults() {
+    return templateDefaults;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/588d145a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java
index 71c513d..ac4bafb 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/conf/TestConfiguration.java
@@ -50,6 +50,7 @@ public class TestConfiguration {
   private static final String MAVEN_ARGS = "mavenArgs";
   private static final String MAVEN_BUILD_ARGS = "mavenBuildArgs";
   private static final String MAVEN_TEST_ARGS = "mavenTestArgs";
+  private static final String ADDITIONAL_PROFILES = "additionalProfiles";
   private static final String ANT_ARGS = "antArgs";
   private static final String JIRA_URL = "jiraUrl";
   private static final String JIRA_USER = "jiraUser";
@@ -69,6 +70,7 @@ public class TestConfiguration {
   private String mavenBuildArgs;
   private String mavenTestArgs;
   private String mavenEnvOpts;
+  private String additionalProfiles;
   private String repositoryType;
   private String repository;
   private String repositoryName;
@@ -114,6 +116,7 @@ public class TestConfiguration {
     mavenBuildArgs = context.getString(MAVEN_BUILD_ARGS, "").trim();
     mavenTestArgs =  context.getString(MAVEN_TEST_ARGS, "").trim();
     mavenEnvOpts =  context.getString(MAVEN_ENV_OPTS, "").trim();
+    additionalProfiles = context.getString(ADDITIONAL_PROFILES, "").trim();
     javaHome =  context.getString(JAVA_HOME, "").trim();
     javaHomeForTests = context.getString(JAVA_HOME_TEST, "").trim();
     patch = Strings.nullToEmpty(null);
@@ -125,6 +128,7 @@ public class TestConfiguration {
     logsURL = context.getString(LOGS_URL, "").trim();
     testCasePropertyName = context.getString(TEST_CASE_PROPERTY_NAME, "testcase").trim();
     sshOpts = context.getString(SSH_OPTS, "").trim();
+
   }
   public Context getContext() {
     return context;
@@ -192,12 +196,11 @@ public class TestConfiguration {
   public String getMavenBuildArgs() {
     return mavenBuildArgs;
   }
-  public String getMavenTestArgs() {
-    return mavenTestArgs;
-  }
+  public String getMavenTestArgs() { return mavenTestArgs; }
   public String getMavenEnvOpts() {
     return mavenEnvOpts;
   }
+  public String getAdditionalProfiles() { return additionalProfiles; }
   public String getJavaHome() {
     return javaHome;
   }
@@ -249,6 +252,7 @@ public class TestConfiguration {
   public void setMavenEnvOpts(String mavenEnvOpts) {
     this.mavenEnvOpts = Strings.nullToEmpty(mavenEnvOpts);
   }
+  public void setAdditionalProfiles(String additionalProfiles) { this.additionalProfiles = additionalProfiles;  }
   @Override
   public String toString() {
     return "TestConfiguration [antArgs=" + antArgs + ", antTestArgs="
@@ -262,7 +266,7 @@ public class TestConfiguration {
         + jiraUrl + ", jiraUser=" + jiraUser + ", jiraPassword=" + jiraPassword
         + ", testCasePropertyName=" + testCasePropertyName + ", buildTool="
         + buildTool + ", jiraName=" + jiraName + ", clearLibraryCache="
-        + clearLibraryCache + "]";
+        + clearLibraryCache + ", additionalProfiles=" + additionalProfiles + "]";
   }
   public static TestConfiguration fromInputStream(InputStream inputStream, Logger logger)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hive/blob/588d145a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/conf/TestTestConfiguration.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/conf/TestTestConfiguration.java b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/conf/TestTestConfiguration.java
index 1e0efa0..848faf2 100644
--- a/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/conf/TestTestConfiguration.java
+++ b/testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/conf/TestTestConfiguration.java
@@ -18,22 +18,25 @@
  */
 package org.apache.hive.ptest.execution.conf;
 
-import java.util.Properties;
-import java.util.Set;
-
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.io.Resources;
 import junit.framework.Assert;
-
+import org.apache.hive.ptest.execution.PTest;
 import org.apache.hive.ptest.execution.context.ExecutionContext;
 import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.io.Resources;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
 
 public class TestTestConfiguration {
 
+  public TemporaryFolder baseDir = new TemporaryFolder();
   private static final Logger LOG = LoggerFactory
       .getLogger(TestTestConfiguration.class);
 
@@ -55,6 +58,7 @@ public class TestTestConfiguration {
     Assert.assertEquals("trunk", conf.getBranch());
     Assert.assertEquals("/tmp/hive-ptest-units/working/dir/working", executionContext.getLocalWorkingDirectory());
     Assert.assertEquals("-Dtest.continue.on.failure=true -Dtest.silent=false", conf.getAntArgs());
+    Assert.assertEquals("hadoop-1,hadoop-2", conf.getAdditionalProfiles());
     Assert.assertNotNull(conf.toString());
 
     Assert.assertEquals("", conf.getPatch());
@@ -72,7 +76,6 @@ public class TestTestConfiguration {
 
     conf.setAntArgs("AntArgs");
     Assert.assertEquals("AntArgs", conf.getAntArgs());
-
   }
   @Test
   public void testContext() throws Exception {
@@ -82,4 +85,23 @@ public class TestTestConfiguration {
     Assert.assertEquals(context.getParameters(), (new TestConfiguration(context, LOG)).getContext().getParameters());
 
   }
+
+  @Test
+  public void testPTest() throws Exception {
+    Host testHost = new Host("test", "test", new String[1], 1);
+    Set<Host> testHosts = new HashSet<Host>();
+    testHosts.add(testHost);
+
+    TestConfiguration conf = TestConfiguration.fromInputStream(
+      Resources.getResource("test-configuration.properties").openStream(), LOG);
+    ExecutionContext execContext = new ExecutionContext(null, testHosts, "test", null);
+    PTest.Builder mPTestBuilder = new PTest.Builder();
+    PTest ptest = mPTestBuilder.build(conf, execContext, "1234", baseDir.newFolder(), null, null, null, null);
+    Map<String, String> templateDefaults = ptest.getTemplateDefaults();
+    Assert.assertEquals("git://github.com/apache/hive.git", templateDefaults.get("repository"));
+    Assert.assertEquals("apache-github", templateDefaults.get("repositoryName"));
+    Assert.assertEquals("trunk", templateDefaults.get("branch"));
+    Assert.assertEquals("-Dtest.continue.on.failure=true -Dtest.silent=false", templateDefaults.get("antArgs"));
+    Assert.assertEquals("hadoop-1,hadoop-2", templateDefaults.get("additionalProfiles"));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/588d145a/testutils/ptest2/src/test/resources/test-configuration.properties
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/test/resources/test-configuration.properties b/testutils/ptest2/src/test/resources/test-configuration.properties
index 8a83bf2..caba9ea 100644
--- a/testutils/ptest2/src/test/resources/test-configuration.properties
+++ b/testutils/ptest2/src/test/resources/test-configuration.properties
@@ -17,6 +17,8 @@ branch = trunk
 
 antArgs = -Dtest.continue.on.failure=true -Dtest.silent=false
 
+additionalProfiles = hadoop-1,hadoop-2
+
 qFileTests = clientNegative
 qFileTest.clientNegative.driver = TestNegativeCliDriver
 qFileTest.clientNegative.directory = ql/src/test/queries/clientnegative
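
Taken together, the pieces above wire the flag end to end: the property is
read and trimmed in TestConfiguration, conditionally added to
templateDefaults in PTest (so templates that never mention it keep
working), and exercised by the new testPTest case. A short usage sketch
based on the setter added in this patch (in and LOG stand for the caller's
input stream and logger):

  // Sketch: programmatic equivalent of setting additionalProfiles in
  // test-configuration.properties. Only a non-empty value is propagated
  // to the template defaults.
  TestConfiguration conf = TestConfiguration.fromInputStream(in, LOG);
  conf.setAdditionalProfiles("hadoop-1,hadoop-2");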


[13/39] hive git commit: HIVE-8696 : HCatClientHMSImpl doesn't use a Retrying-HiveMetastoreClient (Thiruvel Thirumoolan via Sushanth Sowmyan)



Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e0044e07
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e0044e07
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e0044e07

Branch: refs/heads/llap
Commit: e0044e0723d8e831ad0f29c6615b5f70e2ee0658
Parents: 26ec033
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Thu May 7 01:12:11 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Thu May 7 01:13:05 2015 -0700

----------------------------------------------------------------------
 .../hive/hcatalog/common/HiveClientCache.java   |  9 +++-
 .../hcatalog/mapreduce/TestPassProperties.java  |  5 +-
 .../hive/metastore/RetryingMetaStoreClient.java | 53 ++++++++++++++++----
 3 files changed, 55 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
index 578b6ea..0966581 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java
@@ -35,6 +35,7 @@ import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
@@ -81,7 +82,7 @@ class HiveClientCache {
   }
 
   public static IMetaStoreClient getNonCachedHiveMetastoreClient(HiveConf hiveConf) throws MetaException {
-    return new HiveMetaStoreClient(hiveConf);
+    return RetryingMetaStoreClient.getProxy(hiveConf);
   }
 
   public HiveClientCache(HiveConf hiveConf) {
@@ -226,7 +227,11 @@ class HiveClientCache {
       return hiveCache.get(cacheKey, new Callable<ICacheableMetaStoreClient>() {
         @Override
         public ICacheableMetaStoreClient call() throws MetaException {
-          return new CacheableHiveMetaStoreClient(cacheKey.getHiveConf(), timeout);
+          return
+              (ICacheableMetaStoreClient) RetryingMetaStoreClient.getProxy(cacheKey.getHiveConf(),
+                  new Class<?>[]{HiveConf.class, Integer.class},
+                  new Object[]{cacheKey.getHiveConf(), timeout},
+                  CacheableHiveMetaStoreClient.class.getName());
         }
       });
     } catch (ExecutionException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
index 735ab5f..8673b48 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 
 import org.apache.hadoop.conf.Configuration;
@@ -109,8 +110,10 @@ public class TestPassProperties {
       new FileOutputCommitterContainer(job, null).cleanupJob(job);
     } catch (Exception e) {
       caughtException = true;
-      assertTrue(e.getCause().getMessage().contains(
+      assertTrue(((InvocationTargetException)e.getCause().getCause().getCause()).getTargetException().getMessage().contains(
           "Could not connect to meta store using any of the URIs provided"));
+      assertTrue(e.getCause().getMessage().contains(
+          "Unable to instantiate org.apache.hive.hcatalog.common.HiveClientCache$CacheableHiveMetaStoreClient"));
     }
     assertTrue(caughtException);
   }
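
The deeper cause chain asserted above is a direct consequence of the new
construction path: CacheableHiveMetaStoreClient is now instantiated
reflectively behind a retrying proxy, so the original "could not connect"
failure surfaces wrapped in an InvocationTargetException inside the
instantiation error. A hedged helper for digging out the root failure
(unwrapReflective is a name invented here for illustration):

  import java.lang.reflect.InvocationTargetException;

  final class ThrowableUtil {
    // Walk the cause chain, unwrapping reflective wrappers, until the
    // underlying failure (e.g. the metastore connection error) is reached.
    static Throwable unwrapReflective(Throwable t) {
      Throwable cur = t;
      while (cur != null) {
        Throwable next = (cur instanceof InvocationTargetException)
            ? ((InvocationTargetException) cur).getTargetException()
            : cur.getCause();
        if (next == null || next == cur) {
          break;
        }
        cur = next;
      }
      return cur;
    }
  }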

http://git-wip-us.apache.org/repos/asf/hive/blob/e0044e07/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
index 1b6487a..fb44484 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingMetaStoreClient.java
@@ -60,6 +60,18 @@ public class RetryingMetaStoreClient implements InvocationHandler {
 
   protected RetryingMetaStoreClient(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
       Map<String, Long> metaCallTimeMap, Class<? extends IMetaStoreClient> msClientClass) throws MetaException {
+
+    this(hiveConf,
+        new Class[] {HiveConf.class, HiveMetaHookLoader.class},
+        new Object[] {hiveConf, hookLoader},
+        metaCallTimeMap,
+        msClientClass);
+  }
+
+  protected RetryingMetaStoreClient(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, Map<String, Long> metaCallTimeMap, Class<? extends IMetaStoreClient> msClientClass)
+      throws MetaException {
+
     this.retryLimit = hiveConf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES);
     this.retryDelaySeconds = hiveConf.getTimeVar(
         HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
@@ -71,12 +83,14 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     localMetaStore = (msUri == null) || msUri.trim().isEmpty();
 
     reloginExpiringKeytabUser();
-    this.base = MetaStoreUtils.newInstance(msClientClass, new Class[] {
-        HiveConf.class, HiveMetaHookLoader.class}, new Object[] {hiveConf, hookLoader});
+    this.base = (IMetaStoreClient) MetaStoreUtils.newInstance(msClientClass, constructorArgTypes, constructorArgs);
   }
 
-  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) throws MetaException {
-    return getProxy(hiveConf, hookLoader, null, HiveMetaStoreClient.class.getName());
+  public static IMetaStoreClient getProxy(HiveConf hiveConf) throws MetaException {
+
+    return getProxy(hiveConf, new Class[]{HiveConf.class}, new Object[]{hiveConf}, null,
+        HiveMetaStoreClient.class.getName()
+    );
   }
 
   public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
@@ -84,19 +98,40 @@ public class RetryingMetaStoreClient implements InvocationHandler {
     return getProxy(hiveConf, hookLoader, null, mscClassName);
   }
 
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
+      Map<String, Long> metaCallTimeMap, String mscClassName) throws MetaException {
+
+    return getProxy(hiveConf,
+        new Class[] {HiveConf.class, HiveMetaHookLoader.class},
+        new Object[] {hiveConf, hookLoader},
+        metaCallTimeMap,
+        mscClassName
+    );
+  }
+
   /**
    * This constructor is meant for Hive internal use only.
    * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
    */
-  public static IMetaStoreClient getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader,
-      Map<String, Long> metaCallTimeMap, String mscClassName) throws MetaException {
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, String mscClassName) throws MetaException {
+    return getProxy(hiveConf, constructorArgTypes, constructorArgs, null, mscClassName);
+  }
+
+  /**
+   * This constructor is meant for Hive internal use only.
+   * Please use getProxy(HiveConf hiveConf, HiveMetaHookLoader hookLoader) for external purpose.
+   */
+  public static IMetaStoreClient getProxy(HiveConf hiveConf, Class<?>[] constructorArgTypes,
+      Object[] constructorArgs, Map<String, Long> metaCallTimeMap,
+      String mscClassName) throws MetaException {
 
     Class<? extends IMetaStoreClient> baseClass = (Class<? extends IMetaStoreClient>) MetaStoreUtils
         .getClass(mscClassName);
 
-    RetryingMetaStoreClient handler = new RetryingMetaStoreClient(hiveConf, hookLoader,
-        metaCallTimeMap, baseClass);
-
+    RetryingMetaStoreClient handler =
+        new RetryingMetaStoreClient(hiveConf, constructorArgTypes, constructorArgs,
+            metaCallTimeMap, baseClass);
     return (IMetaStoreClient) Proxy.newProxyInstance(
         RetryingMetaStoreClient.class.getClassLoader(), baseClass.getInterfaces(), handler);
   }
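
With the zero-hook overload added above, code such as HiveClientCache can
obtain a retrying client in one call instead of constructing
HiveMetaStoreClient directly. A minimal usage sketch based on the new
signatures (the wrapper class here is illustrative):

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
  import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.MetaException;

  public class MetaStoreClients {
    public static IMetaStoreClient newRetryingClient() throws MetaException {
      HiveConf hiveConf = new HiveConf();
      // Returns a java.lang.reflect.Proxy around HiveMetaStoreClient that
      // retries failed calls, honoring METASTORETHRIFTFAILURERETRIES and
      // METASTORE_CLIENT_CONNECT_RETRY_DELAY from the configuration.
      return RetryingMetaStoreClient.getProxy(hiveConf);
    }
  }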


[30/39] hive git commit: HIVE-10591: Support limited integer type promotion in ORC (Prasanth Jayachandran reviewed by Gunther Hagleitner)



Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/08b0708b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/08b0708b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/08b0708b

Branch: refs/heads/llap
Commit: 08b0708b42da93a3d576210c4a2e8f6b286b12fa
Parents: 3e713bc
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Sat May 9 16:57:36 2015 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Sat May 9 16:57:36 2015 -0700

----------------------------------------------------------------------
 .../ql/io/orc/ConversionTreeReaderFactory.java  |  38 ++
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |  24 +-
 .../hive/ql/io/orc/RecordReaderFactory.java     | 269 +++++++++++++
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   2 +-
 .../clientpositive/orc_int_type_promotion.q     |  79 ++++
 .../clientpositive/orc_int_type_promotion.q.out | 377 +++++++++++++++++++
 6 files changed, 779 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ConversionTreeReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ConversionTreeReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ConversionTreeReaderFactory.java
new file mode 100644
index 0000000..aaf4eb4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ConversionTreeReaderFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Factory for creating ORC tree readers. These tree readers can handle type promotions and type
+ * conversions.
+ */
+public class ConversionTreeReaderFactory extends TreeReaderFactory {
+
+  // TODO: This is currently only a place holder for type conversions.
+
+  public static TreeReader createTreeReader(int columnId,
+      List<OrcProto.Type> types,
+      boolean[] included,
+      boolean skipCorrupt
+  ) throws IOException {
+    return TreeReaderFactory.createTreeReader(columnId, types, included, skipCorrupt);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index b62aa17..b576496 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -17,7 +17,14 @@
  */
 package org.apache.hadoop.hive.ql.io.orc;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.Charset;
+import java.nio.charset.CharsetDecoder;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -37,14 +44,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.charset.CharacterCodingException;
-import java.nio.charset.Charset;
-import java.nio.charset.CharsetDecoder;
-
-import java.util.ArrayList;
-import java.util.List;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
 
 /**
  * A RecordUpdater where the files are stored as ORC.
@@ -205,6 +206,11 @@ public class OrcRecordUpdater implements RecordUpdater {
     return new OrcStruct.OrcStructInspector(fields);
   }
 
+  public static List<String> getAcidEventFields() {
+    return Lists.newArrayList("operation", "originalTransaction", "bucket", "rowId",
+        "currentTransaction", "row");
+  }
+
   OrcRecordUpdater(Path path,
                    AcidOutputFormat.Options options) throws IOException {
     this.options = options;

http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
new file mode 100644
index 0000000..8740ee6
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderFactory.java
@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Factory to create ORC tree readers. It also compares file schema with schema specified on read
+ * to see if type promotions are possible.
+ */
+public class RecordReaderFactory {
+  static final Log LOG = LogFactory.getLog(RecordReaderFactory.class);
+  private static final boolean isLogInfoEnabled = LOG.isInfoEnabled();
+
+  public static TreeReaderFactory.TreeReader createTreeReader(int colId,
+      Configuration conf,
+      List<OrcProto.Type> fileSchema,
+      boolean[] included,
+      boolean skipCorrupt) throws IOException {
+    final boolean isAcid = checkAcidSchema(fileSchema);
+    final List<OrcProto.Type> originalFileSchema;
+    if (isAcid) {
+      originalFileSchema = fileSchema.subList(fileSchema.get(0).getSubtypesCount(),
+          fileSchema.size());
+    } else {
+      originalFileSchema = fileSchema;
+    }
+    final int numCols = originalFileSchema.get(0).getSubtypesCount();
+    List<OrcProto.Type> schemaOnRead = getSchemaOnRead(numCols, conf);
+    List<OrcProto.Type> schemaUsed = getMatchingSchema(fileSchema, schemaOnRead);
+    if (schemaUsed == null) {
+      return TreeReaderFactory.createTreeReader(colId, fileSchema, included, skipCorrupt);
+    } else {
+      return ConversionTreeReaderFactory.createTreeReader(colId, schemaUsed, included, skipCorrupt);
+    }
+  }
+
+  private static boolean checkAcidSchema(List<OrcProto.Type> fileSchema) {
+    if (fileSchema.get(0).getKind().equals(OrcProto.Type.Kind.STRUCT)) {
+      List<String> acidFields = OrcRecordUpdater.getAcidEventFields();
+      List<String> rootFields = fileSchema.get(0).getFieldNamesList();
+      if (acidFields.equals(rootFields)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static List<OrcProto.Type> getMatchingSchema(List<OrcProto.Type> fileSchema,
+      List<OrcProto.Type> schemaOnRead) {
+    if (schemaOnRead == null) {
+      if (isLogInfoEnabled) {
+        LOG.info("Schema is not specified on read. Using file schema.");
+      }
+      return null;
+    }
+
+    if (fileSchema.size() != schemaOnRead.size()) {
+      if (isLogInfoEnabled) {
+        LOG.info("Schema on read column count does not match file schema's column count." +
+            " Falling back to using file schema.");
+      }
+      return null;
+    } else {
+      List<OrcProto.Type> result = Lists.newArrayList(fileSchema);
+      // check type promotion. ORC can only support type promotions for integer types
+      // short -> int -> bigint as same integer readers are used for the above types.
+      boolean canPromoteType = false;
+      for (int i = 0; i < fileSchema.size(); i++) {
+        OrcProto.Type fColType = fileSchema.get(i);
+        OrcProto.Type rColType = schemaOnRead.get(i);
+        if (!fColType.getKind().equals(rColType.getKind())) {
+
+          if (fColType.getKind().equals(OrcProto.Type.Kind.SHORT)) {
+
+            if (rColType.getKind().equals(OrcProto.Type.Kind.INT) ||
+                rColType.getKind().equals(OrcProto.Type.Kind.LONG)) {
+              // type promotion possible, converting SHORT to INT/LONG requested type
+              result.set(i, result.get(i).toBuilder().setKind(rColType.getKind()).build());
+              canPromoteType = true;
+            } else {
+              canPromoteType = false;
+            }
+
+          } else if (fColType.getKind().equals(OrcProto.Type.Kind.INT)) {
+
+            if (rColType.getKind().equals(OrcProto.Type.Kind.LONG)) {
+              // type promotion possible, converting INT to LONG requested type
+              result.set(i, result.get(i).toBuilder().setKind(rColType.getKind()).build());
+              canPromoteType = true;
+            } else {
+              canPromoteType = false;
+            }
+
+          } else {
+            canPromoteType = false;
+          }
+        }
+      }
+
+      if (canPromoteType) {
+        if (isLogInfoEnabled) {
+          LOG.info("Integer type promotion happened in ORC record reader. Using promoted schema.");
+        }
+        return result;
+      }
+    }
+
+    return null;
+  }
+
+  private static List<OrcProto.Type> getSchemaOnRead(int numCols, Configuration conf) {
+    String columnTypeProperty = conf.get(serdeConstants.LIST_COLUMN_TYPES);
+    final String columnNameProperty = conf.get(serdeConstants.LIST_COLUMNS);
+    if (columnTypeProperty == null || columnNameProperty == null) {
+      return null;
+    }
+
+    ArrayList<String> columnNames = Lists.newArrayList(columnNameProperty.split(","));
+    ArrayList<TypeInfo> fieldTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
+    StructTypeInfo structTypeInfo = new StructTypeInfo();
+    // Column types from conf includes virtual and partition columns at the end. We consider only
+    // the actual columns in the file.
+    structTypeInfo.setAllStructFieldNames(Lists.newArrayList(columnNames.subList(0, numCols)));
+    structTypeInfo.setAllStructFieldTypeInfos(Lists.newArrayList(fieldTypes.subList(0, numCols)));
+    ObjectInspector oi = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(structTypeInfo);
+    return getOrcTypes(oi);
+  }
+
+  private static List<OrcProto.Type> getOrcTypes(ObjectInspector inspector) {
+    List<OrcProto.Type> result = Lists.newArrayList();
+    getOrcTypesImpl(result, inspector);
+    return result;
+  }
+
+  private static void getOrcTypesImpl(List<OrcProto.Type> result, ObjectInspector inspector) {
+    OrcProto.Type.Builder type = OrcProto.Type.newBuilder();
+    switch (inspector.getCategory()) {
+      case PRIMITIVE:
+        switch (((PrimitiveObjectInspector) inspector).getPrimitiveCategory()) {
+          case BOOLEAN:
+            type.setKind(OrcProto.Type.Kind.BOOLEAN);
+            break;
+          case BYTE:
+            type.setKind(OrcProto.Type.Kind.BYTE);
+            break;
+          case SHORT:
+            type.setKind(OrcProto.Type.Kind.SHORT);
+            break;
+          case INT:
+            type.setKind(OrcProto.Type.Kind.INT);
+            break;
+          case LONG:
+            type.setKind(OrcProto.Type.Kind.LONG);
+            break;
+          case FLOAT:
+            type.setKind(OrcProto.Type.Kind.FLOAT);
+            break;
+          case DOUBLE:
+            type.setKind(OrcProto.Type.Kind.DOUBLE);
+            break;
+          case STRING:
+            type.setKind(OrcProto.Type.Kind.STRING);
+            break;
+          case CHAR:
+            // The char length needs to be written to file and should be available
+            // from the object inspector
+            CharTypeInfo charTypeInfo = (CharTypeInfo) ((PrimitiveObjectInspector) inspector)
+                .getTypeInfo();
+            type.setKind(OrcProto.Type.Kind.CHAR);
+            type.setMaximumLength(charTypeInfo.getLength());
+            break;
+          case VARCHAR:
+            // The varchar length needs to be written to file and should be available
+            // from the object inspector
+            VarcharTypeInfo typeInfo = (VarcharTypeInfo) ((PrimitiveObjectInspector) inspector)
+                .getTypeInfo();
+            type.setKind(OrcProto.Type.Kind.VARCHAR);
+            type.setMaximumLength(typeInfo.getLength());
+            break;
+          case BINARY:
+            type.setKind(OrcProto.Type.Kind.BINARY);
+            break;
+          case TIMESTAMP:
+            type.setKind(OrcProto.Type.Kind.TIMESTAMP);
+            break;
+          case DATE:
+            type.setKind(OrcProto.Type.Kind.DATE);
+            break;
+          case DECIMAL:
+            DecimalTypeInfo decTypeInfo = (DecimalTypeInfo) ((PrimitiveObjectInspector) inspector)
+                .getTypeInfo();
+            type.setKind(OrcProto.Type.Kind.DECIMAL);
+            type.setPrecision(decTypeInfo.precision());
+            type.setScale(decTypeInfo.scale());
+            break;
+          default:
+            throw new IllegalArgumentException("Unknown primitive category: " +
+                ((PrimitiveObjectInspector) inspector).getPrimitiveCategory());
+        }
+        result.add(type.build());
+        break;
+      case LIST:
+        type.setKind(OrcProto.Type.Kind.LIST);
+        result.add(type.build());
+        getOrcTypesImpl(result, ((ListObjectInspector) inspector).getListElementObjectInspector());
+        break;
+      case MAP:
+        type.setKind(OrcProto.Type.Kind.MAP);
+        result.add(type.build());
+        getOrcTypesImpl(result, ((MapObjectInspector) inspector).getMapKeyObjectInspector());
+        getOrcTypesImpl(result, ((MapObjectInspector) inspector).getMapValueObjectInspector());
+        break;
+      case STRUCT:
+        type.setKind(OrcProto.Type.Kind.STRUCT);
+        result.add(type.build());
+        for (StructField field : ((StructObjectInspector) inspector).getAllStructFieldRefs()) {
+          getOrcTypesImpl(result, field.getFieldObjectInspector());
+        }
+        break;
+      case UNION:
+        type.setKind(OrcProto.Type.Kind.UNION);
+        result.add(type.build());
+        for (ObjectInspector oi : ((UnionObjectInspector) inspector).getObjectInspectors()) {
+          getOrcTypesImpl(result, oi);
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown category: " + inspector.getCategory());
+    }
+  }
+}
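
The matching logic above only widens within the integer family (SHORT to
INT or LONG, INT to LONG), since the same integer tree readers serve all
three widths; any other mismatch falls back to the file schema. Distilled
into a standalone predicate (isWideningPromotion is a name invented here,
not part of the patch):

  // Sketch of the per-column rule applied in getMatchingSchema above.
  static boolean isWideningPromotion(OrcProto.Type.Kind file,
      OrcProto.Type.Kind read) {
    switch (file) {
      case SHORT:
        return read == OrcProto.Type.Kind.INT
            || read == OrcProto.Type.Kind.LONG;
      case INT:
        return read == OrcProto.Type.Kind.LONG;
      default:
        return false;
    }
  }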

http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index a5a5943..58e19cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -199,7 +199,7 @@ class RecordReaderImpl implements RecordReader {
     firstRow = skippedRows;
     totalRowCount = rows;
     boolean skipCorrupt = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA);
-    reader = TreeReaderFactory.createTreeReader(0, types, included, skipCorrupt);
+    reader = RecordReaderFactory.createTreeReader(0, conf, types, included, skipCorrupt);
     indexes = new OrcProto.RowIndex[types.size()];
     bloomFilterIndices = new OrcProto.BloomFilterIndex[types.size()];
     advanceToNextRow(reader, 0L, true);

http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_int_type_promotion.q b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
new file mode 100644
index 0000000..4a805a0
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_int_type_promotion.q
@@ -0,0 +1,79 @@
+create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile;
+
+create table if not exists alltypes_orc (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) stored as orc;
+
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+
+insert overwrite table alltypes_orc select * from alltypes;
+
+select * from alltypes_orc;
+
+alter table alltypes_orc change si si int;
+select * from alltypes_orc;
+
+alter table alltypes_orc change si si bigint;
+alter table alltypes_orc change i i bigint;
+select * from alltypes_orc;
+
+alter table alltypes_orc change l l array<bigint>;
+select * from alltypes_orc;
+
+set hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+alter table alltypes_orc change si si smallint;
+alter table alltypes_orc change i i int;
+
+explain select ti, si, i, bi from alltypes_orc;
+select ti, si, i, bi from alltypes_orc;
+
+alter table alltypes_orc change si si int;
+select ti, si, i, bi from alltypes_orc;
+
+alter table alltypes_orc change si si bigint;
+alter table alltypes_orc change i i bigint;
+select ti, si, i, bi from alltypes_orc;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+create table src_part_orc (key int, value string) partitioned by (ds string) stored as orc;
+insert overwrite table src_part_orc partition(ds) select key, value, ds from srcpart where ds is not null;
+
+select * from src_part_orc limit 10;
+
+alter table src_part_orc change key key bigint;
+select * from src_part_orc limit 10;

http://git-wip-us.apache.org/repos/asf/hive/blob/08b0708b/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out b/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out
new file mode 100644
index 0000000..d26dff2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_int_type_promotion.q.out
@@ -0,0 +1,377 @@
+PREHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: create table if not exists alltypes_orc (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: create table if not exists alltypes_orc (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@alltypes
+POSTHOOK: query: load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@alltypes
+PREHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: insert overwrite table alltypes_orc select * from alltypes
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes
+POSTHOOK: Output: default@alltypes_orc
+POSTHOOK: Lineage: alltypes_orc.bi SIMPLE [(alltypes)alltypes.FieldSchema(name:bi, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.bo SIMPLE [(alltypes)alltypes.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.c SIMPLE [(alltypes)alltypes.FieldSchema(name:c, type:char(5), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.d SIMPLE [(alltypes)alltypes.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.da SIMPLE [(alltypes)alltypes.FieldSchema(name:da, type:date, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.de SIMPLE [(alltypes)alltypes.FieldSchema(name:de, type:decimal(10,3), comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.f SIMPLE [(alltypes)alltypes.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.i SIMPLE [(alltypes)alltypes.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.l SIMPLE [(alltypes)alltypes.FieldSchema(name:l, type:array<int>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.m SIMPLE [(alltypes)alltypes.FieldSchema(name:m, type:map<string,string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.s SIMPLE [(alltypes)alltypes.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.si SIMPLE [(alltypes)alltypes.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.st SIMPLE [(alltypes)alltypes.FieldSchema(name:st, type:struct<c1:int,c2:string>, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ti SIMPLE [(alltypes)alltypes.FieldSchema(name:ti, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.ts SIMPLE [(alltypes)alltypes.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypes_orc.vc SIMPLE [(alltypes)alltypes.FieldSchema(name:vc, type:varchar(5), comment:null), ]
+PREHOOK: query: select * from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+true	10	100	1000	10000	4.0	20.0	4.222	1969-12-31 15:59:58.174	1970-01-01	string	hello	hello	{"k1":"v1","k2":"v2"}	[100,200]	{"c1":null,"c2":" \"foo\"}"}
+false	20	200	2000	20000	8.0	40.0	2.222	1970-12-31 15:59:58.174	1971-01-01	abcd	world	world	{"k3":"v3","k4":"v4"}	[200,300]	{"c1":null,"c2":" \"bar\"}"}
+PREHOOK: query: alter table alltypes_orc change si si int
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change si si int
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: select * from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+true	10	100	1000	10000	4.0	20.0	4.222	1969-12-31 15:59:58.174	1970-01-01	string	hello	hello	{"k1":"v1","k2":"v2"}	[100,200]	{"c1":null,"c2":" \"foo\"}"}
+false	20	200	2000	20000	8.0	40.0	2.222	1970-12-31 15:59:58.174	1971-01-01	abcd	world	world	{"k3":"v3","k4":"v4"}	[200,300]	{"c1":null,"c2":" \"bar\"}"}
+PREHOOK: query: alter table alltypes_orc change si si bigint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change si si bigint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: alter table alltypes_orc change i i bigint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change i i bigint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: select * from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+true	10	100	1000	10000	4.0	20.0	4.222	1969-12-31 15:59:58.174	1970-01-01	string	hello	hello	{"k1":"v1","k2":"v2"}	[100,200]	{"c1":null,"c2":" \"foo\"}"}
+false	20	200	2000	20000	8.0	40.0	2.222	1970-12-31 15:59:58.174	1971-01-01	abcd	world	world	{"k3":"v3","k4":"v4"}	[200,300]	{"c1":null,"c2":" \"bar\"}"}
+PREHOOK: query: alter table alltypes_orc change l l array<bigint>
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change l l array<bigint>
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: select * from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select * from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+true	10	100	1000	10000	4.0	20.0	4.222	1969-12-31 15:59:58.174	1970-01-01	string	hello	hello	{"k1":"v1","k2":"v2"}	[100,200]	{"c1":null,"c2":" \"foo\"}"}
+false	20	200	2000	20000	8.0	40.0	2.222	1970-12-31 15:59:58.174	1971-01-01	abcd	world	world	{"k3":"v3","k4":"v4"}	[200,300]	{"c1":null,"c2":" \"bar\"}"}
+PREHOOK: query: alter table alltypes_orc change si si smallint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change si si smallint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: alter table alltypes_orc change i i int
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change i i int
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: explain select ti, si, i, bi from alltypes_orc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select ti, si, i, bi from alltypes_orc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypes_orc
+            Statistics: Num rows: 88 Data size: 1772 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 88 Data size: 1772 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 88 Data size: 1772 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ti, si, i, bi from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select ti, si, i, bi from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+10	100	1000	10000
+20	200	2000	20000
+PREHOOK: query: alter table alltypes_orc change si si int
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change si si int
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: select ti, si, i, bi from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select ti, si, i, bi from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+10	100	1000	10000
+20	200	2000	20000
+PREHOOK: query: alter table alltypes_orc change si si bigint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change si si bigint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: alter table alltypes_orc change i i bigint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@alltypes_orc
+PREHOOK: Output: default@alltypes_orc
+POSTHOOK: query: alter table alltypes_orc change i i bigint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@alltypes_orc
+POSTHOOK: Output: default@alltypes_orc
+PREHOOK: query: select ti, si, i, bi from alltypes_orc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+POSTHOOK: query: select ti, si, i, bi from alltypes_orc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypes_orc
+#### A masked pattern was here ####
+10	100	1000	10000
+20	200	2000	20000
+PREHOOK: query: create table src_part_orc (key int, value string) partitioned by (ds string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_part_orc
+POSTHOOK: query: create table src_part_orc (key int, value string) partitioned by (ds string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_part_orc
+PREHOOK: query: insert overwrite table src_part_orc partition(ds) select key, value, ds from srcpart where ds is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@src_part_orc
+POSTHOOK: query: insert overwrite table src_part_orc partition(ds) select key, value, ds from srcpart where ds is not null
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@src_part_orc@ds=2008-04-08
+POSTHOOK: Output: default@src_part_orc@ds=2008-04-09
+POSTHOOK: Lineage: src_part_orc PARTITION(ds=2008-04-08).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_part_orc PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_part_orc PARTITION(ds=2008-04-09).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_part_orc PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from src_part_orc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_part_orc
+PREHOOK: Input: default@src_part_orc@ds=2008-04-08
+PREHOOK: Input: default@src_part_orc@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_part_orc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_part_orc
+POSTHOOK: Input: default@src_part_orc@ds=2008-04-08
+POSTHOOK: Input: default@src_part_orc@ds=2008-04-09
+#### A masked pattern was here ####
+238	val_238	2008-04-08
+86	val_86	2008-04-08
+311	val_311	2008-04-08
+27	val_27	2008-04-08
+165	val_165	2008-04-08
+409	val_409	2008-04-08
+255	val_255	2008-04-08
+278	val_278	2008-04-08
+98	val_98	2008-04-08
+484	val_484	2008-04-08
+PREHOOK: query: alter table src_part_orc change key key bigint
+PREHOOK: type: ALTERTABLE_RENAMECOL
+PREHOOK: Input: default@src_part_orc
+PREHOOK: Output: default@src_part_orc
+POSTHOOK: query: alter table src_part_orc change key key bigint
+POSTHOOK: type: ALTERTABLE_RENAMECOL
+POSTHOOK: Input: default@src_part_orc
+POSTHOOK: Output: default@src_part_orc
+PREHOOK: query: select * from src_part_orc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_part_orc
+PREHOOK: Input: default@src_part_orc@ds=2008-04-08
+PREHOOK: Input: default@src_part_orc@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_part_orc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_part_orc
+POSTHOOK: Input: default@src_part_orc@ds=2008-04-08
+POSTHOOK: Input: default@src_part_orc@ds=2008-04-09
+#### A masked pattern was here ####
+238	val_238	2008-04-08
+86	val_86	2008-04-08
+311	val_311	2008-04-08
+27	val_27	2008-04-08
+165	val_165	2008-04-08
+409	val_409	2008-04-08
+255	val_255	2008-04-08
+278	val_278	2008-04-08
+98	val_98	2008-04-08
+484	val_484	2008-04-08


[05/39] hive git commit: HIVE-10484: Vectorization : RuntimeException "Big Table Retained Mapping duplicate column" (Matt McCline reviewed by Vikram Dixit)

Posted by pr...@apache.org.
HIVE-10484: Vectorization : RuntimeException "Big Table Retained Mapping duplicate column" (Matt McCline reviewed by Vikram Dixit)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd8d59e4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd8d59e4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd8d59e4

Branch: refs/heads/llap
Commit: bd8d59e40ae87fc23c030fd1dda19a4ddfe3cb5b
Parents: 2531040
Author: vikram <vi...@hortonworks.com>
Authored: Wed May 6 17:27:52 2015 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Wed May 6 17:27:52 2015 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java     | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bd8d59e4/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
index b215f70..a9082eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/VectorMapJoinCommonOperator.java
@@ -321,7 +321,10 @@ public abstract class VectorMapJoinCommonOperator extends MapJoinOperator implem
       projectionMapping.add(nextOutputColumn, batchColumnIndex, typeName);
 
       // Collect columns we copy from the big table batch to the overflow batch.
-      bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName);
+      if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
+        // Tolerate repeated use of a big table column.
+        bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeName);
+      }
 
       nextOutputColumn++;
     }
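
The guard above makes the retained-mapping add idempotent when a query projects
the same big-table column more than once. For illustration, a minimal
self-contained sketch of that pattern, assuming a simplified mapping class
(ColumnMapping and RetainedMappingDemo are illustrative stand-ins, not Hive's
actual classes):

import java.util.HashMap;
import java.util.Map;

// Simplified stand-in for Hive's vector column mapping: tracks which
// output columns have already been assigned a type.
class ColumnMapping {
  private final Map<Integer, String> outputToType = new HashMap<>();

  boolean containsOutputColumn(int outputColumn) {
    return outputToType.containsKey(outputColumn);
  }

  void add(int outputColumn, int inputColumn, String typeName) {
    if (outputToType.containsKey(outputColumn)) {
      // Without the call-site guard, repeated columns hit this path,
      // which is the RuntimeException reported in HIVE-10484.
      throw new RuntimeException("Big Table Retained Mapping duplicate column " + outputColumn);
    }
    outputToType.put(outputColumn, typeName);
  }
}

public class RetainedMappingDemo {
  public static void main(String[] args) {
    ColumnMapping bigTableRetainedMapping = new ColumnMapping();
    // A query can reference the same big-table batch column twice.
    int[] batchColumns = {2, 5, 2};
    for (int batchColumnIndex : batchColumns) {
      // The HIVE-10484 guard: tolerate repeated use of a big table column.
      if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
        bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, "int");
      }
    }
    System.out.println("No duplicate-column exception thrown.");
  }
}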


[21/39] hive git commit: HIVE-10453 : HS2 leaking open file descriptors when using UDFs (Yongzhi Chen via Szehon)

Posted by pr...@apache.org.
HIVE-10453 : HS2 leaking open file descriptors when using UDFs (Yongzhi Chen via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/42f88ca9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/42f88ca9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/42f88ca9

Branch: refs/heads/llap
Commit: 42f88ca9a105956a2085b75dae681d62dd784cef
Parents: 6176333
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu May 7 11:20:53 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu May 7 11:20:53 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/Registry.java    | 29 ++++++++++++++++++--
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  5 ++++
 .../hadoop/hive/ql/session/SessionState.java    |  4 +--
 3 files changed, 34 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index 2ba91d0..a5d59ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hive.ql.exec;
 
 import com.google.common.base.Splitter;
 import com.google.common.collect.Sets;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo.FunctionResource;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -43,9 +45,12 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import java.io.IOException;
+import java.net.URLClassLoader;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -66,6 +71,7 @@ public class Registry {
    */
   private final Map<String, FunctionInfo> mFunctions = new LinkedHashMap<String, FunctionInfo>();
   private final Set<Class<?>> builtIns = Collections.synchronizedSet(new HashSet<Class<?>>());
+  private final Set<ClassLoader> mSessionUDFLoaders = new LinkedHashSet<ClassLoader>();
 
   private final boolean isNative;
 
@@ -443,7 +449,6 @@ public class Registry {
   // should be called after session registry is checked
   private FunctionInfo registerToSessionRegistry(String qualifiedName, FunctionInfo function) {
     FunctionInfo ret = null;
-
     ClassLoader prev = Utilities.getSessionSpecifiedClassLoader();
     try {
       // Found UDF in metastore - now add it to the function registry
@@ -455,7 +460,6 @@ public class Registry {
         LOG.error("Unable to load resources for " + qualifiedName + ":" + e, e);
         return null;
       }
-
       ClassLoader loader = Utilities.getSessionSpecifiedClassLoader();
       Class<?> udfClass = Class.forName(function.getClassName(), true, loader);
 
@@ -463,6 +467,9 @@ public class Registry {
       if (ret == null) {
         LOG.error(function.getClassName() + " is not a valid UDF class and was not registered.");
       }
+      if (SessionState.get().isHiveServerQuery()) {
+        SessionState.getRegistryForWrite().addToUDFLoaders(loader);
+      }
     } catch (ClassNotFoundException e) {
      // Lookup of UDF class failed
       LOG.error("Unable to load UDF class: " + e);
@@ -489,6 +496,24 @@ public class Registry {
     builtIns.clear();
   }
 
+  public synchronized void closeCUDFLoaders() {
+    try {
+      for (ClassLoader loader : mSessionUDFLoaders) {
+        JavaUtils.closeClassLoader(loader);
+      }
+    } catch (IOException ie) {
+      LOG.error("Error in closing loader: " + ie);
+    }
+    mSessionUDFLoaders.clear();
+  }
+
+  public synchronized void addToUDFLoaders(ClassLoader loader) {
+    mSessionUDFLoaders.add(loader);
+  }
+  public synchronized void removeFromUDFLoaders(ClassLoader loader) {
+    mSessionUDFLoaders.remove(loader);
+  }
+
   /**
    * Setup blocked flag for all builtin UDFs as per udf whitelist and blacklist
    * @param whiteListStr

http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index ad5c8f8..7b48b8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -2250,6 +2250,11 @@ public final class Utilities {
       }
     }
     JavaUtils.closeClassLoader(loader);
+    // This loader is closed; remove it from the cached registry loaders so it is not closed again.
+    Registry reg = SessionState.getRegistry();
+    if(reg != null) {
+      reg.removeFromUDFLoaders(loader);
+    }
 
     loader = new URLClassLoader(newPath.toArray(new URL[0]));
     curThread.setContextClassLoader(loader);

http://git-wip-us.apache.org/repos/asf/hive/blob/42f88ca9/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 8db78e5..37b6d6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -1458,7 +1458,7 @@ public class SessionState {
   }
 
   public void close() throws IOException {
-    registry.clear();;
+    registry.clear();
     if (txnMgr != null) txnMgr.closeTxnManager();
     JavaUtils.closeClassLoadersTo(conf.getClassLoader(), parentLoader);
     File resourceDir =
@@ -1493,7 +1493,7 @@ public class SessionState {
         sparkSession = null;
       }
     }
-
+    registry.closeCUDFLoaders();
     dropSessionPaths(conf);
   }
 


[38/39] hive git commit: HIVE-10676 : Update Hive's README to mention spark, and to remove jdk1.6 (Sushanth Sowmyan, reviewed by Alan Gates)

Posted by pr...@apache.org.
HIVE-10676 : Update Hive's README to mention spark, and to remove jdk1.6 (Sushanth Sowmyan, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/433714fa
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/433714fa
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/433714fa

Branch: refs/heads/llap
Commit: 433714fa31e347b4d6d3641be8e38186fe4a895b
Parents: f65528a
Author: Sushanth Sowmyan <kh...@gmail.com>
Authored: Mon May 11 16:56:49 2015 -0700
Committer: Sushanth Sowmyan <kh...@gmail.com>
Committed: Mon May 11 16:57:44 2015 -0700

----------------------------------------------------------------------
 README.txt | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/433714fa/README.txt
----------------------------------------------------------------------
diff --git a/README.txt b/README.txt
index 4ec4615..662dba6 100644
--- a/README.txt
+++ b/README.txt
@@ -27,18 +27,24 @@ capabilities of the language. QL can also be extended with custom
 scalar functions (UDF's), aggregations (UDAF's), and table
 functions (UDTF's).
 
-Hive users have a choice of 2 runtimes when executing SQL queries.
-Users can choose to use the Apache Hadoop MapReduce framework,
-which is mature and proven at large scales. MapReduce is a purely
-batch framework, and queries run using the MapReduce framework
-may experience higher latencies (tens of seconds), even
-over small datasets. Alternatively, users can choose to use the
-newer Apache Tez framework to process SQL queries. Tez is
-designed for interactive query and has substantially reduced
-overheads versus MapReduce. Users are free to switch back and
-forth between these frameworks at any time. In either case,
-Hive is best suited for use cases where the amount of data
-processed is large enough to require a distributed system.
+Hive users have a choice of 3 runtimes when executing SQL queries.
+Users can choose between Apache Hadoop MapReduce, Apache Tez or
+Apache Spark frameworks as their execution backend. MapReduce is a
+mature framework that is proven at large scales. However, MapReduce
+is a purely batch framework, and queries using it may experience
+higher latencies (tens of seconds), even over small datasets. Apache
+Tez is designed for interactive query, and has substantially reduced
+overheads versus MapReduce. Apache Spark is a cluster computing
+framework that's built outside of MapReduce, but on top of HDFS,
+with a notion of a composable and transformable distributed
+collection of items called a Resilient Distributed Dataset (RDD),
+which allows processing and analysis without the traditional
+intermediate stages that MapReduce introduces.
+
+Users are free to switch back and forth between these frameworks
+at any time. In each case, Hive is best suited for use cases
+where the amount of data processed is large enough to require a
+distributed system.
 
 Hive is not designed for online transaction processing and does
 not support row level insert/updates. It is best used for batch
@@ -73,7 +79,7 @@ Getting Started
 Requirements
 ============
 
-- Java 1.6, 1.7
+- Java 1.7
 
 - Hadoop 1.x, 2.x
 


[03/39] hive git commit: HIVE-9743: Incorrect result set for vectorized left outer join (Matt McCline, reviewed by Vikram Dixit)

Posted by pr...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/25310407/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
new file mode 100644
index 0000000..564f59d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
@@ -0,0 +1,13807 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+create table t1 stored as orc as select cast(key as int) key, value from src where key <= 10
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: select * from t1 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: create table t2 stored as orc as select cast(2*key as int) key, value from t1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: select * from t2 sort by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 sort by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+20	val_10
+4	val_2
+8	val_4
+PREHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t3
+POSTHOOK: query: create table t3 stored as orc as select * from (select * from t1 union all select * from t2) b
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t3
+PREHOOK: query: select * from t3 sort by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 sort by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+10	val_5
+10	val_5
+10	val_5
+16	val_8
+18	val_9
+2	val_2
+20	val_10
+4	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_4
+8	val_8
+9	val_9
+PREHOOK: query: create table t4 (key int, value string) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t4
+POSTHOOK: query: create table t4 (key int, value string) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t4
+PREHOOK: query: select * from t4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key=b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+4	val_4
+8	val_8
+PREHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join t1 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t4 b on b.key=a.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t4
+#### A masked pattern was here ####
+PREHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col1 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 15) (type: boolean)
+                    Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int), key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: int)
+                        Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join t3 b on (b.key = a.key and b.key < '15') sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_0
+val_0
+val_0
+val_10
+val_2
+val_4
+val_5
+val_5
+val_5
+val_8
+val_9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: ((value < 'val_10') and key is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: key, value
+                      Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int), value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 2 Data size: 186 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = b.key and b.value < "val_10" sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+PREHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t3
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 5) (type: boolean)
+                    Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 59 Data size: 236 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 64 Data size: 259 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key from t3 where key > 5) b on a.key = b.key sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+val_10
+val_8
+val_9
+PREHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t2
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int), _col1 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: string)
+                          sort order: +
+                          Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select a.value from t1 a left semi join (select key , value from t2 where key > 5) b on a.key = b.key and b.value <= 'val_20' sort by a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+PREHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: t1
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t2 a left semi join (select key , value from t1 where key > 2) b on a.key = b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+10	val_5
+10	val_5
+10	val_5
+4	val_2
+8	val_4
+PREHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 99 Data size: 396 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b) */ a.key from t3 a left semi join t1 b on a.key = b.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+PREHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int)
+                        1 (2 * _col0) (type: int)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (2 * key) is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: (2 * _col0) (type: int)
+                          sort order: +
+                          Map-reduce partition columns: (2 * _col0) (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6 Data size: 613 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a left semi join t2 b on a.key = 2*b.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+8	val_8
+PREHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                           Left Semi Join 1 to 2
+                      keys:
+                        0 key (type: int)
+                        1 key (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0, _col1, _col5, _col6
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                      Select Operator
+                        expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string)
+                        outputColumnNames: _col0, _col1, _col2, _col3
+                        Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                          value expressions: _col2 (type: int), _col3 (type: string)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: key (type: int)
+                      sort order: +
+                      Map-reduce partition columns: key (type: int)
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: value (type: string)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int), VALUE._col1 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t1 a join t2 b on a.key = b.key left semi join t3 c on b.key = c.key sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+10	val_10	10	val_5
+10	val_10	10	val_5
+10	val_10	10	val_5
+4	val_4	4	val_2
+8	val_8	8	val_4
+PREHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 6 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 2 Data size: 238 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                      keys:
+                        0 key (type: int), value (type: string)
+                        1 _col0 (type: int), _col1 (type: string)
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: string)
+                        sort order: ++
+                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int), value (type: string)
+                      outputColumnNames: key, value
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int), value (type: string)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: string)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
+                          Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from t3 a left semi join t1 b on a.key = b.key and a.value=b.value sort by a.key, a.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+2	val_2
+4	val_4
+5	val_5
+5	val_5
+5	val_5
+8	val_8
+9	val_9
+PREHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 90 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Left Semi Join 0 to 1
+                           Left Semi Join 0 to 2
+                      keys:
+                        0 key (type: int)
+                        1 _col0 (type: int)
+                        2 _col0 (type: int)
+                      outputColumnNames: _col0
+                      input vertices:
+                        1 Map 3
+                        2 Map 4
+                      Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: int)
+                      outputColumnNames: key
+                      Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: key (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 6 Data size: 558 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 198 Data size: 792 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select /*+ mapjoin(b, c) */ a.key from t3 a left semi join t1 b on a.key = b.key left semi join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Left Outer Join0 to 1
+                         Left Semi Join 1 to 2
+                    keys:
+                      0 key (type: int)
+                      1 key (type: int)
+                      2 _col0 (type: int)
+                    outputColumnNames: _col0
+                    input vertices:
+                      1 Map 3
+                      2 Map 4
+                    Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left outer join t1 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Outer Join 0 to 1
+                     Left Semi Join 1 to 2
+                keys:
+                  0 key (type: int)
+                  1 key (type: int)
+                  2 _col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t1 a full outer join t3 b on a.key = b.key left semi join t2 c on b.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Left Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+16
+18
+20
+4
+4
+8
+8
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Right Outer Join0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t2 b on a.key = b.key right outer join t1 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+4
+4
+8
+8
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 179 Data size: 716 Basic stats: COMPLETE Column stats: NONE
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: b
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: int)
+                    outputColumnNames: key
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: c
+                  Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: int)
+                    sort order: +
+                    Map-reduce partition columns: key (type: int)
+                    Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                     Outer Join 0 to 2
+                keys:
+                  0 key (type: int)
+                  1 _col0 (type: int)
+                  2 key (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 393 Data size: 1575 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select a.key from t3 a left semi join t1 b on a.key = b.key full outer join t2 c on a.key = c.key sort by a.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+10
+2
+4
+4
+5
+5
+5
+8
+8
+9
+NULL
+NULL
+NULL
+PREHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c on a.value = c.value sort by a.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE)
+        Reducer 

<TRUNCATED>

[17/39] hive git commit: HIVE-9365: The Metastore should take port configuration from hive-site.xml (Reuben Kuhnert, reviewed by Sergio Pena)

Posted by pr...@apache.org.
HIVE-9365: The Metastore should take port configuration from hive-site.xml (Reuben Kuhnert, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d434f645
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d434f645
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d434f645

Branch: refs/heads/llap
Commit: d434f6459050fbb1b12875b3015474fed2b6b914
Parents: e24662c
Author: Sergio Pena <se...@cloudera.com>
Authored: Thu May 7 10:05:45 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 7 10:05:45 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  3 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    | 27 +++++----
 .../hive/metastore/TestHiveMetastoreCli.java    | 63 ++++++++++++++++++++
 3 files changed, 82 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
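
In short: the patch removes the hard-coded DEFAULT_HIVE_METASTORE_PORT
constant and introduces a hive.metastore.port ConfVar (default 9083), so the
metastore listener port can now be set in hive-site.xml like any other
property. A minimal sketch of reading the new value programmatically,
assuming hive-site.xml is on the classpath (the MetastorePortCheck class
below is illustrative and not part of the patch):

    import org.apache.hadoop.hive.conf.HiveConf;

    public class MetastorePortCheck {
      public static void main(String[] args) {
        // HiveConf loads hive-site.xml from the classpath; per the patch,
        // hive.metastore.port falls back to 9083 when unset.
        HiveConf conf = new HiveConf();
        int port = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT);
        System.out.println("metastore port = " + port);
      }
    }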


http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 85e732f..5d5a928 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -115,6 +115,7 @@ public class HiveConf extends Configuration {
   public static final HiveConf.ConfVars[] metaVars = {
       HiveConf.ConfVars.METASTOREWAREHOUSE,
       HiveConf.ConfVars.METASTOREURIS,
+      HiveConf.ConfVars.METASTORE_SERVER_PORT,
       HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES,
       HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES,
       HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
@@ -392,7 +393,7 @@ public class HiveConf extends Configuration {
         "Number of retries while opening a connection to metastore"),
     METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
         "Number of retries upon failure of Thrift metastore calls"),
-
+    METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
     METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
         new TimeValidator(TimeUnit.SECONDS),
         "Number of seconds for the client to wait between consecutive connection attempts"),

http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 3f267ff..986579a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -259,7 +259,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   /**
    * default port on which to start the Hive server
    */
-  private static final int DEFAULT_HIVE_METASTORE_PORT = 9083;
   public static final String ADMIN = "admin";
   public static final String PUBLIC = "public";
 
@@ -5775,18 +5774,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
    *
    */
   static public class HiveMetastoreCli extends CommonCliOptions {
-    int port = DEFAULT_HIVE_METASTORE_PORT;
+    private int port;
 
     @SuppressWarnings("static-access")
-    public HiveMetastoreCli() {
+    public HiveMetastoreCli(Configuration configuration) {
       super("hivemetastore", true);
+      this.port = HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT);
 
       // -p port
       OPTIONS.addOption(OptionBuilder
           .hasArg()
           .withArgName("port")
           .withDescription("Hive Metastore port number, default:"
-              + DEFAULT_HIVE_METASTORE_PORT)
+              + this.port)
           .create('p'));
 
     }
@@ -5803,21 +5803,26 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             "This usage has been deprecated, consider using the new command "
                 + "line syntax (run with -h to see usage information)");
 
-        port = new Integer(args[0]);
+        this.port = new Integer(args[0]);
       }
 
       // notice that command line options take precedence over the
       // deprecated (old style) naked args...
+
       if (commandLine.hasOption('p')) {
-        port = Integer.parseInt(commandLine.getOptionValue('p'));
+        this.port = Integer.parseInt(commandLine.getOptionValue('p'));
       } else {
         // legacy handling
         String metastorePort = System.getenv("METASTORE_PORT");
         if (metastorePort != null) {
-          port = Integer.parseInt(metastorePort);
+          this.port = Integer.parseInt(metastorePort);
         }
       }
     }
+
+    public int getPort() {
+      return this.port;
+    }
   }
 
   /**
@@ -5825,7 +5830,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
    */
   public static void main(String[] args) throws Throwable {
     HiveConf.setLoadMetastoreConfig(true);
-    HiveMetastoreCli cli = new HiveMetastoreCli();
+    HiveConf conf = new HiveConf(HMSHandler.class);
+
+    HiveMetastoreCli cli = new HiveMetastoreCli(conf);
     cli.parse(args);
     final boolean isCliVerbose = cli.isVerbose();
     // NOTE: It is critical to do this prior to initializing log4j, otherwise
@@ -5851,7 +5858,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         System.err.println(msg);
       }
 
-      HiveConf conf = new HiveConf(HMSHandler.class);
 
       // set all properties specified on the command line
       for (Map.Entry<Object, Object> item : hiveconf.entrySet()) {
@@ -5870,11 +5876,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
       });
 
+
       Lock startLock = new ReentrantLock();
       Condition startCondition = startLock.newCondition();
       AtomicBoolean startedServing = new AtomicBoolean();
       startMetaStoreThreads(conf, startLock, startCondition, startedServing);
-      startMetaStore(cli.port, ShimLoader.getHadoopThriftAuthBridge(), conf, startLock,
+      startMetaStore(cli.getPort(), ShimLoader.getHadoopThriftAuthBridge(), conf, startLock,
           startCondition, startedServing);
     } catch (Throwable t) {
       // Catch the exception, log it and rethrow it.
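
The parse() logic above keeps the pre-existing precedence rules; only the
starting default now comes from the configuration. Distilled into a single
illustrative method (a restatement of the diff, not code from the patch;
resolvePort and its parameters are hypothetical):

    import org.apache.commons.cli.CommandLine;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.conf.HiveConf;

    final class PortResolution {
      // Condensed restatement of HiveMetastoreCli.parse() from the diff above.
      static int resolvePort(Configuration conf, CommandLine commandLine,
          String[] nakedArgs) {
        // 1. Baseline comes from hive.metastore.port (default 9083).
        int port = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_SERVER_PORT);
        // 2. The deprecated naked argument ("hivemetastore 9999") overrides it.
        if (nakedArgs.length > 0) {
          port = Integer.parseInt(nakedArgs[0]);
        }
        // 3. -p wins outright; otherwise the legacy METASTORE_PORT environment
        //    variable wins over both the naked argument and the config file.
        if (commandLine.hasOption('p')) {
          port = Integer.parseInt(commandLine.getOptionValue('p'));
        } else {
          String env = System.getenv("METASTORE_PORT");
          if (env != null) {
            port = Integer.parseInt(env);
          }
        }
        return port;
      }
    }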

http://git-wip-us.apache.org/repos/asf/hive/blob/d434f645/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
new file mode 100644
index 0000000..f581c7d
--- /dev/null
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetastoreCli.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Test;
+
+public class TestHiveMetastoreCli {
+  private static final String[] CLI_ARGUMENTS = { "9999" };
+
+  @Test
+  public void testDefaultCliPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    assert (cli.getPort() == HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT));
+  }
+
+  @Test
+  public void testOverriddenCliPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    cli.parse(TestHiveMetastoreCli.CLI_ARGUMENTS);
+
+    assert (cli.getPort() == 9999);
+  }
+
+  @Test
+  public void testOverriddenMetastoreServerPortValue() {
+    HiveConf configuration = new HiveConf();
+    HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
+
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+
+    assert (cli.getPort() == 12345);
+  }
+
+  @Test
+  public void testCliOverridesConfiguration() {
+    HiveConf configuration = new HiveConf();
+    HiveConf.setIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT, 12345);
+
+    HiveMetaStore.HiveMetastoreCli cli = new HiveMetaStore.HiveMetastoreCli(configuration);
+    cli.parse(CLI_ARGUMENTS);
+
+    assert (cli.getPort() == 9999);
+  }
+}
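
One nuance in testCliOverridesConfiguration above: CLI_ARGUMENTS supplies
9999 through the deprecated naked-argument path rather than -p, so the final
assertion expects 9999 instead of the configured 12345. Any command-line
input, even the legacy form, still beats the hive-site.xml value, matching
the "command line options take precedence" comment in parse().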