Posted to commits@hive.apache.org by na...@apache.org on 2012/09/06 18:34:47 UTC

svn commit: r1381669 - in /hive/trunk/ql/src: java/org/apache/hadoop/hive/ql/optimizer/ test/queries/clientpositive/ test/results/clientpositive/

Author: namit
Date: Thu Sep  6 16:34:46 2012
New Revision: 1381669

URL: http://svn.apache.org/viewvc?rev=1381669&view=rev
Log:
HIVE-3306 SMBJoin/BucketMapJoin should be allowed only when the join key expression exactly matches
the sort/cluster key (Navis via namit)
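
For orientation, here is a minimal sketch of the idea behind the stricter check. It is not the actual Hive code: the class and method names below are illustrative, and join keys are simplified to plain strings, whereas BucketMapJoinOptimizer works on ExprNodeDesc objects (see toColumns and checkBucketColumns in the patch). The rule is: every join key must be a plain column reference, and the keys must map to the bucket columns of each joined table in the same order.

import java.util.Arrays;
import java.util.List;

public class BucketJoinKeyCheck {

  // Mirrors toColumns(): accept only plain column references as join keys.
  // A computed key such as "key + key" yields null and the optimization is skipped.
  static List<String> toColumns(List<String> keyExprs) {
    for (String expr : keyExprs) {
      if (!expr.matches("[A-Za-z_][A-Za-z0-9_]*")) {
        return null;
      }
    }
    return keyExprs;
  }

  // Mirrors checkBucketColumns(): the join keys must cover all bucket columns,
  // and each key must map to the same bucket-column position ("accessing order")
  // for every table in the join; orders[] carries that position across tables.
  static boolean checkBucketColumns(List<String> bucketColumns, List<String> keys,
      Integer[] orders) {
    if (keys == null || bucketColumns == null || bucketColumns.isEmpty()) {
      return false;
    }
    for (int i = 0; i < keys.size(); i++) {
      int index = bucketColumns.indexOf(keys.get(i));
      if (orders[i] != null && orders[i] != index) {
        return false;
      }
      orders[i] = index;
    }
    return keys.containsAll(bucketColumns);
  }

  public static void main(String[] args) {
    List<String> keys = Arrays.asList("key", "value");
    Integer[] orders = new Integer[keys.size()];

    // test1 is clustered by (key): key -> 0, value -> -1; bucket columns are covered.
    boolean t1 = checkBucketColumns(Arrays.asList("key"), keys, orders);
    // test4 is clustered by (value, key): key now maps to 1, not 0, so the order differs.
    boolean t4 = checkBucketColumns(Arrays.asList("value", "key"), keys, orders);
    System.out.println(t1 + " " + t4);                                  // true false

    // A non-column join key (L.key + L.key) is rejected before any bucket check.
    System.out.println(toColumns(Arrays.asList("key + key")) == null);  // true
  }
}

Run as-is, the sketch prints "true false" and "true": the per-table check passes for test1 but fails for test4 because the accessing order changes, and a computed key never reaches the bucket check at all. This is the behavior exercised by the bucketmapjoin_negative3.q cases added below.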



Added:
    hive/trunk/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q
    hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
Modified:
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
    hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_1.q
    hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_2.q
    hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java?rev=1381669&r1=1381668&r2=1381669&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapJoinOptimizer.java Thu Sep  6 16:34:46 2012
@@ -39,7 +39,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
@@ -62,10 +61,8 @@ import org.apache.hadoop.hive.ql.parse.Q
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.MapJoinDesc;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 
 /**
  *this transformation does bucket map join optimization.
@@ -182,7 +179,7 @@ public class BucketMapJoinOptimizer impl
         }
       }
 
-      MapJoinDesc mjDecs = mapJoinOp.getConf();
+      MapJoinDesc mjDesc = mapJoinOp.getConf();
       LinkedHashMap<String, List<Integer>> aliasToPartitionBucketNumberMapping =
           new LinkedHashMap<String, List<Integer>>();
       LinkedHashMap<String, List<List<String>>> aliasToPartitionBucketFileNamesMapping =
@@ -197,6 +194,7 @@ public class BucketMapJoinOptimizer impl
       LinkedHashMap<Partition, List<String>> bigTblPartsToBucketFileNames = new LinkedHashMap<Partition, List<String>>();
       LinkedHashMap<Partition, Integer> bigTblPartsToBucketNumber = new LinkedHashMap<Partition, Integer>();
 
+      Integer[] orders = null; // accessing order of join cols to bucket cols, should be same
       boolean bigTablePartitioned = true;
       for (int index = 0; index < joinAliases.size(); index++) {
         String alias = joinAliases.get(index);
@@ -204,6 +202,14 @@ public class BucketMapJoinOptimizer impl
         if (tso == null) {
           return false;
         }
+        List<String> keys = toColumns(mjDesc.getKeys().get((byte) index));
+        if (keys == null || keys.isEmpty()) {
+          return false;
+        }
+        if (orders == null) {
+          orders = new Integer[keys.size()];
+        }
+
         Table tbl = topToTable.get(tso);
         if(tbl.isPartitioned()) {
           PrunedPartitionList prunedParts;
@@ -231,7 +237,7 @@ public class BucketMapJoinOptimizer impl
             List<Integer> buckets = new ArrayList<Integer>();
             List<List<String>> files = new ArrayList<List<String>>();
             for (Partition p : partitions) {
-              if (!checkBucketColumns(p.getBucketCols(), mjDecs, index)) {
+              if (!checkBucketColumns(p.getBucketCols(), keys, orders)) {
                 return false;
               }
               List<String> fileNames = getOnePartitionBucketFileNames(p.getDataLocation());
@@ -258,7 +264,7 @@ public class BucketMapJoinOptimizer impl
             }
           }
         } else {
-          if (!checkBucketColumns(tbl.getBucketCols(), mjDecs, index)) {
+          if (!checkBucketColumns(tbl.getBucketCols(), keys, orders)) {
             return false;
           }
           List<String> fileNames = getOnePartitionBucketFileNames(tbl.getDataLocation());
@@ -360,6 +366,17 @@ public class BucketMapJoinOptimizer impl
       return null;
     }
 
+    private List<String> toColumns(List<ExprNodeDesc> keys) {
+      List<String> columns = new ArrayList<String>();
+      for (ExprNodeDesc key : keys) {
+        if (!(key instanceof ExprNodeColumnDesc)) {
+          return null;
+        }
+        columns.add(((ExprNodeColumnDesc) key).getColumn());
+      }
+      return columns;
+    }
+
     // convert partition to partition spec string
     private Map<String, List<String>> convert(Map<Partition, List<String>> mapping) {
       Map<String, List<String>> converted = new HashMap<String, List<String>>();
@@ -433,42 +450,23 @@ public class BucketMapJoinOptimizer impl
       return fileNames;
     }
 
-    private boolean checkBucketColumns(List<String> bucketColumns, MapJoinDesc mjDesc, int index) {
-      List<ExprNodeDesc> keys = mjDesc.getKeys().get((byte)index);
-      if (keys == null || bucketColumns == null || bucketColumns.size() == 0) {
+    private boolean checkBucketColumns(List<String> bucketColumns, List<String> keys,
+        Integer[] orders) {
+      if (keys == null || bucketColumns == null || bucketColumns.isEmpty()) {
         return false;
       }
-
-      //get all join columns from join keys stored in MapJoinDesc
-      List<String> joinCols = new ArrayList<String>();
-      List<ExprNodeDesc> joinKeys = new ArrayList<ExprNodeDesc>();
-      joinKeys.addAll(keys);
-      while (joinKeys.size() > 0) {
-        ExprNodeDesc node = joinKeys.remove(0);
-        if (node instanceof ExprNodeColumnDesc) {
-          joinCols.addAll(node.getCols());
-        } else if (node instanceof ExprNodeGenericFuncDesc) {
-          ExprNodeGenericFuncDesc udfNode = ((ExprNodeGenericFuncDesc) node);
-          GenericUDF udf = udfNode.getGenericUDF();
-          if (!FunctionRegistry.isDeterministic(udf)) {
-            return false;
-          }
-          joinKeys.addAll(0, udfNode.getChildExprs());
-        } else {
+      for (int i = 0; i < keys.size(); i++) {
+        int index = bucketColumns.indexOf(keys.get(i));
+        if (orders[i] != null && orders[i] != index) {
           return false;
         }
+        orders[i] = index;
       }
-
       // Check if the join columns contains all bucket columns.
       // If a table is bucketized on column B, but the join key is A and B,
       // it is easy to see joining on different buckets yield empty results.
-      if (joinCols.size() == 0 || !joinCols.containsAll(bucketColumns)) {
-        return false;
-      }
-
-      return true;
+      return keys.containsAll(bucketColumns);
     }
-
   }
 
   class BucketMapjoinOptProcCtx implements NodeProcessorCtx {

Modified: hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_1.q?rev=1381669&r1=1381668&r2=1381669&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_1.q Thu Sep  6 16:34:46 2012
@@ -18,7 +18,7 @@ set hive.input.format = org.apache.hadoo
 
 -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;

Modified: hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_2.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_2.q?rev=1381669&r1=1381668&r2=1381669&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_2.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/bucket_map_join_2.q Thu Sep  6 16:34:46 2012
@@ -18,7 +18,7 @@ set hive.input.format = org.apache.hadoo
 
 -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value;

Added: hive/trunk/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q?rev=1381669&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q Thu Sep  6 16:34:46 2012
@@ -0,0 +1,39 @@
+drop table test1;
+drop table test2;
+drop table test3;
+drop table test4;
+
+create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets;
+create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets;
+create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets;
+create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets;
+
+load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
+load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1;
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1;
+
+load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2;
+load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2;
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2;
+
+load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3;
+load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3;
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3;
+
+load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4;
+load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4;
+load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4;
+
+set hive.optimize.bucketmapjoin = true;
+-- should be allowed
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value;
+
+-- should not apply bucket mapjoin
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key;
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value;
+explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value;

Modified: hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out?rev=1381669&r1=1381668&r2=1381669&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_1.q.out Thu Sep  6 16:34:46 2012
@@ -34,14 +34,14 @@ POSTHOOK: type: LOAD
 POSTHOOK: Output: default@table2
 PREHOOK: query: -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
@@ -76,13 +76,6 @@ STAGE PLANS:
                 0 [Column[key], Column[value]]
                 1 [Column[key], Column[value]]
               Position of Big Table: 0
-      Bucket Mapjoin Context:
-          Alias Bucket Base File Name Mapping:
-            b {SortCol1Col2.txt=[SortCol2Col1.txt]}
-          Alias Bucket File Name Mapping:
-#### A masked pattern was here ####
-          Alias Bucket Output File Name Mapping:
-#### A masked pattern was here ####
 
   Stage: Stage-1
     Map Reduce
@@ -242,6 +235,7 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
+
 PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table1

Modified: hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out?rev=1381669&r1=1381668&r2=1381669&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucket_map_join_2.q.out Thu Sep  6 16:34:46 2012
@@ -34,14 +34,14 @@ POSTHOOK: type: LOAD
 POSTHOOK: Output: default@table2
 PREHOOK: query: -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
 POSTHOOK: query: -- The tables are bucketed in same columns in different order,
 -- but sorted in different column orders
--- Bucketed map-join should be performed, not sort-merge join
+-- Neither bucketed map-join, nor sort-merge join should be performed
 
 explain extended
 select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
@@ -76,13 +76,6 @@ STAGE PLANS:
                 0 [Column[key], Column[value]]
                 1 [Column[key], Column[value]]
               Position of Big Table: 0
-      Bucket Mapjoin Context:
-          Alias Bucket Base File Name Mapping:
-            b {SortCol1Col2.txt=[SortCol2Col1.txt]}
-          Alias Bucket File Name Mapping:
-#### A masked pattern was here ####
-          Alias Bucket Output File Name Mapping:
-#### A masked pattern was here ####
 
   Stage: Stage-1
     Map Reduce
@@ -242,6 +235,7 @@ STAGE PLANS:
     Fetch Operator
       limit: -1
 
+
 PREHOOK: query: select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table1

Added: hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out?rev=1381669&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out (added)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out Thu Sep  6 16:34:46 2012
@@ -0,0 +1,1476 @@
+PREHOOK: query: drop table test1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table test1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table test2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table test2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table test3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table test3
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table test4
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table test4
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table test1 (key string, value string) clustered by (key) sorted by (key) into 3 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test1
+PREHOOK: query: create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table test2 (key string, value string) clustered by (value) sorted by (value) into 3 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test2
+PREHOOK: query: create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test3
+PREHOOK: query: create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@test4
+PREHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test1
+POSTHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test1
+PREHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test1
+POSTHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test1
+PREHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test1
+POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test1
+PREHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test2
+POSTHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test2
+PREHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test2
+POSTHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test2
+PREHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test2
+POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test2
+PREHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test3
+POSTHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test3
+PREHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test3
+POSTHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test3
+PREHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test3
+POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test3
+PREHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test4
+POSTHOOK: query: load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test4
+PREHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test4
+POSTHOOK: query: load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test4
+PREHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4
+PREHOOK: type: LOAD
+PREHOOK: Output: default@test4
+POSTHOOK: query: load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@test4
+PREHOOK: query: -- should be allowed
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: -- should be allowed
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+      Bucket Mapjoin Context:
+          Alias Bucket Base File Name Mapping:
+            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}
+          Alias Bucket File Name Mapping:
+#### A masked pattern was here ####
+          Alias Bucket Output File Name Mapping:
+#### A masked pattern was here ####
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test1
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test1 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test1
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test1
+            name: default.test1
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test2 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+      Bucket Mapjoin Context:
+          Alias Bucket Base File Name Mapping:
+            r {srcbucket20.txt=[srcbucket20.txt], srcbucket21.txt=[srcbucket21.txt], srcbucket22.txt=[srcbucket22.txt]}
+          Alias Bucket File Name Mapping:
+#### A masked pattern was here ####
+          Alias Bucket Output File Name Mapping:
+#### A masked pattern was here ####
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test2
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name value
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test2
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test2 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name value
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test2
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test2 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test2
+            name: default.test2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: -- should not apply bucket mapjoin
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- should not apply bucket mapjoin
+explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test1) R) (= (+ (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL L) key)) (. (TOK_TABLE_OR_COL R) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]
+                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key], Column[key]()]
+                1 [class org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge(Column[key]()]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test1
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test1 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test1
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test1
+            name: default.test1
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test2 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test2) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test1
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test1 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test1
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test1
+            name: default.test1
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test3 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test1
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test1 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test1
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test1
+            name: default.test1
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test4 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test1) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test1
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test1
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test1 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test1
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test1 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test1
+            name: default.test1
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test3 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test3) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test2
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name value
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test2
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test2 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name value
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test2
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test2 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test2
+            name: default.test2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test2 L join test4 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test2) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test2
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name value
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test2
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test2 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name value
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test2
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test2 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test2
+            name: default.test2
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test3 L join test4 R on L.key=R.key AND L.value=R.value
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME test3) L) (TOK_TABREF (TOK_TABNAME test4) R) (AND (= (. (TOK_TABLE_OR_COL L) key) (. (TOK_TABLE_OR_COL R) key)) (= (. (TOK_TABLE_OR_COL L) value) (. (TOK_TABLE_OR_COL R) value))))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_HINTLIST (TOK_HINT TOK_MAPJOIN (TOK_HINTARGLIST R))) (TOK_SELEXPR TOK_ALLCOLREF))))
+
+STAGE DEPENDENCIES:
+  Stage-3 is a root stage
+  Stage-1 depends on stages: Stage-3
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-3
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        r 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        r 
+          TableScan
+            alias: r
+            GatherStats: false
+            HashTable Sink Operator
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              Position of Big Table: 0
+
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        l 
+          TableScan
+            alias: l
+            GatherStats: false
+            Map Join Operator
+              condition map:
+                   Inner Join 0 to 1
+              condition expressions:
+                0 {key} {value}
+                1 {key} {value}
+              handleSkewJoin: false
+              keys:
+                0 [Column[key], Column[value]]
+                1 [Column[key], Column[value]]
+              outputColumnNames: _col0, _col1, _col4, _col5
+              Position of Big Table: 0
+              Select Operator
+                expressions:
+                      expr: _col0
+                      type: string
+                      expr: _col1
+                      type: string
+                      expr: _col4
+                      type: string
+                      expr: _col5
+                      type: string
+                outputColumnNames: _col0, _col1, _col4, _col5
+                Select Operator
+                  expressions:
+                        expr: _col0
+                        type: string
+                        expr: _col1
+                        type: string
+                        expr: _col4
+                        type: string
+                        expr: _col5
+                        type: string
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          serialization.format 1
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+      Local Work:
+        Map Reduce Local Work
+      Needs Tagging: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: test3
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              SORTBUCKETCOLSPREFIX TRUE
+              bucket_count 3
+              bucket_field_name key
+              columns key,value
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.test3
+              numFiles 3
+              numPartitions 0
+              numRows 0
+              rawDataSize 0
+              serialization.ddl struct test3 { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 4200
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                SORTBUCKETCOLSPREFIX TRUE
+                bucket_count 3
+                bucket_field_name key
+                columns key,value
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.test3
+                numFiles 3
+                numPartitions 0
+                numRows 0
+                rawDataSize 0
+                serialization.ddl struct test3 { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 4200
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.test3
+            name: default.test3
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+