You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ha...@apache.org on 2014/05/23 03:17:16 UTC

svn commit: r1596993 - in /hive/trunk: itests/util/src/main/java/org/apache/hadoop/hive/ql/ itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ ql/src/java/org/apache/hadoop/hive/ql/ ql/src/java/org/apache/hadoop/hive/ql/exec/ ql/src/java/org/ap...

Author: hashutosh
Date: Fri May 23 01:17:15 2014
New Revision: 1596993

URL: http://svn.apache.org/r1596993
Log:
HIVE-7095 : Fix test fails for both hadoop-1 and hadoop-2 (Navis via Ashutosh Chauhan)

Added:
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ShowMapredStatsHook.java
    hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
    hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
Modified:
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/MapRedStats.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
    hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    hive/trunk/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
    hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1.q
    hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
    hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out
    hive/trunk/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out
    hive/trunk/ql/src/test/results/clientpositive/nullformatCTAS.q.out
    hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out
    hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out
    hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out

Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java Fri May 23 01:17:15 2014
@@ -543,6 +543,19 @@ public class QTestUtil {
         db.dropDatabase(dbName, true, true, true);
       }
     }
+
+    // delete remaining directories for external tables (can affect stats for following tests)
+    try {
+      Path p = new Path(testWarehouse);
+      FileSystem fileSystem = p.getFileSystem(conf);
+      for (FileStatus status : fileSystem.listStatus(p)) {
+        if (status.isDir() && !srcTables.contains(status.getPath().getName())) {
+          fileSystem.delete(status.getPath(), true);
+        }
+      }
+    } catch (IllegalArgumentException e) {
+      // ignore.. provides invalid url sometimes intentionally
+    }
     SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
 
     List<String> roleNames = db.getAllRoleNames();

Added: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ShowMapredStatsHook.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ShowMapredStatsHook.java?rev=1596993&view=auto
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ShowMapredStatsHook.java (added)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/ShowMapredStatsHook.java Fri May 23 01:17:15 2014
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.hooks;
+
+import junit.framework.Assert;
+import org.apache.hadoop.hive.ql.MapRedStats;
+import org.apache.hadoop.hive.ql.session.SessionState;
+
+import java.util.Map;
+
+public class ShowMapredStatsHook implements ExecuteWithHookContext {
+
+  public void run(HookContext hookContext) {
+    SessionState ss = SessionState.get();
+    Assert.assertNotNull("SessionState returned null");
+
+    Map<String, MapRedStats> stats = ss.getMapRedStats();
+    if (stats != null && !stats.isEmpty()) {
+      for (Map.Entry<String, MapRedStats> stat : stats.entrySet()) {
+        SessionState.getConsole().printError(
+            stat.getKey() + "=" + stat.getValue().getTaskNumbers());
+      }
+    }
+  }
+}

Modified: hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java
URL: http://svn.apache.org/viewvc/hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java (original)
+++ hive/trunk/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/VerifyNumReducersHook.java Fri May 23 01:17:15 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.hooks;
 
-import java.util.List;
+import java.util.Map;
 
 import junit.framework.Assert;
 
@@ -42,9 +42,10 @@ public class VerifyNumReducersHook imple
     Assert.assertNotNull("SessionState returned null");
 
     int expectedReducers = hookContext.getConf().getInt(BUCKET_CONFIG, 0);
-    List<MapRedStats> stats = ss.getLastMapRedStatsList();
+    Map<String, MapRedStats> stats = ss.getMapRedStats();
     Assert.assertEquals("Number of MapReduce jobs is incorrect", 1, stats.size());
 
-    Assert.assertEquals("NumReducers is incorrect", expectedReducers, stats.get(0).getNumReduce());
+    MapRedStats stat = stats.values().iterator().next();
+    Assert.assertEquals("NumReducers is incorrect", expectedReducers, stat.getNumReduce());
   }
 }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/Driver.java Fri May 23 01:17:15 2014
@@ -1247,7 +1247,7 @@ public class Driver implements CommandPr
 
       this.driverCxt = driverCxt; // for canceling the query (should be bound to session?)
 
-      SessionState.get().setLastMapRedStatsList(new ArrayList<MapRedStats>());
+      SessionState.get().setMapRedStats(new LinkedHashMap<String, MapRedStats>());
       SessionState.get().setStackTraces(new HashMap<String, List<List<String>>>());
       SessionState.get().setLocalMapRedErrors(new HashMap<String, List<String>>());
 
@@ -1420,13 +1420,13 @@ public class Driver implements CommandPr
       }
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_EXECUTE);
 
-      if (SessionState.get().getLastMapRedStatsList() != null
-          && SessionState.get().getLastMapRedStatsList().size() > 0) {
+      Map<String, MapRedStats> stats = SessionState.get().getMapRedStats();
+      if (stats != null && !stats.isEmpty()) {
         long totalCpu = 0;
         console.printInfo("MapReduce Jobs Launched: ");
-        for (int i = 0; i < SessionState.get().getLastMapRedStatsList().size(); i++) {
-          console.printInfo("Job " + i + ": " + SessionState.get().getLastMapRedStatsList().get(i));
-          totalCpu += SessionState.get().getLastMapRedStatsList().get(i).getCpuMSec();
+        for (Map.Entry<String, MapRedStats> entry : stats.entrySet()) {
+          console.printInfo("Stage-" + entry.getKey() + ": " + entry.getValue());
+          totalCpu += entry.getValue().getCpuMSec();
         }
         console.printInfo("Total MapReduce CPU Time Spent: " + Utilities.formatMsecToStr(totalCpu));
       }

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/MapRedStats.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/MapRedStats.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/MapRedStats.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/MapRedStats.java Fri May 23 01:17:15 2014
@@ -94,6 +94,17 @@ public class MapRedStats {
     this.jobId = jobId;
   }
 
+  public String getTaskNumbers() {
+    StringBuilder sb = new StringBuilder();
+    if (numMap > 0) {
+      sb.append("Map: " + numMap + "  ");
+    }
+    if (numReduce > 0) {
+      sb.append("Reduce: " + numReduce + "  ");
+    }
+    return sb.toString();
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Fri May 23 01:17:15 2014
@@ -42,6 +42,7 @@ import java.util.Properties;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
+import java.util.TreeMap;
 import java.util.TreeSet;
 
 import org.apache.commons.lang.ArrayUtils;
@@ -2370,7 +2371,7 @@ public class DDLTask extends Task<DDLWor
 
       // Table properties
       String tbl_properties = "";
-      Map<String, String> properties = tbl.getParameters();
+      Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
       if (properties.size() > 0) {
         List<String> realProps = new ArrayList<String>();
         for (String key : properties.keySet()) {
@@ -3310,7 +3311,7 @@ public class DDLTask extends Task<DDLWor
         }
       }
       else {
-        Map<String, String> properties = tbl.getParameters();
+        Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
         for (Entry<String, String> entry : properties.entrySet()) {
           appendNonNull(builder, entry.getKey(), true);
           appendNonNull(builder, entry.getValue());

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java Fri May 23 01:17:15 2014
@@ -44,7 +44,6 @@ import org.apache.hadoop.hive.ql.exec.Ta
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
-import org.apache.hadoop.hive.ql.lockmgr.LockException;
 import org.apache.hadoop.hive.ql.plan.ReducerTimeStatsPerJob;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
@@ -550,7 +549,7 @@ public class HadoopJobExecHelper {
     // Not always there is a SessionState. Sometimes ExeDriver is directly invoked
     // for special modes. In that case, SessionState.get() is empty.
     if (SessionState.get() != null) {
-      SessionState.get().getLastMapRedStatsList().add(mapRedStats);
+      SessionState.get().getMapRedStats().put(getId(), mapRedStats);
 
       // Computes the skew for all the MapReduce irrespective
       // of Success or Failure

Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Fri May 23 01:17:15 2014
@@ -141,7 +141,7 @@ public class SessionState {
 
   private CreateTableAutomaticGrant createTableGrants;
 
-  private List<MapRedStats> lastMapRedStatsList;
+  private Map<String, MapRedStats> mapRedStats;
 
   private Map<String, String> hiveVariables;
 
@@ -885,12 +885,12 @@ public class SessionState {
     this.createTableGrants = createTableGrants;
   }
 
-  public List<MapRedStats> getLastMapRedStatsList() {
-    return lastMapRedStatsList;
+  public Map<String, MapRedStats> getMapRedStats() {
+    return mapRedStats;
   }
 
-  public void setLastMapRedStatsList(List<MapRedStats> lastMapRedStatsList) {
-    this.lastMapRedStatsList = lastMapRedStatsList;
+  public void setMapRedStats(Map<String, MapRedStats> mapRedStats) {
+    this.mapRedStats = mapRedStats;
   }
 
   public void setStackTraces(Map<String, List<List<String>>> stackTraces) {

Modified: hive/trunk/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q Fri May 23 01:17:15 2014
@@ -1,5 +1,4 @@
-set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
-set mapred.min.split.size = 64;
+set mapred.max.split.size = 32000000;
 
 CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
 
@@ -7,14 +6,6 @@ LOAD DATA LOCAL INPATH '../../data/files
 
 CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
 
-EXPLAIN INSERT OVERWRITE TABLE T2 SELECT * FROM (
-SELECT tmp1.name as name FROM (
-  SELECT name, 'MMM' AS n FROM T1) tmp1 
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp2
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp3
-  ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
-
-
 INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 
@@ -22,12 +13,24 @@ SELECT tmp1.name as name FROM (
   JOIN (SELECT 'MMM' AS n FROM T1) tmp3
   ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000;
 
-EXPLAIN SELECT COUNT(1) FROM T2;
-SELECT COUNT(1) FROM T2;
-
 CREATE TABLE T3(name STRING) STORED AS TEXTFILE;
 LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3;
 LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3;
 
-EXPLAIN SELECT COUNT(1) FROM T3;
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.ShowMapredStatsHook;
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+
+-- 2 splits by max.split.size
+SELECT COUNT(1) FROM T2;
+
+-- 1 split for two files
+SELECT COUNT(1) FROM T3;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+
+-- 1 split
+SELECT COUNT(1) FROM T2;
+
+-- 2 splits for two files
 SELECT COUNT(1) FROM T3;
+

Modified: hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1.q?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1.q Fri May 23 01:17:15 2014
@@ -3,6 +3,8 @@ set hive.enforce.sorting = true;
 set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+
 CREATE TABLE T1(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 

Added: hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1_23.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1_23.q?rev=1596993&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1_23.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_1_23.q Fri May 23 01:17:15 2014
@@ -0,0 +1,284 @@
+set hive.enforce.bucketing = true;
+set hive.enforce.sorting = true;
+set hive.exec.reducers.max = 10;
+set hive.map.groupby.sorted=true;
+
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+
+CREATE TABLE T1(key STRING, val STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+-- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1;
+
+CREATE TABLE outputTbl1(key int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key
+-- matches the sorted key
+-- adding an order by at the end to make the test results deterministic
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T1 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T1 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
+
+-- no map-side group by even if the group by key is a superset of sorted key
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl2
+SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+
+INSERT OVERWRITE TABLE outputTbl2
+SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+
+SELECT * FROM outputTbl2 ORDER BY key1, key2;
+
+-- It should work for sub-queries
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- It should work for sub-queries with column aliases
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant followed
+-- by a match to the sorted key
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl3
+SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+
+INSERT OVERWRITE TABLE outputTbl3
+SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+
+SELECT * FROM outputTbl3 ORDER BY key1, key2;
+
+CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
+
+-- no map-side group by if the group by key contains a constant followed by another column
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+-- no map-side group by if the group by key contains a function
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl3
+SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+
+INSERT OVERWRITE TABLE outputTbl3
+SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+
+SELECT * FROM outputTbl3 ORDER BY key1, key2;
+
+-- it should not matter what follows the group by
+-- test various cases
+
+-- group by followed by another group by
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key + key, sum(cnt) from
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+group by key + key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key + key, sum(cnt) from
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+group by key + key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a union
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key, count(1) FROM T1 GROUP BY key
+) subq1;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key, count(1) FROM T1 GROUP BY key
+) subq1;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a union where one of the sub-queries is map-side group by
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key + key as key, count(1) FROM T1 GROUP BY key + key
+) subq1;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) as cnt FROM T1 GROUP BY key
+  UNION ALL
+SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key
+) subq1;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a join
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+ON subq1.key = subq2.key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+ON subq1.key = subq2.key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a join where one of the sub-queries can be performed in the mapper
+EXPLAIN EXTENDED 
+SELECT * FROM 
+(SELECT key, count(1) FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2
+ON subq1.key = subq2.key;
+
+CREATE TABLE T2(key STRING, val STRING)
+CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
+
+-- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T2 select key, val from T1;
+
+-- no mapside sort group by if the group by is a prefix of the sorted key
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T2 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T2 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys followed by anything
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl5
+SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+
+INSERT OVERWRITE TABLE outputTbl5
+SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+
+SELECT * FROM outputTbl5 
+ORDER BY key1, key2, key3, key4;
+
+-- constants from sub-queries should work fine
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, constant, val, count(1) from 
+(SELECT key, 1 as constant, val from T2)subq
+group by key, constant, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, constant, val, count(1) from 
+(SELECT key, 1 as constant, val from T2)subq
+group by key, constant, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+-- multiple levels of constants from sub-queries should work fine
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl4
+select key, constant3, val, count(1) from
+(
+SELECT key, constant as constant2, val, 2 as constant3 from 
+(SELECT key, 1 as constant, val from T2)subq
+)subq2
+group by key, constant3, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+select key, constant3, val, count(1) from
+(
+SELECT key, constant as constant2, val, 2 as constant3 from 
+(SELECT key, 1 as constant, val from T2)subq
+)subq2
+group by key, constant3, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+set hive.map.aggr=true;
+set hive.multigroupby.singlereducer=false;
+set mapred.reduce.tasks=31;
+
+CREATE TABLE DEST1(key INT, cnt INT);
+CREATE TABLE DEST2(key INT, val STRING, cnt INT);
+
+SET hive.exec.compress.intermediate=true;
+SET hive.exec.compress.output=true; 
+
+EXPLAIN
+FROM T2
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+FROM T2
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+select * from DEST1 ORDER BY key, cnt;
+select * from DEST2 ORDER BY key, val, val;
+
+-- multi-table insert with a sub-query
+EXPLAIN
+FROM (select key, val from T2 where key = 8) x
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+FROM (select key, val from T2 where key = 8) x
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+select * from DEST1 ORDER BY key, cnt;
+select * from DEST2 ORDER BY key, val, cnt;

Modified: hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q (original)
+++ hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q Fri May 23 01:17:15 2014
@@ -4,6 +4,8 @@ set hive.exec.reducers.max = 10;
 set hive.map.groupby.sorted=true;
 set hive.groupby.skewindata=true;
 
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+
 CREATE TABLE T1(key STRING, val STRING)
 CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
 

Added: hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q?rev=1596993&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/groupby_sort_skew_1_23.q Fri May 23 01:17:15 2014
@@ -0,0 +1,285 @@
+set hive.enforce.bucketing = true;
+set hive.enforce.sorting = true;
+set hive.exec.reducers.max = 10;
+set hive.map.groupby.sorted=true;
+set hive.groupby.skewindata=true;
+
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+
+CREATE TABLE T1(key STRING, val STRING)
+CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+-- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T1 select key, val from T1;
+
+CREATE TABLE outputTbl1(key int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key
+-- matches the sorted key
+-- adding an order by at the end to make the test results deterministic
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T1 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T1 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+CREATE TABLE outputTbl2(key1 int, key2 string, cnt int);
+
+-- no map-side group by even if the group by key is a superset of sorted key
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl2
+SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+
+INSERT OVERWRITE TABLE outputTbl2
+SELECT key, val, count(1) FROM T1 GROUP BY key, val;
+
+SELECT * FROM outputTbl2 ORDER BY key1, key2;
+
+-- It should work for sub-queries
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- It should work for sub-queries with column aliases
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+CREATE TABLE outputTbl3(key1 int, key2 int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant followed
+-- by a match to the sorted key
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl3
+SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+
+INSERT OVERWRITE TABLE outputTbl3
+SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;
+
+SELECT * FROM outputTbl3 ORDER BY key1, key2;
+
+CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt int);
+
+-- no map-side group by if the group by key contains a constant followed by another column
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+-- no map-side group by if the group by key contains a function
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl3
+SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+
+INSERT OVERWRITE TABLE outputTbl3
+SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1;
+
+SELECT * FROM outputTbl3 ORDER BY key1, key2;
+
+-- it should not matter what follows the group by
+-- test various cases
+
+-- group by followed by another group by
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key + key, sum(cnt) from
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+group by key + key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key + key, sum(cnt) from
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+group by key + key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a union
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key, count(1) FROM T1 GROUP BY key
+) subq1;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key, count(1) FROM T1 GROUP BY key
+) subq1;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a union where one of the sub-queries is map-side group by
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) FROM T1 GROUP BY key
+  UNION ALL
+SELECT key + key as key, count(1) FROM T1 GROUP BY key + key
+) subq1;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT * FROM (
+SELECT key, count(1) as cnt FROM T1 GROUP BY key
+  UNION ALL
+SELECT key + key as key, count(1) as cnt FROM T1 GROUP BY key + key
+) subq1;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a join
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+ON subq1.key = subq2.key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT subq1.key, subq1.cnt+subq2.cnt FROM 
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2
+ON subq1.key = subq2.key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- group by followed by a join where one of the sub-queries can be performed in the mapper
+EXPLAIN EXTENDED 
+SELECT * FROM 
+(SELECT key, count(1) FROM T1 GROUP BY key) subq1
+JOIN
+(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2
+ON subq1.key = subq2.key;
+
+CREATE TABLE T2(key STRING, val STRING)
+CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
+
+-- perform an insert to make sure there are 2 files
+INSERT OVERWRITE TABLE T2 select key, val from T1;
+
+-- no mapside sort group by if the group by is a prefix of the sorted key
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T2 GROUP BY key;
+
+INSERT OVERWRITE TABLE outputTbl1
+SELECT key, count(1) FROM T2 GROUP BY key;
+
+SELECT * FROM outputTbl1 ORDER BY key;
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 int, cnt int);
+
+-- The plan should be converted to a map-side group by if the group by key contains a constant in between the
+-- sorted keys followed by anything
+EXPLAIN EXTENDED 
+INSERT OVERWRITE TABLE outputTbl5
+SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+
+INSERT OVERWRITE TABLE outputTbl5
+SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
+
+SELECT * FROM outputTbl5 
+ORDER BY key1, key2, key3, key4;
+
+-- constants from sub-queries should work fine
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, constant, val, count(1) from 
+(SELECT key, 1 as constant, val from T2)subq
+group by key, constant, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+SELECT key, constant, val, count(1) from 
+(SELECT key, 1 as constant, val from T2)subq
+group by key, constant, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+-- multiple levels of constants from sub-queries should work fine
+EXPLAIN EXTENDED
+INSERT OVERWRITE TABLE outputTbl4
+select key, constant3, val, count(1) from
+(
+SELECT key, constant as constant2, val, 2 as constant3 from 
+(SELECT key, 1 as constant, val from T2)subq
+)subq2
+group by key, constant3, val;
+
+INSERT OVERWRITE TABLE outputTbl4
+select key, constant3, val, count(1) from
+(
+SELECT key, constant as constant2, val, 2 as constant3 from 
+(SELECT key, 1 as constant, val from T2)subq
+)subq2
+group by key, constant3, val;
+
+SELECT * FROM outputTbl4 ORDER BY key1, key2, key3;
+
+set hive.map.aggr=true;
+set hive.multigroupby.singlereducer=false;
+set mapred.reduce.tasks=31;
+
+CREATE TABLE DEST1(key INT, cnt INT);
+CREATE TABLE DEST2(key INT, val STRING, cnt INT);
+
+SET hive.exec.compress.intermediate=true;
+SET hive.exec.compress.output=true; 
+
+EXPLAIN
+FROM T2
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+FROM T2
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+select * from DEST1 ORDER BY key, cnt;
+select * from DEST2 ORDER BY key, val, cnt;
+
+-- multi-table insert with a sub-query
+EXPLAIN
+FROM (select key, val from T2 where key = 8) x
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+FROM (select key, val from T2 where key = 8) x
+INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key
+INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val;
+
+select * from DEST1 ORDER BY key, cnt;
+select * from DEST2 ORDER BY key, val, cnt;

Modified: hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out (original)
+++ hive/trunk/ql/src/test/results/clientnegative/unset_table_property.q.out Fri May 23 01:17:15 2014
@@ -17,16 +17,15 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
-c	3
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 a	1
+c	3
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 FAILED: SemanticException [Error 10215]: Please use the following syntax if not sure whether the property existed or not:
 ALTER TABLE tableName UNSET TBLPROPERTIES IF EXISTS (key1, key2, ...)
  The following property z does not exist in testtable

Modified: hive/trunk/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out Fri May 23 01:17:15 2014
@@ -20,128 +20,6 @@ POSTHOOK: query: CREATE TABLE T2(name ST
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE T2 SELECT * FROM (
-SELECT tmp1.name as name FROM (
-  SELECT name, 'MMM' AS n FROM T1) tmp1 
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp2
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp3
-  ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE T2 SELECT * FROM (
-SELECT tmp1.name as name FROM (
-  SELECT name, 'MMM' AS n FROM T1) tmp1 
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp2
-  JOIN (SELECT 'MMM' AS n FROM T1) tmp3
-  ON tmp1.n = tmp2.n AND tmp1.n = tmp3.n) ttt LIMIT 5000000
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t1
-            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 'MMM' (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-          TableScan
-            alias: t1
-            Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 'MMM' (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE
-          TableScan
-            alias: t1
-            Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: name (type: string), 'MMM' (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col1 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col1 (type: string)
-                Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-               Inner Join 0 to 2
-          condition expressions:
-            0 {VALUE._col0}
-            1 
-            2 
-          outputColumnNames: _col0
-          Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-            Limit
-              Number of rows: 5000000
-              Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 5000000
-            Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  name: default.t2
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.t2
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 
@@ -161,67 +39,6 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@t2
 POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT COUNT(1) FROM T2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT COUNT(1) FROM T2
-POSTHOOK: type: QUERY
-POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t2
-            Statistics: Num rows: 0 Data size: 79536648 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 0 Data size: 79536648 Basic stats: PARTIAL Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: bigint)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT COUNT(1) FROM T2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t2
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t2
-#### A masked pattern was here ####
-POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
-5000000
 PREHOOK: query: CREATE TABLE T3(name STRING) STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -248,64 +65,55 @@ POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@t3
 POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT COUNT(1) FROM T3
+PREHOOK: query: -- 2 split by max.split.size
+SELECT COUNT(1) FROM T2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: -- 2 split by max.split.size
+SELECT COUNT(1) FROM T2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
+Stage-1=Map: 2  Reduce: 1  
+5000000
+PREHOOK: query: -- 1 split for two file
+SELECT COUNT(1) FROM T3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: -- 1 split for two file
+SELECT COUNT(1) FROM T3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
+Stage-1=Map: 1  Reduce: 1  
+1000
+PREHOOK: query: -- 1 split
+SELECT COUNT(1) FROM T2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT COUNT(1) FROM T3
+PREHOOK: Input: default@t2
+#### A masked pattern was here ####
+POSTHOOK: query: -- 1 split
+SELECT COUNT(1) FROM T2
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+#### A masked pattern was here ####
 POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t3
-            Statistics: Num rows: 0 Data size: 11603 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 0 Data size: 11603 Basic stats: PARTIAL Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: _col0 (type: bigint)
-            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT COUNT(1) FROM T3
+Stage-1=Map: 1  Reduce: 1  
+5000000
+PREHOOK: query: -- 2 split for two file
+SELECT COUNT(1) FROM T3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(1) FROM T3
+POSTHOOK: query: -- 2 split for two file
+SELECT COUNT(1) FROM T3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t3
 #### A masked pattern was here ####
 POSTHOOK: Lineage: t2.name SIMPLE [(t1)t1.FieldSchema(name:name, type:string, comment:null), ]
+Stage-1=Map: 2  Reduce: 1  
 1000

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out (original) and hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1.q.out Fri May 23 01:17:15 2014 differ

Added: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out?rev=1596993&view=auto
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out (added) and hive/trunk/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out Fri May 23 01:17:15 2014 differ

Modified: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out (original) and hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1.q.out Fri May 23 01:17:15 2014 differ

Added: hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out?rev=1596993&view=auto
==============================================================================
Files hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out (added) and hive/trunk/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out Fri May 23 01:17:15 2014 differ

Modified: hive/trunk/ql/src/test/results/clientpositive/nullformatCTAS.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/nullformatCTAS.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/nullformatCTAS.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/nullformatCTAS.q.out Fri May 23 01:17:15 2014
@@ -167,12 +167,12 @@ OUTPUTFORMAT 
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
-  'numFiles'='1', 
-#### A masked pattern was here ####
   'COLUMN_STATS_ACCURATE'='true', 
-  'totalSize'='80', 
+  'numFiles'='1', 
   'numRows'='10', 
-  'rawDataSize'='70')
+  'rawDataSize'='70', 
+  'totalSize'='80', 
+#### A masked pattern was here ####
 1.01
 1.01
 1.01

Modified: hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/show_create_table_alter.q.out Fri May 23 01:17:15 2014
@@ -69,13 +69,14 @@ OUTPUTFORMAT 
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
-  'numFiles'='0', 
+  'COLUMN_STATS_ACCURATE'='false', 
   'EXTERNAL'='FALSE', 
 #### A masked pattern was here ####
-  'COLUMN_STATS_ACCURATE'='false', 
-  'totalSize'='0', 
+  'numFiles'='0', 
   'numRows'='-1', 
-  'rawDataSize'='-1')
+  'rawDataSize'='-1', 
+  'totalSize'='0', 
+#### A masked pattern was here ####
 PREHOOK: query: -- Alter the table comment, change the EXTERNAL property back and test SHOW CREATE TABLE on the change.
 ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -110,12 +111,13 @@ OUTPUTFORMAT 
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
-  'numFiles'='0', 
-#### A masked pattern was here ####
   'COLUMN_STATS_ACCURATE'='false', 
-  'totalSize'='0', 
+#### A masked pattern was here ####
+  'numFiles'='0', 
   'numRows'='-1', 
-  'rawDataSize'='-1')
+  'rawDataSize'='-1', 
+  'totalSize'='0', 
+#### A masked pattern was here ####
 PREHOOK: query: -- Change the 'SORTBUCKETCOLSPREFIX' property and test SHOW CREATE TABLE. The output should not change.
 ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -150,12 +152,13 @@ OUTPUTFORMAT 
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
-  'numFiles'='0', 
-#### A masked pattern was here ####
   'COLUMN_STATS_ACCURATE'='false', 
-  'totalSize'='0', 
+#### A masked pattern was here ####
+  'numFiles'='0', 
   'numRows'='-1', 
-  'rawDataSize'='-1')
+  'rawDataSize'='-1', 
+  'totalSize'='0', 
+#### A masked pattern was here ####
 PREHOOK: query: -- Alter the storage handler of the table, and test SHOW CREATE TABLE.
 ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -190,12 +193,13 @@ WITH SERDEPROPERTIES ( 
 LOCATION
 #### A masked pattern was here ####
 TBLPROPERTIES (
-  'numFiles'='0', 
-#### A masked pattern was here ####
   'COLUMN_STATS_ACCURATE'='false', 
-  'totalSize'='0', 
+#### A masked pattern was here ####
+  'numFiles'='0', 
   'numRows'='-1', 
-  'rawDataSize'='-1')
+  'rawDataSize'='-1', 
+  'totalSize'='0', 
+#### A masked pattern was here ####
 PREHOOK: query: DROP TABLE tmp_showcrt1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@tmp_showcrt1

Modified: hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/show_tblproperties.q.out Fri May 23 01:17:15 2014
@@ -30,15 +30,15 @@ PREHOOK: query: show tblproperties tmpfo
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties tmpfoo
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
-tmp	true
-#### A masked pattern was here ####
 COLUMN_STATS_ACCURATE	false
-totalSize	0
-numRows	-1
 bar	bar value
+#### A masked pattern was here ####
+numFiles	0
+numRows	-1
 rawDataSize	-1
+tmp	true
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: show tblproperties tmpfoo("bar")
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: show tblproperties tmpfoo("bar")

Modified: hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out
URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out?rev=1596993&r1=1596992&r2=1596993&view=diff
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out (original)
+++ hive/trunk/ql/src/test/results/clientpositive/unset_table_view_property.q.out Fri May 23 01:17:15 2014
@@ -24,16 +24,15 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
-c	3
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 a	1
+c	3
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET all the properties
 ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'c')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -48,12 +47,13 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
 COLUMN_STATS_ACCURATE	false
-totalSize	0
+#### A masked pattern was here ####
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'c'='3', 'd'='4')
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@testtable
@@ -66,17 +66,16 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-d	4
-#### A masked pattern was here ####
-c	3
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 a	1
+c	3
+d	4
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET a subset of the properties
 ALTER TABLE testTable UNSET TBLPROPERTIES ('a', 'd')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -91,14 +90,14 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 c	3
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- the same property being UNSET multiple times
 ALTER TABLE testTable UNSET TBLPROPERTIES ('c', 'c', 'c')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -113,12 +112,13 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
 COLUMN_STATS_ACCURATE	false
-totalSize	0
+#### A masked pattern was here ####
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: ALTER TABLE testTable SET TBLPROPERTIES ('a'='1', 'b' = '2', 'c'='3', 'd'='4')
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@testtable
@@ -131,18 +131,17 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-d	4
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
+a	1
 b	2
 c	3
+d	4
 #### A masked pattern was here ####
-a	1
-#### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS
 ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -157,16 +156,15 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
-c	3
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 a	1
+c	3
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS
 ALTER TABLE testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z')
 PREHOOK: type: ALTERTABLE_PROPERTIES
@@ -181,14 +179,14 @@ PREHOOK: query: SHOW TBLPROPERTIES testT
 PREHOOK: type: SHOW_TBLPROPERTIES
 POSTHOOK: query: SHOW TBLPROPERTIES testTable
 POSTHOOK: type: SHOW_TBLPROPERTIES
-numFiles	0
-#### A masked pattern was here ####
+COLUMN_STATS_ACCURATE	false
 a	1
 #### A masked pattern was here ####
-COLUMN_STATS_ACCURATE	false
-totalSize	0
+numFiles	0
 numRows	-1
 rawDataSize	-1
+totalSize	0
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET VIEW PROPERTIES
 CREATE VIEW testView AS SELECT value FROM src WHERE key=86
 PREHOOK: type: CREATEVIEW
@@ -243,9 +241,9 @@ POSTHOOK: query: SHOW TBLPROPERTIES test
 POSTHOOK: type: SHOW_TBLPROPERTIES
 #### A masked pattern was here ####
 propA	100
-#### A masked pattern was here ####
-propD	400
 propC	300
+propD	400
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET a subset of the properties
 ALTER VIEW testView UNSET TBLPROPERTIES ('propA', 'propC')
 PREHOOK: type: ALTERVIEW_PROPERTIES
@@ -262,6 +260,7 @@ POSTHOOK: query: SHOW TBLPROPERTIES test
 POSTHOOK: type: SHOW_TBLPROPERTIES
 #### A masked pattern was here ####
 propD	400
+#### A masked pattern was here ####
 PREHOOK: query: -- the same property being UNSET multiple times
 ALTER VIEW testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD')
 PREHOOK: type: ALTERVIEW_PROPERTIES
@@ -292,9 +291,9 @@ POSTHOOK: type: SHOW_TBLPROPERTIES
 #### A masked pattern was here ####
 propA	100
 propB	200
-#### A masked pattern was here ####
-propD	400
 propC	300
+propD	400
+#### A masked pattern was here ####
 PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS
 ALTER VIEW testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ')
 PREHOOK: type: ALTERVIEW_PROPERTIES