Posted to commits@hive.apache.org by da...@apache.org on 2015/09/22 07:04:12 UTC

[41/50] [abbrv] hive git commit: HIVE-11621 Fix TestMiniTezCliDriver test failures when HBase Metastore is used (Daniel Dai via gates)

HIVE-11621 Fix TestMiniTezCliDriver test failures when HBase Metastore is used (Daniel Dai via gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f014f0da
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f014f0da
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f014f0da

Branch: refs/heads/master
Commit: f014f0da457d1e959cb7da2824f6cf2c5ee0c971
Parents: 4d66206
Author: Alan Gates <ga...@hortonworks.com>
Authored: Fri Aug 28 10:38:49 2015 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Fri Aug 28 10:38:49 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ant/QTestGenTask.java    |  11 ++
 data/conf/tez/hive-site.xml                     |   9 ++
 itests/qtest/pom.xml                            |   3 +-
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |  80 +++++-------
 .../hadoop/hive/metastore/HiveAlterHandler.java |  22 ++--
 .../hive/metastore/hbase/HBaseReadWrite.java    |  17 ++-
 .../hadoop/hive/metastore/hbase/HBaseStore.java | 127 +++++++++++++++----
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |  60 ++++++---
 .../hadoop/hive/metastore/hbase/StatsCache.java |   2 +-
 .../stats/ColumnStatsAggregatorFactory.java     |   9 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |   3 +
 .../dynpart_sort_opt_vectorization.q            |   2 +
 .../clientpositive/dynpart_sort_optimization.q  |   2 +
 .../tez/dynpart_sort_opt_vectorization.q.out    |  12 +-
 .../tez/dynpart_sort_optimization.q.out         |  12 +-
 ql/src/test/templates/TestCliDriver.vm          |   3 +-
 16 files changed, 248 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
----------------------------------------------------------------------
diff --git a/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java b/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
index 7fffe13..8b1c4fe 100644
--- a/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
+++ b/ant/src/org/apache/hadoop/hive/ant/QTestGenTask.java
@@ -149,6 +149,8 @@ public class QTestGenTask extends Task {
 
   private String cleanupScript;
 
+  private String useHBaseMetastore;
+
   public void setHadoopVersion(String ver) {
     this.hadoopVersion = ver;
   }
@@ -221,6 +223,14 @@ public class QTestGenTask extends Task {
     this.cleanupScript = cleanupScript;
   }
 
+  public String getUseHBaseMetastore() {
+    return useHBaseMetastore;
+  }
+
+  public void setUseHBaseMetastore(String useHBaseMetastore) {
+    this.useHBaseMetastore = useHBaseMetastore;
+  }
+
   public void setHiveRootDirectory(File hiveRootDirectory) {
     try {
       this.hiveRootDirectory = hiveRootDirectory.getCanonicalPath();
@@ -530,6 +540,7 @@ public class QTestGenTask extends Task {
       ctx.put("hadoopVersion", hadoopVersion);
       ctx.put("initScript", initScript);
       ctx.put("cleanupScript", cleanupScript);
+      ctx.put("useHBaseMetastore", useHBaseMetastore);
 
       File outFile = new File(outDir, className + ".java");
       FileWriter writer = new FileWriter(outFile);

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/data/conf/tez/hive-site.xml
----------------------------------------------------------------------
diff --git a/data/conf/tez/hive-site.xml b/data/conf/tez/hive-site.xml
index e0238aa..bcda3ea 100644
--- a/data/conf/tez/hive-site.xml
+++ b/data/conf/tez/hive-site.xml
@@ -253,5 +253,14 @@
   </description>
 </property>
 
+<property>
+  <name>hive.metastore.fastpath</name>
+  <value>true</value>
+</property>
+
+<property>
+  <name>hive.metastore.rawstore.impl</name>
+  <value>org.apache.hadoop.hive.metastore.hbase.HBaseStore</value>
+</property>
 
 </configuration>
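
The two properties added above are what switch the Tez qfile runs onto the HBase metastore: hive.metastore.rawstore.impl swaps the default ObjectStore for HBaseStore, and hive.metastore.fastpath roughly means the embedded client talks to the RawStore directly instead of going through the usual metastore proxy and object-copy layers. As an illustration only (not part of this patch), the same switch can be made programmatically on a test HiveConf; the class and method names below are made up.

    // Illustrative sketch only: configure a HiveConf the same way
    // data/conf/tez/hive-site.xml now does. Class/method names are made up.
    import org.apache.hadoop.hive.conf.HiveConf;

    public class HBaseMetastoreConfSketch {
      static HiveConf withHBaseMetastore(HiveConf conf) {
        // Use HBaseStore as the RawStore implementation instead of the default ObjectStore.
        conf.set("hive.metastore.rawstore.impl",
            "org.apache.hadoop.hive.metastore.hbase.HBaseStore");
        // Take the in-process fast path to the RawStore rather than the proxy layers.
        conf.set("hive.metastore.fastpath", "true");
        return conf;
      }
    }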

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index c2cb2f6..664068b 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -509,7 +509,8 @@
                               logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                               hadoopVersion="${active.hadoop.version}"
                               initScript="${initScript}"
-                              cleanupScript="q_test_cleanup.sql"/>
+                              cleanupScript="q_test_cleanup.sql"
+                              useHBaseMetastore="true"/>
 
                     <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
                               outputDirectory="${project.build.directory}/generated-test-sources/java/org/apache/hadoop/hive/cli/"

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 358fdbd..9f112ad 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.hbase.HBaseReadWrite;
-import org.apache.hadoop.hive.metastore.hbase.TephraHBaseConnection;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.Utilities;
@@ -109,6 +108,8 @@ import org.apache.hadoop.hive.shims.HadoopShims;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hive.common.util.StreamPrinter;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.apache.tools.ant.BuildException;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -166,12 +167,12 @@ public class QTestUtil {
 
   private final String initScript;
   private final String cleanupScript;
+  private boolean useHBaseMetastore = false;
 
   public interface SuiteAddTestFunctor {
     public void addTestToSuite(TestSuite suite, Object setup, String tName);
   }
   private HBaseTestingUtility utility;
-  private boolean snapshotTaken = false;
 
   static {
     for (String srcTable : System.getProperty("test.src.tables", "").trim().split(",")) {
@@ -348,61 +349,46 @@ public class QTestUtil {
     return "jceks://file" + new Path(keyDir, "test.jks").toUri();
   }
 
-  private void rebuildHBase() throws Exception {
-    HBaseAdmin admin = utility.getHBaseAdmin();
-    if (!snapshotTaken) {
-      for (String tableName : HBaseReadWrite.tableNames) {
-        List<byte[]> families = HBaseReadWrite.columnFamilies.get(tableName);
-        HTableDescriptor desc = new HTableDescriptor(
-            TableName.valueOf(tableName));
-        for (byte[] family : families) {
-          HColumnDescriptor columnDesc = new HColumnDescriptor(family);
-          desc.addFamily(columnDesc);
-        }
-        try {
-          admin.disableTable(tableName);
-          admin.deleteTable(tableName);
-        } catch (IOException e) {
-          System.out.println(e.getMessage());
-        }
-        admin.createTable(desc);
-      }
-    } else {
-      for (String tableName : HBaseReadWrite.tableNames) {
-        admin.disableTable(tableName);
-        admin.restoreSnapshot("snapshot_" + tableName);
-        admin.enableTable(tableName);
-      }
-      try {
-        db.createDatabase(new org.apache.hadoop.hive.metastore.api.Database(
-            DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, new Warehouse(conf)
-                .getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null));
-      } catch (Exception e) {
-        // Ignore if default database already exist
-      }
-      SessionState.get().setCurrentDatabase(DEFAULT_DATABASE_NAME);
-    }
-    admin.close();
-  }
-
   private void startMiniHBaseCluster() throws Exception {
     utility = new HBaseTestingUtility();
     utility.startMiniCluster();
     conf = new HiveConf(utility.getConfiguration(), Driver.class);
-    rebuildHBase();
+    conf = new HiveConf(utility.getConfiguration(), Driver.class);
+    HBaseAdmin admin = utility.getHBaseAdmin();
+    for (String tableName : HBaseReadWrite.tableNames) {
+      List<byte[]> families = HBaseReadWrite.columnFamilies.get(tableName);
+      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
+      for (byte[] family : families) {
+        HColumnDescriptor columnDesc = new HColumnDescriptor(family);
+        desc.addFamily(columnDesc);
+      }
+      admin.createTable(desc);
+    }
+    admin.close();
     HBaseReadWrite.getInstance(conf);
   }
 
   public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
       String confDir, String hadoopVer, String initScript, String cleanupScript)
     throws Exception {
+    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript, false);
+  }
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+      String confDir, String hadoopVer, String initScript, String cleanupScript, boolean useHBaseMetastore)
+    throws Exception {
     this.outDir = outDir;
     this.logDir = logDir;
+    this.useHBaseMetastore = useHBaseMetastore;
+
+    Logger hadoopLog = Logger.getLogger("org.apache.hadoop");
+    hadoopLog.setLevel(Level.INFO);
     if (confDir != null && !confDir.isEmpty()) {
       HiveConf.setHiveSiteLocation(new URL("file://"+ new File(confDir).toURI().getPath() + "/hive-site.xml"));
       System.out.println("Setting hive-site: "+HiveConf.getHiveSiteLocation());
     }
-    startMiniHBaseCluster();
+    if (useHBaseMetastore) {
+      startMiniHBaseCluster();
+    }
     conf = new HiveConf(Driver.class);
     this.hadoopVer = getHadoopMainVersion(hadoopVer);
     qMap = new TreeMap<String, String>();
@@ -491,7 +477,9 @@ public class QTestUtil {
         sparkSession = null;
       }
     }
-    utility.shutdownMiniCluster();
+    if (useHBaseMetastore) {
+      utility.shutdownMiniCluster();
+    }
     if (mr != null) {
       mr.shutdown();
       mr = null;
@@ -779,8 +767,6 @@ public class QTestUtil {
       return;
     }
 
-    rebuildHBase();
-
     clearTablesCreatedDuringTests();
     clearKeysCreatedInTests();
 
@@ -880,12 +866,6 @@ public class QTestUtil {
     cliDriver.processLine(initCommands);
 
     conf.setBoolean("hive.test.init.phase", false);
-
-    HBaseAdmin admin = utility.getHBaseAdmin();
-    for (String tableName : HBaseReadWrite.tableNames) {
-      admin.snapshot("snapshot_" + tableName, tableName);
-    }
-    snapshotTaken = true;
   }
 
   public void init() throws Exception {
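
Net effect of the QTestUtil changes: the snapshot/restore machinery (rebuildHBase and the snapshotTaken flag) is gone, the mini HBase cluster is started once in startMiniHBaseCluster with the metastore tables created up front, and all of it is gated on the new useHBaseMetastore constructor argument so only suites generated with that flag pay the HBase startup and shutdown cost. A minimal sketch of the new overload follows; the directory, version, and script values are placeholders, not values taken from the patch.

    // Minimal sketch of the new QTestUtil overload added by this patch; the directory,
    // version, and init-script values are placeholders.
    import org.apache.hadoop.hive.ql.QTestUtil;

    public class QTestUtilSketch {
      public static void main(String[] args) throws Exception {
        QTestUtil qt = new QTestUtil(
            "/tmp/qtest/results",               // outDir (placeholder)
            "/tmp/qtest/logs",                  // logDir (placeholder)
            QTestUtil.MiniClusterType.tez,      // clusterType
            "data/conf/tez",                    // confDir: picks up the hive-site.xml above
            "2.6.0",                            // hadoopVer (placeholder)
            "q_test_init.sql",                  // initScript (placeholder)
            "q_test_cleanup.sql",               // cleanupScript
            true);                              // useHBaseMetastore: start the mini HBase cluster
        qt.cleanUp();                           // same one-time init path the generated drivers use
      }
    }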

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index f402f73..d9382ff 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -668,17 +668,19 @@ public class HiveAlterHandler implements AlterHandler {
           }
 
           List<ColumnStatisticsObj> statsObjs = cs.getStatsObj();
-          for (ColumnStatisticsObj statsObj : statsObjs) {
-            boolean found = false;
-            for (FieldSchema newCol : newCols) {
-              if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
-                  && statsObj.getColType().equals(newCol.getType())) {
-                found = true;
-                break;
+          if (statsObjs != null) {
+            for (ColumnStatisticsObj statsObj : statsObjs) {
+              boolean found = false;
+              for (FieldSchema newCol : newCols) {
+                if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                    && statsObj.getColType().equals(newCol.getType())) {
+                  found = true;
+                  break;
+                }
+              }
+              if (!found) {
+                msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
               }
-            }
-            if (!found) {
-              msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index f1336dc..8a1448c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -680,7 +680,13 @@ public class HBaseReadWrite {
         firstStar = i;
         break;
       } else {
-        keyElements.add(partVals.get(i));
+        // empty string equals to null partition,
+        // means star
+        if (partVals.get(i).equals("")) {
+          break;
+        } else {
+          keyElements.add(partVals.get(i));
+        }
       }
     }
 
@@ -693,7 +699,7 @@ public class HBaseReadWrite {
     }
     keyPrefix = HBaseUtils.buildPartitionKey(dbName, tableName,
         HBaseUtils.getPartitionKeyTypes(table.getPartitionKeys().subList(0, keyElements.size()-2)),
-          keyElements.subList(0, keyElements.size()-2));
+          keyElements.subList(2, keyElements.size()));
 
     // Now, build a filter out of the remaining keys
     List<PartitionKeyComparator.Range> ranges = new ArrayList<PartitionKeyComparator.Range>();
@@ -809,7 +815,7 @@ public class HBaseReadWrite {
     for (int i = 0; i < numToFetch && iter.hasNext(); i++) {
       Result result = iter.next();
       HBaseUtils.StorageDescriptorParts sdParts = HBaseUtils.deserializePartition(dbName, tableName,
-          tablePartitions, result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL));
+          tablePartitions, result.getRow(), result.getValue(CATALOG_CF, CATALOG_COL), staticConf);
       StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
       HBaseUtils.assembleStorageDescriptor(sd, sdParts);
       parts.add(sdParts.containingPartition);
@@ -1604,9 +1610,10 @@ public class HBaseReadWrite {
             // recontruct the key.  We have to pull the dbName and tableName out of the key to
             // find the partition values.
             byte[] key = results[i].getRow();
-            String[] reconstructedKey = HBaseUtils.parseKey(key);
+            List<String> reconstructedKey = HBaseUtils.parseKey(key, HBaseUtils.getPartitionNames(getTable(dbName, tblName).getPartitionKeys()),
+                HBaseUtils.getPartitionKeyTypes(getTable(dbName, tblName).getPartitionKeys()));
             List<String> reconstructedPartVals =
-                Arrays.asList(reconstructedKey).subList(2, reconstructedKey.length);
+                reconstructedKey.subList(2, reconstructedKey.size());
             String partName = valToPartMap.get(reconstructedPartVals);
             assert partName != null;
             csd.setIsTblLevel(false);
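
Two things change in the prefix-scan path above: an empty partition value is now treated as "match anything from here on" (the key prefix stops at the first empty or star value), and the prefix values are taken from keyElements.subList(2, size()) instead of subList(0, size()-2). keyElements evidently carries dbName and tableName in its first two slots (the matching key-type list is sized keyElements.size()-2), so the old call kept those two and dropped the trailing partition values. A small illustration of the subList fix, using made-up values:

    // Illustration of the subList fix; keyElements holds [dbName, tableName, partVal1, ...].
    import java.util.Arrays;
    import java.util.List;

    public class SubListSketch {
      public static void main(String[] args) {
        List<String> keyElements = Arrays.asList("default", "over1k_part2", "foo", "27");
        // Old code: keeps dbName/tableName and drops the trailing partition values.
        System.out.println(keyElements.subList(0, keyElements.size() - 2)); // [default, over1k_part2]
        // Fixed code: just the partition values, which is what buildPartitionKey expects.
        System.out.println(keyElements.subList(2, keyElements.size()));     // [foo, 27]
      }
    }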

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index f30fcab..568a347 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -20,15 +20,18 @@ package org.apache.hadoop.hive.metastore.hbase;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.CacheLoader;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
 import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
 import org.apache.hadoop.hive.metastore.RawStore;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
@@ -64,6 +67,9 @@ import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult;
 import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.thrift.TException;
 
 import java.io.IOException;
@@ -71,6 +77,7 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -150,7 +157,7 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      Database db = getHBase().getDb(name);
+      Database db = getHBase().getDb(HiveStringUtils.normalizeIdentifier(name));
       if (db == null) {
         throw new NoSuchObjectException("Unable to find db " + name);
       }
@@ -169,7 +176,7 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      getHBase().deleteDb(dbname);
+      getHBase().deleteDb(HiveStringUtils.normalizeIdentifier(dbname));
       commit = true;
       return true;
     } catch (IOException e) {
@@ -259,7 +266,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      getHBase().deleteTable(dbName, tableName);
+      getHBase().deleteTable(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName));
       commit = true;
       return true;
     } catch (IOException e) {
@@ -275,7 +283,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      Table table = getHBase().getTable(dbName, tableName);
+      Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName));
       if (table == null) {
         LOG.debug("Unable to find table " + tableNameForErrorMsg(dbName, tableName));
       }
@@ -334,7 +343,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      Partition part = getHBase().getPartition(dbName, tableName, part_vals);
+      Partition part = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName), part_vals);
       if (part == null) {
         throw new NoSuchObjectException("Unable to find partition " +
             partNameForErrorMsg(dbName, tableName, part_vals));
@@ -355,7 +365,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      boolean exists = getHBase().getPartition(dbName, tableName, part_vals) != null;
+      boolean exists = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName), part_vals) != null;
       commit = true;
       return exists;
     } catch (IOException e) {
@@ -372,9 +383,11 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      getHBase().deletePartition(dbName, tableName, part_vals);
+      getHBase().deletePartition(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName), part_vals);
       // Drop any cached stats that reference this partitions
-      getHBase().getStatsCache().invalidate(dbName, tableName,
+      getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName),
           buildExternalPartName(dbName, tableName, part_vals));
       commit = true;
       return true;
@@ -393,7 +406,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Partition> parts = getHBase().scanPartitionsInTable(dbName, tableName, max);
+      List<Partition> parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tableName), max);
       commit = true;
       return parts;
     } catch (IOException e) {
@@ -410,7 +424,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      getHBase().replaceTable(getHBase().getTable(dbname, name), newTable);
+      getHBase().replaceTable(getHBase().getTable(HiveStringUtils.normalizeIdentifier(dbname),
+          HiveStringUtils.normalizeIdentifier(name)), newTable);
       if (newTable.getPartitionKeys() != null && newTable.getPartitionKeys().size() > 0
           && !name.equals(newTable.getTableName())) {
         // They renamed the table, so we need to change each partition as well, since it changes
@@ -443,7 +458,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Table> tables = getHBase().scanTables(dbName, likeToRegex(pattern));
+      List<Table> tables = getHBase().scanTables(HiveStringUtils.normalizeIdentifier(dbName),
+          likeToRegex(pattern));
       List<String> tableNames = new ArrayList<String>(tables.size());
       for (Table table : tables) tableNames.add(table.getTableName());
       commit = true;
@@ -462,7 +478,12 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Table> tables = getHBase().getTables(dbname, tableNames);
+      List<String> normalizedTableNames = new ArrayList<String>(tableNames.size());
+      for (String tableName : tableNames) {
+        normalizedTableNames.add(HiveStringUtils.normalizeIdentifier(tableName));
+      }
+      List<Table> tables = getHBase().getTables(HiveStringUtils.normalizeIdentifier(dbname),
+          normalizedTableNames);
       commit = true;
       return tables;
     } catch (IOException e) {
@@ -491,10 +512,12 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Partition> parts = getHBase().scanPartitionsInTable(db_name, tbl_name, max_parts);
+      List<Partition> parts = getHBase().scanPartitionsInTable(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name), max_parts);
       if (parts == null) return null;
       List<String> names = new ArrayList<String>(parts.size());
-      Table table = getHBase().getTable(db_name, tbl_name);
+      Table table = getHBase().getTable(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name));
       for (Partition p : parts) {
         names.add(buildExternalPartName(table, p));
       }
@@ -521,10 +544,12 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      Partition oldPart = getHBase().getPartition(db_name, tbl_name, part_vals);
+      Partition oldPart = getHBase().getPartition(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name), part_vals);
       getHBase().replacePartition(oldPart, new_part);
       // Drop any cached stats that reference this partitions
-      getHBase().getStatsCache().invalidate(db_name, tbl_name,
+      getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name),
           buildExternalPartName(db_name, tbl_name, part_vals));
       commit = true;
     } catch (IOException e) {
@@ -542,11 +567,14 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Partition> oldParts = getHBase().getPartitions(db_name, tbl_name,
-          HBaseUtils.getPartitionKeyTypes(getTable(db_name, tbl_name).getPartitionKeys()), part_vals_list);
+      List<Partition> oldParts = getHBase().getPartitions(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name),
+          HBaseUtils.getPartitionKeyTypes(getTable(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name)).getPartitionKeys()), part_vals_list);
       getHBase().replacePartitions(oldParts, new_parts);
       for (List<String> part_vals : part_vals_list) {
-        getHBase().getStatsCache().invalidate(db_name, tbl_name,
+        getHBase().getStatsCache().invalidate(HiveStringUtils.normalizeIdentifier(db_name),
+            HiveStringUtils.normalizeIdentifier(tbl_name),
             buildExternalPartName(db_name, tbl_name, part_vals));
       }
       commit = true;
@@ -604,7 +632,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+      getPartitionsByExprInternal(HiveStringUtils.normalizeIdentifier(dbName),
+          HiveStringUtils.normalizeIdentifier(tblName), exprTree, maxParts, result);
       return result;
     } finally {
       commitOrRoleBack(commit);
@@ -616,22 +645,62 @@ public class HBaseStore implements RawStore {
                                      String defaultPartitionName, short maxParts,
                                      List<Partition> result) throws TException {
     final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
-    // TODO: investigate if there should be any role for defaultPartitionName in this
-    // implementation. direct sql code path in ObjectStore does not use it.
-
+    dbName = HiveStringUtils.normalizeIdentifier(dbName);
+    tblName = HiveStringUtils.normalizeIdentifier(tblName);
+    Table table = getTable(dbName, tblName);
     boolean commit = false;
     openTransaction();
     try {
-      return getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+      if (exprTree == null) {
+        List<String> partNames = new LinkedList<String>();
+        boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(
+            table, expr, defaultPartitionName, maxParts, partNames);
+        result.addAll(getPartitionsByNames(dbName, tblName, partNames));
+        return hasUnknownPartitions;
+      } else {
+        return getPartitionsByExprInternal(dbName, tblName, exprTree, maxParts, result);
+      }
     } finally {
       commitOrRoleBack(commit);
     }
   }
 
+  /**
+   * Gets the partition names from a table, pruned using an expression.
+   * @param table Table.
+   * @param expr Expression.
+   * @param defaultPartName Default partition name from job config, if any.
+   * @param maxParts Maximum number of partition names to return.
+   * @param result The resulting names.
+   * @return Whether the result contains any unknown partitions.
+   * @throws NoSuchObjectException
+   */
+  private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+      String defaultPartName, short maxParts, List<String> result) throws MetaException, NoSuchObjectException {
+    List<Partition> parts = getPartitions(
+        table.getDbName(), table.getTableName(), maxParts);
+    for (Partition part : parts) {
+      result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+    }
+    List<String> columnNames = new ArrayList<String>();
+    List<PrimitiveTypeInfo> typeInfos = new ArrayList<PrimitiveTypeInfo>();
+    for (FieldSchema fs : table.getPartitionKeys()) {
+      columnNames.add(fs.getName());
+      typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType()));
+    }
+    if (defaultPartName == null || defaultPartName.isEmpty()) {
+      defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME);
+    }
+    return expressionProxy.filterPartitionsByExpr(
+        columnNames, typeInfos, expr, defaultPartName, result);
+  }
+
   private boolean getPartitionsByExprInternal(String dbName, String tblName,
       ExpressionTree exprTree, short maxParts, List<Partition> result) throws MetaException,
       NoSuchObjectException {
 
+    dbName = HiveStringUtils.normalizeIdentifier(dbName);
+    tblName = HiveStringUtils.normalizeIdentifier(tblName);
     Table table = getTable(dbName, tblName);
     if (table == null) {
       throw new NoSuchObjectException("Unable to find table " + dbName + "." + tblName);
@@ -1453,7 +1522,8 @@ public class HBaseStore implements RawStore {
         listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, null, null);
     List<String> partNames = new ArrayList<String>(parts.size());
     for (Partition part : parts) {
-      partNames.add(buildExternalPartName(db_name, tbl_name, part.getValues()));
+      partNames.add(buildExternalPartName(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name), part.getValues()));
     }
     return partNames;
   }
@@ -1468,7 +1538,8 @@ public class HBaseStore implements RawStore {
     boolean commit = false;
     openTransaction();
     try {
-      List<Partition> parts = getHBase().scanPartitions(db_name, tbl_name, part_vals, max_parts);
+      List<Partition> parts = getHBase().scanPartitions(HiveStringUtils.normalizeIdentifier(db_name),
+          HiveStringUtils.normalizeIdentifier(tbl_name), part_vals, max_parts);
       commit = true;
       return parts;
     } catch (IOException e) {
@@ -1596,7 +1667,7 @@ public class HBaseStore implements RawStore {
               getHBase().getStatsCache().get(dbName, tblName, partNames, colName);
           if (oneCol.getColStatsSize() > 0) {
             assert oneCol.getColStatsSize() == 1;
-            aggrStats.setPartsFound(aggrStats.getPartsFound() + oneCol.getPartsFound());
+            aggrStats.setPartsFound(oneCol.getPartsFound());
             aggrStats.addToColStats(oneCol.getColStats().get(0));
           }
         } catch (CacheLoader.InvalidCacheLoadException e) {
@@ -2204,7 +2275,7 @@ public class HBaseStore implements RawStore {
     List<String> vals = new ArrayList<String>();
     String[] kvp = name.split("/");
     for (String kv : kvp) {
-      vals.add(kv.substring(kv.indexOf('=') + 1));
+      vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
     }
     return vals;
   }
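
Most of the HBaseStore changes apply HiveStringUtils.normalizeIdentifier to database and table names before they reach the HBase row keys, presumably so lookups stay case-insensitive the way the ObjectStore-backed metastore behaves; the last hunk also runs each partition value through FileUtils.unescapePathName so escaped characters in partition names round-trip. A tiny sketch of the pattern, assuming normalizeIdentifier lower-cases and trims and unescapePathName decodes the %xx escaping used in partition names:

    // Sketch of the normalize-before-lookup pattern used throughout this file; assumes
    // HiveStringUtils.normalizeIdentifier lower-cases/trims and FileUtils.unescapePathName
    // decodes %xx escapes.
    import org.apache.hadoop.hive.common.FileUtils;
    import org.apache.hive.common.util.HiveStringUtils;

    public class NormalizeSketch {
      public static void main(String[] args) {
        // "Default.MyTable" and "default.mytable" now resolve to the same HBase key.
        String dbName  = HiveStringUtils.normalizeIdentifier("Default");   // -> "default"
        String tblName = HiveStringUtils.normalizeIdentifier("MyTable");   // -> "mytable"
        // Partition values parsed out of a partition name are unescaped before use.
        String partVal = FileUtils.unescapePathName("2015%2F09%2F22");     // -> "2015/09/22"
        System.out.println(dbName + "." + tblName + " / " + partVal);
      }
    }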

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
index 841afd4..cc90a76 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java
@@ -26,6 +26,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
@@ -67,6 +68,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hive.common.util.BloomFilter;
+import org.apache.hive.common.util.HiveStringUtils;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
@@ -115,9 +117,22 @@ class HBaseUtils {
     return protoKey.getBytes(ENCODING);
   }
 
-  static String[] parseKey(byte[] serialized) {
-    String munged = new String(serialized, ENCODING);
-    return munged.split(KEY_SEPARATOR_STR);
+  static List<String> parseKey(byte[] serialized, List<String> partNames, List<String> partTypes) {
+    BinarySortableSerDe serDe = new BinarySortableSerDe();
+    Properties props = new Properties();
+    props.setProperty(serdeConstants.LIST_COLUMNS, "dbName,tableName," + StringUtils.join(partNames, ","));
+    props.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string," + StringUtils.join(partTypes, ","));
+    List<String> partVals = null;
+    try {
+      serDe.initialize(new Configuration(), props);
+      List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(serialized)));
+      partVals = new ArrayList<String>();
+      for (Object deserializedkey : deserializedkeys) {
+        partVals.add(deserializedkey.toString());
+      }
+    } catch (SerDeException e) {
+    }
+    return partVals;
   }
 
   private static HbaseMetastoreProto.Parameters buildParameters(Map<String, String> params) {
@@ -240,9 +255,16 @@ class HBaseUtils {
 
   private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet(
       HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException {
-    PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
-    pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList()));
-    pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList()));
+    PrincipalPrivilegeSet pps = null;
+    if (!proto.getUsersList().isEmpty() || !proto.getRolesList().isEmpty()) {
+      pps = new PrincipalPrivilegeSet();
+      if (!proto.getUsersList().isEmpty()) {
+        pps.setUserPrivileges(convertPrincipalPrivilegeSetEntries(proto.getUsersList()));
+      }
+      if (!proto.getRolesList().isEmpty()) {
+        pps.setRolePrivileges(convertPrincipalPrivilegeSetEntries(proto.getRolesList()));
+      }
+    }
     return pps;
   }
   /**
@@ -339,7 +361,7 @@ class HBaseUtils {
    */
   static byte[][] serializeDatabase(Database db) {
     byte[][] result = new byte[2][];
-    result[0] = buildKey(db.getName());
+    result[0] = buildKey(HiveStringUtils.normalizeIdentifier(db.getName()));
     HbaseMetastoreProto.Database.Builder builder = HbaseMetastoreProto.Database.newBuilder();
 
     if (db.getDescription() != null) builder.setDescription(db.getDescription());
@@ -696,8 +718,10 @@ class HBaseUtils {
     sd.setNumBuckets(proto.getNumBuckets());
     if (proto.hasSerdeInfo()) {
       SerDeInfo serde = new SerDeInfo();
-      serde.setName(proto.getSerdeInfo().getName());
-      serde.setSerializationLib(proto.getSerdeInfo().getSerializationLib());
+      serde.setName(proto.getSerdeInfo().hasName()?
+          proto.getSerdeInfo().getName():null);
+      serde.setSerializationLib(proto.getSerdeInfo().hasSerializationLib()?
+          proto.getSerdeInfo().getSerializationLib():null);
       serde.setParameters(buildParameters(proto.getSerdeInfo().getParameters()));
       sd.setSerdeInfo(serde);
     }
@@ -848,8 +872,8 @@ class HBaseUtils {
    * @return A struct that contains the partition plus parts of the storage descriptor
    */
   static StorageDescriptorParts deserializePartition(String dbName, String tableName, List<FieldSchema> partitions,
-      byte[] key, byte[] serialized) throws InvalidProtocolBufferException {
-    List keys = deserializePartitionKey(partitions, key);
+      byte[] key, byte[] serialized, Configuration conf) throws InvalidProtocolBufferException {
+    List keys = deserializePartitionKey(partitions, key, conf);
     return deserializePartition(dbName, tableName, keys, serialized);
   }
 
@@ -886,7 +910,8 @@ class HBaseUtils {
     return k.split(KEY_SEPARATOR_STR);
   }
 
-  private static List<String> deserializePartitionKey(List<FieldSchema> partitions, byte[] key) {
+  private static List<String> deserializePartitionKey(List<FieldSchema> partitions, byte[] key,
+      Configuration conf) {
     StringBuffer names = new StringBuffer();
     names.append("dbName,tableName,");
     StringBuffer types = new StringBuffer();
@@ -908,7 +933,8 @@ class HBaseUtils {
       List deserializedkeys = ((List)serDe.deserialize(new BytesWritable(key))).subList(2, partitions.size()+2);
       List<String> partitionKeys = new ArrayList<String>();
       for (Object deserializedKey : deserializedkeys) {
-        partitionKeys.add(deserializedKey.toString());
+        partitionKeys.add(deserializedKey!=null?deserializedKey.toString():
+          HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME));
       }
       return partitionKeys;
     } catch (SerDeException e) {
@@ -924,7 +950,8 @@ class HBaseUtils {
    */
   static byte[][] serializeTable(Table table, byte[] sdHash) {
     byte[][] result = new byte[2][];
-    result[0] = buildKey(table.getDbName(), table.getTableName());
+    result[0] = buildKey(HiveStringUtils.normalizeIdentifier(table.getDbName()),
+        HiveStringUtils.normalizeIdentifier(table.getTableName()));
     HbaseMetastoreProto.Table.Builder builder = HbaseMetastoreProto.Table.newBuilder();
     if (table.getOwner() != null) builder.setOwner(table.getOwner());
     builder
@@ -952,7 +979,10 @@ class HBaseUtils {
     if (table.getPrivileges() != null) {
       builder.setPrivileges(buildPrincipalPrivilegeSet(table.getPrivileges()));
     }
-    builder.setIsTemporary(table.isTemporary());
+    // Set only if table is temporary
+    if (table.isTemporary()) {
+      builder.setIsTemporary(table.isTemporary());
+    }
     result[1] = builder.build().toByteArray();
     return result;
   }
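
parseKey can no longer just split on a separator because partition row keys are serialized with BinarySortableSerDe, so it now needs the partition column names and types to deserialize the key; deserializePartitionKey likewise gains a Configuration argument so a null partition value can be rendered as the configured default partition name (hive.exec.default.partition.name). A condensed sketch of the SerDe round trip the new code relies on, mirroring the initialize/deserialize calls in the patch; the method wrapper is illustrative.

    // Condensed sketch of the BinarySortableSerDe usage that parseKey now relies on;
    // the wrapper method is illustrative, the calls mirror the patch.
    import java.util.List;
    import java.util.Properties;

    import org.apache.commons.lang.StringUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe;
    import org.apache.hadoop.io.BytesWritable;

    public class ParseKeySketch {
      static List<?> parse(byte[] rowKey, List<String> partNames, List<String> partTypes)
          throws Exception {
        BinarySortableSerDe serDe = new BinarySortableSerDe();
        Properties props = new Properties();
        // Row keys are laid out as dbName, tableName, then the partition columns.
        props.setProperty(serdeConstants.LIST_COLUMNS,
            "dbName,tableName," + StringUtils.join(partNames, ","));
        props.setProperty(serdeConstants.LIST_COLUMN_TYPES,
            "string,string," + StringUtils.join(partTypes, ","));
        serDe.initialize(new Configuration(), props);
        // Each deserialized element is one key component; in deserializePartitionKey a
        // null partition value maps to the default partition name.
        return (List<?>) serDe.deserialize(new BytesWritable(rowKey));
      }
    }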

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
index 0d3ed40..42efe94 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/StatsCache.java
@@ -99,7 +99,7 @@ class StatsCache {
                   for (ColumnStatisticsObj cso : cs.getStatsObj()) {
                     if (statsObj == null) {
                       statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(key.colName,
-                          cso.getStatsData().getSetField());
+                          cso.getColType(), cso.getStatsData().getSetField());
                     }
                     if (aggregator == null) {
                       aggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java
index ebecfe3..a8dbc1f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/ColumnStatsAggregatorFactory.java
@@ -53,39 +53,34 @@ public class ColumnStatsAggregatorFactory {
     }
   }
 
-  public static ColumnStatisticsObj newColumnStaticsObj(String colName, _Fields type) {
+  public static ColumnStatisticsObj newColumnStaticsObj(String colName, String colType, _Fields type) {
     ColumnStatisticsObj cso = new ColumnStatisticsObj();
     ColumnStatisticsData csd = new ColumnStatisticsData();
     cso.setColName(colName);
+    cso.setColType(colType);
     switch (type) {
     case BOOLEAN_STATS:
       csd.setBooleanStats(new BooleanColumnStatsData());
-      cso.setColType("boolean");
       break;
 
     case LONG_STATS:
       csd.setLongStats(new LongColumnStatsData());
-      cso.setColType("long");
       break;
 
     case DOUBLE_STATS:
       csd.setDoubleStats(new DoubleColumnStatsData());
-      cso.setColType("double");
       break;
 
     case STRING_STATS:
       csd.setStringStats(new StringColumnStatsData());
-      cso.setColType("string");
       break;
 
     case BINARY_STATS:
       csd.setBinaryStats(new BinaryColumnStatsData());
-      cso.setColType("binary");
       break;
 
     case DECIMAL_STATS:
       csd.setDecimalStats(new DecimalColumnStatsData());
-      cso.setColType("decimal");
       break;
 
     default:
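
Taken together with the StatsCache hunk above, this moves responsibility for the column type onto the caller: instead of hard-coding a placeholder per statistics kind ("long", "string", ...), the factory now records the actual column type carried by the source ColumnStatisticsObj, so the aggregated object keeps the real type (e.g. an int column is no longer reported as "long"). A minimal sketch of the new call shape, grounded in the StatsCache usage; the helper method is illustrative.

    // Minimal sketch of the new factory signature as used in StatsCache; 'cso' stands
    // for one per-partition stats object, and the helper method is illustrative.
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.hbase.stats.ColumnStatsAggregatorFactory;

    public class StatsObjSketch {
      static ColumnStatisticsObj emptyAggregate(String colName, ColumnStatisticsObj cso) {
        // The column type now comes from the source stats object instead of being
        // derived from the statistics kind.
        return ColumnStatsAggregatorFactory.newColumnStaticsObj(
            colName, cso.getColType(), cso.getStatsData().getSetField());
      }
    }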

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 55aea0e..351cb2b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -737,6 +737,9 @@ public class StatsUtils {
   }
 
   private static List<ColStatistics> convertColStats(List<ColumnStatisticsObj> colStats, String tabName) {
+    if (colStats==null) {
+      return new ArrayList<ColStatistics>();
+    }
     List<ColStatistics> stats = new ArrayList<ColStatistics>(colStats.size());
     for (ColumnStatisticsObj statObj : colStats) {
       ColStatistics cs = getColStatistics(statObj, tabName, statObj.getColName());

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index 8001081..7e94f23 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -123,6 +123,7 @@ insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t
 desc formatted over1k_part2_orc partition(ds="foo",t=27);
 desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
 
+-- SORT_BEFORE_DIFF
 select * from over1k_part2_orc;
 select count(*) from over1k_part2_orc;
 
@@ -132,6 +133,7 @@ insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t
 desc formatted over1k_part2_orc partition(ds="foo",t=27);
 desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
 
+-- SORT_BEFORE_DIFF
 select * from over1k_part2_orc;
 select count(*) from over1k_part2_orc;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index f842efe..ea670e9 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -117,6 +117,7 @@ insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from
 desc formatted over1k_part2 partition(ds="foo",t=27);
 desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
 
+-- SORT_BEFORE_DIFF
 select * from over1k_part2;
 select count(*) from over1k_part2;
 
@@ -126,6 +127,7 @@ insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from
 desc formatted over1k_part2 partition(ds="foo",t=27);
 desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
 
+-- SORT_BEFORE_DIFF
 select * from over1k_part2;
 select count(*) from over1k_part2;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 4451046..f0fc221 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -1835,13 +1835,15 @@ Bucket Columns:     	[]
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from over1k_part2_orc
+PREHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2_orc
 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27
 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
-POSTHOOK: query: select * from over1k_part2_orc
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k_part2_orc
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27
@@ -1980,13 +1982,15 @@ Bucket Columns:     	[]
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from over1k_part2_orc
+PREHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2_orc
 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27
 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
-POSTHOOK: query: select * from over1k_part2_orc
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2_orc
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k_part2_orc
 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index cb001b9..8d4c1b7 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -1735,13 +1735,15 @@ Bucket Columns:     	[]
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from over1k_part2
+PREHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2
 PREHOOK: Input: default@over1k_part2@ds=foo/t=27
 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
-POSTHOOK: query: select * from over1k_part2
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k_part2
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27
@@ -1880,13 +1882,15 @@ Bucket Columns:     	[]
 Sort Columns:       	[]                  	 
 Storage Desc Params:	 	 
 	serialization.format	1                   
-PREHOOK: query: select * from over1k_part2
+PREHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1k_part2
 PREHOOK: Input: default@over1k_part2@ds=foo/t=27
 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__
 #### A masked pattern was here ####
-POSTHOOK: query: select * from over1k_part2
+POSTHOOK: query: -- SORT_BEFORE_DIFF
+select * from over1k_part2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1k_part2
 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27

http://git-wip-us.apache.org/repos/asf/hive/blob/f014f0da/ql/src/test/templates/TestCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm
index ae449c5..01745da 100644
--- a/ql/src/test/templates/TestCliDriver.vm
+++ b/ql/src/test/templates/TestCliDriver.vm
@@ -45,13 +45,14 @@ public class $className extends TestCase {
     String hiveConfDir = "$hiveConfDir";
     String initScript = "$initScript";
     String cleanupScript = "$cleanupScript";
+    boolean useHBaseMetastore = Boolean.valueOf("$useHBaseMetastore");
     try {
       String hadoopVer = "$hadoopVersion";
       if (!hiveConfDir.isEmpty()) {
         hiveConfDir = HIVE_ROOT + hiveConfDir;
       }
       qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
-      hiveConfDir, hadoopVer, initScript, cleanupScript);
+      hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore);
 
       // do a one time initialization
       qt.cleanUp();