Posted to commits@hive.apache.org by we...@apache.org on 2017/05/08 20:43:26 UTC

[36/51] [partial] hive git commit: Revert "HIVE-14671 : merge master into hive-14535 (Wei Zheng)"

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java b/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
deleted file mode 100644
index 6cbcf8c..0000000
--- a/itests/hive-unit/src/main/java/org/hadoop/hive/jdbc/SSLTestUtils.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.hadoop.hive.jdbc;
-
-import java.io.File;
-import java.net.URLEncoder;
-import java.sql.Connection;
-import java.sql.Statement;
-import java.util.Map;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-
-
-public class SSLTestUtils {
-
-  private static final String LOCALHOST_KEY_STORE_NAME = "keystore.jks";
-  private static final String TRUST_STORE_NAME = "truststore.jks";
-  private static final String KEY_STORE_TRUST_STORE_PASSWORD = "HiveJdbc";
-  private static final String HS2_BINARY_MODE = "binary";
-  private static final String HS2_HTTP_MODE = "http";
-  private static final String HS2_HTTP_ENDPOINT = "cliservice";
-  private static final String HS2_BINARY_AUTH_MODE = "NONE";
-
-  private static final HiveConf conf = new HiveConf();
-  private static final String dataFileDir = !System.getProperty("test.data.files", "").isEmpty() ? System.getProperty(
-          "test.data.files") : conf.get("test.data.files").replace('\\', '/').replace("c:", "");
-
-  public static final String SSL_CONN_PARAMS = "ssl=true;sslTrustStore="
-      + URLEncoder.encode(dataFileDir + File.separator + TRUST_STORE_NAME) + ";trustStorePassword="
-      + KEY_STORE_TRUST_STORE_PASSWORD;
-
-  public static void setSslConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_USE_SSL.varname, "true");
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname,
-            dataFileDir + File.separator + LOCALHOST_KEY_STORE_NAME);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-  }
-
-  public static void setMetastoreSslConf(HiveConf conf) {
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_METASTORE_USE_SSL, true);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH,
-            dataFileDir + File.separator + LOCALHOST_KEY_STORE_NAME);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH,
-            dataFileDir + File.separator + TRUST_STORE_NAME);
-    conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD,
-            KEY_STORE_TRUST_STORE_PASSWORD);
-  }
-
-  public static void clearSslConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_USE_SSL.varname, "false");
-  }
-
-  public static void setHttpConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_HTTP_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH.varname, HS2_HTTP_ENDPOINT);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
-  }
-
-  public static void setBinaryConfOverlay(Map<String, String> confOverlay) {
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE.varname, HS2_BINARY_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname, HS2_BINARY_AUTH_MODE);
-    confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "true");
-  }
-
-  public static void setupTestTableWithData(String tableName, Path dataFilePath,
-      Connection hs2Conn) throws Exception {
-    Statement stmt = hs2Conn.createStatement();
-    stmt.execute("set hive.support.concurrency = false");
-
-    stmt.execute("drop table if exists " + tableName);
-    stmt.execute("create table " + tableName
-        + " (under_col int comment 'the under column', value string)");
-
-    // load data
-    stmt.execute("load data local inpath '"
-        + dataFilePath.toString() + "' into table " + tableName);
-    stmt.close();
-  }
-
-  public static String getDataFileDir() {
-    return dataFileDir;
-  }
-}
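
For reference, a minimal sketch of how a test could have used the helper deleted above to open an SSL JDBC connection. The host, port, database, and credentials are placeholder assumptions; ssl, sslTrustStore, and trustStorePassword are the standard Hive JDBC URL parameters that SSL_CONN_PARAMS assembles.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import org.hadoop.hive.jdbc.SSLTestUtils;

    public class SslConnectExample {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        // Appends ssl=true;sslTrustStore=...;trustStorePassword=... to the URL
        String url = "jdbc:hive2://localhost:10000/default;" + SSLTestUtils.SSL_CONN_PARAMS;
        try (Connection conn = DriverManager.getConnection(url, "user", "")) {
          System.out.println("connected over SSL: " + !conn.isClosed());
        }
      }
    }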

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
index bc00d11..c6a906a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
@@ -26,6 +26,8 @@ public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    hiveConf.setBoolean(
+        HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, true);
     warehouse = new Warehouse(hiveConf);
     try {
       client = new HiveMetaStoreClient(hiveConf);
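
The flag set above makes new warehouse subdirectories inherit the permissions of their parent directory, which the TestHiveMetaStore changes below then assert against a database directory. One detail worth noting: the decimal 488 used there is octal 750 (rwxr-x---). A standalone illustration:

    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermExample {
      public static void main(String[] args) {
        FsPermission dbPermission = new FsPermission((short) 488);
        System.out.println(dbPermission);               // rwxr-x---
        System.out.println(Integer.toOctalString(488)); // 750
      }
    }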

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index b95c25c..af125c3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -171,6 +171,14 @@ public abstract class TestHiveMetaStore extends TestCase {
       db = client.getDatabase(dbName);
       Path dbPath = new Path(db.getLocationUri());
       FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
+      boolean inheritPerms = hiveConf.getBoolVar(
+          HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+      FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
+      if (inheritPerms) {
+         //Set different perms for the database dir for further tests
+         dbPermission = new FsPermission((short)488);
+         fs.setPermission(dbPath, dbPermission);
+      }
 
       client.dropType(typeName);
       Type typ1 = new Type();
@@ -231,6 +239,9 @@ public abstract class TestHiveMetaStore extends TestCase {
         tbl = client.getTable(dbName, tblName);
       }
 
+      assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
+          .getPermission());
+
       Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
       Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
       Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
@@ -248,12 +259,20 @@ public abstract class TestHiveMetaStore extends TestCase {
       assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
       Partition retp = client.add_partition(part);
       assertNotNull("Unable to create partition " + part, retp);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+          .getPermission());
       Partition retp2 = client.add_partition(part2);
       assertNotNull("Unable to create partition " + part2, retp2);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
+          .getPermission());
       Partition retp3 = client.add_partition(part3);
       assertNotNull("Unable to create partition " + part3, retp3);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
+          .getPermission());
       Partition retp4 = client.add_partition(part4);
       assertNotNull("Unable to create partition " + part4, retp4);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
+          .getPermission());
 
       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
       if(isThriftClient) {
@@ -375,6 +394,8 @@ public abstract class TestHiveMetaStore extends TestCase {
       // tested
       retp = client.add_partition(part);
       assertNotNull("Unable to create partition " + part, retp);
+      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+          .getPermission());
 
       // test add_partitions
 
@@ -410,8 +431,9 @@ public abstract class TestHiveMetaStore extends TestCase {
 
       // create dir for /mpart5
       Path mp5Path = new Path(mpart5.getSd().getLocation());
-      warehouse.mkdirs(mp5Path);
+      warehouse.mkdirs(mp5Path, true);
       assertTrue(fs.exists(mp5Path));
+      assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());
 
       // add_partitions(5,4) : err = duplicate keyvals on mpart4
       savedException = null;
@@ -2480,7 +2502,7 @@ public abstract class TestHiveMetaStore extends TestCase {
       //test params
       //test_param_2 = "50"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_2 LIKE \"50\"";
+          "test_param_2 = \"50\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(2, tableNames.size());
@@ -2489,31 +2511,30 @@ public abstract class TestHiveMetaStore extends TestCase {
 
       //test_param_2 = "75"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_2 LIKE \"75\"";
+          "test_param_2 = \"75\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(0, tableNames.size());
 
       //key_dne = "50"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "key_dne LIKE \"50\"";
+          "key_dne = \"50\"";
 
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
       assertEquals(0, tableNames.size());
 
       //test_param_1 != "yellow"
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_1 NOT LIKE \"yellow\"";
+          "test_param_1 <> \"yellow\"";
 
-      // Commenting as part of HIVE-12274 != and <> are not supported for CLOBs
-      // tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
-      // assertEquals(2, tableNames.size());
+      tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
+      assertEquals(2, tableNames.size());
 
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-          "test_param_1 NOT LIKE \"yellow\"";
+          "test_param_1 != \"yellow\"";
 
-      // tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
-      // assertEquals(2, tableNames.size());
+      tableNames = client.listTableNamesByFilter(dbName, filter, (short) 2);
+      assertEquals(2, tableNames.size());
 
       //owner = "testOwner1" and (lastAccessTime = 30 or test_param_1 = "hi")
       filter = org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER +
@@ -2521,7 +2542,7 @@ public abstract class TestHiveMetaStore extends TestCase {
         org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_LAST_ACCESS +
         " = 30 or " +
         org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS +
-        "test_param_1 LIKE \"hi\")";
+        "test_param_1 = \"hi\")";
       tableNames = client.listTableNamesByFilter(dbName, filter, (short)-1);
 
       assertEquals(2, tableNames.size());
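
The filter revisions above restore plain comparison operators in the metastore's table filter language. As a hedged sketch of the API these assertions exercise, assuming an already-connected IMetaStoreClient:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

    public final class FilterExample {
      // Lists tables whose table parameter test_param_2 equals "50".
      static List<String> tablesWithParam(IMetaStoreClient client, String dbName)
          throws Exception {
        String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS
            + "test_param_2 = \"50\"";
        return client.listTableNamesByFilter(dbName, filter, (short) -1); // -1: no limit
      }
    }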

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
index 1002be7..a0f18c6 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -128,7 +128,7 @@ public class TestHiveMetaStoreTxns {
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
         validTxns.isTxnRangeValid(5L, 10L));
 
-    validTxns = new ValidReadTxnList("10:5:4,5,6:");
+    validTxns = new ValidReadTxnList("10:5:4:5:6");
     Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
         validTxns.isTxnRangeValid(4,6));
     Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
@@ -223,15 +223,15 @@ public class TestHiveMetaStoreTxns {
   @Test
   public void stringifyValidTxns() throws Exception {
     // Test with just high water mark
-    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
+    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + ":");
     String asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    Assert.assertEquals("1:" + Long.MAX_VALUE + ":", asString);
     validTxns = new ValidReadTxnList(asString);
     Assert.assertEquals(1, validTxns.getHighWatermark());
     Assert.assertNotNull(validTxns.getInvalidTransactions());
     Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
     asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    Assert.assertEquals("1:" + Long.MAX_VALUE + ":", asString);
     validTxns = new ValidReadTxnList(asString);
     Assert.assertEquals(1, validTxns.getHighWatermark());
     Assert.assertNotNull(validTxns.getInvalidTransactions());
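
Judging by the restored assertions, the older ValidReadTxnList serialization is entirely colon-delimited (highWatermark:minOpenTxn:invalidTxnId:...), whereas the form being reverted away kept comma-separated lists in dedicated fields. A small sketch of reading such a list, under that interpretation:

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class TxnListExample {
      public static void main(String[] args) {
        // high-water mark 10, min open txn 5, invalid txns 4, 5 and 6
        ValidTxnList validTxns = new ValidReadTxnList("10:5:4:5:6");
        System.out.println(validTxns.isTxnValid(3));         // true
        System.out.println(validTxns.isTxnRangeValid(4, 6)); // NONE
        System.out.println(validTxns.getHighWatermark());    // 10
      }
    }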

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
index 7188af6..6d1673d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
@@ -189,13 +189,13 @@ public class TestMetastoreVersion extends TestCase {
 
  //  read the current schema version from the metastore
   private String getVersion(HiveConf conf) throws HiveMetaException {
-    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, "derby");
+    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, conf, "derby");
     return getMetaStoreVersion();
   }
 
   //  write the given version to metastore
   private void setVersion(HiveConf conf, String version) throws HiveMetaException {
-    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, "derby");
+    MetaStoreSchemaInfo schemInfo = new MetaStoreSchemaInfo(metaStoreRoot, conf, "derby");
     setMetaStoreVersion(version, "setVersion test");
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index 3f9eec3..1ac4d01 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -151,15 +151,15 @@ public class TestReplChangeManager {
     Partition part3 = createPartition(dbName, tblName, columns, values, serdeInfo);
     client.add_partition(part3);
 
-    Path part1Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
+    Path part1Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160101")), "part");
     createFile(part1Path, "p1");
     String path1Chksum = ReplChangeManager.getChksumString(part1Path, fs);
 
-    Path part2Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
+    Path part2Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160102")), "part");
     createFile(part2Path, "p2");
     String path2Chksum = ReplChangeManager.getChksumString(part2Path, fs);
 
-    Path part3Path = new Path(warehouse.getDefaultPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
+    Path part3Path = new Path(warehouse.getPartitionPath(db, tblName, ImmutableMap.of("dt", "20160103")), "part");
     createFile(part3Path, "p3");
     String path3Chksum = ReplChangeManager.getChksumString(part3Path, fs);
 
@@ -221,15 +221,15 @@ public class TestReplChangeManager {
 
     client.createTable(tbl);
 
-    Path filePath1 = new Path(warehouse.getDefaultTablePath(db, tblName), "part1");
+    Path filePath1 = new Path(warehouse.getTablePath(db, tblName), "part1");
     createFile(filePath1, "f1");
     String fileChksum1 = ReplChangeManager.getChksumString(filePath1, fs);
 
-    Path filePath2 = new Path(warehouse.getDefaultTablePath(db, tblName), "part2");
+    Path filePath2 = new Path(warehouse.getTablePath(db, tblName), "part2");
     createFile(filePath2, "f2");
     String fileChksum2 = ReplChangeManager.getChksumString(filePath2, fs);
 
-    Path filePath3 = new Path(warehouse.getDefaultTablePath(db, tblName), "part3");
+    Path filePath3 = new Path(warehouse.getTablePath(db, tblName), "part3");
     createFile(filePath3, "f3");
     String fileChksum3 = ReplChangeManager.getChksumString(filePath3, fs);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
deleted file mode 100644
index abf9769..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAutoPurgeTables.java
+++ /dev/null
@@ -1,436 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestAutoPurgeTables {
-  private static final String driverName = "org.apache.hive.jdbc.HiveDriver";
-  private static final String testDbName = "auto_purge_test_db";
-  //private static final String testTableName = "auto_purge_test_table";
-  private static final String INSERT_OVERWRITE_COMMAND_FORMAT =
-      "insert overwrite table " + testDbName + ".%s select 1, \"test\"";
-  private static final String TRUNCATE_TABLE_COMMAND_FORMAT =
-      "truncate table " + testDbName + ".%s";
-  private static final String partitionedColumnName = "partCol";
-  private static final String partitionedColumnValue1 = "20090619";
-  private static final String INSERT_OVERWRITE_COMMAND_PARTITIONED_FORMAT =
-      "insert overwrite table " + testDbName + ".%s PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue1 + ")" + " select 1, \"test\"";
-  private static final String partitionedColumnValue2 = "20100720";
-  private static HiveConf conf;
-  private static Connection con;
-  private static MiniHS2 miniHS2;
-  static final private Logger LOG = LoggerFactory.getLogger("TestAutoPurgeTables");
-
-  @Rule
-  public TestName name = new TestName();
-
-  private static Connection getConnection(String url) throws SQLException {
-    Connection con1;
-    con1 = DriverManager.getConnection(url, "", "");
-    Assert.assertNotNull("Connection is null", con1);
-    Assert.assertFalse("Connection should not be closed", con1.isClosed());
-    return con1;
-  }
-
-  private static void createTestTable(Statement stmt, String isAutopurge, boolean isExternal,
-      boolean isPartitioned, String testTableName) throws SQLException {
-    String createTablePrefix;
-    if (isExternal) {
-      createTablePrefix = "create external table ";
-    } else {
-      createTablePrefix = "create table ";
-    }
-    if (isPartitioned) {
-      // create a partitioned table
-      stmt.execute(createTablePrefix + testDbName + "." + testTableName + " (id int, value string) "
-          + " partitioned by (" + partitionedColumnName + " STRING)");
-      // load data
-      stmt.execute("insert into " + testDbName + "." + testTableName + " PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue1
-          + ") values (1, \"dummy1\"), (2, \"dummy2\"), (3, \"dummy3\")");
-      stmt.execute("insert into " + testDbName + "." + testTableName + " PARTITION ("
-          + partitionedColumnName + "=" + partitionedColumnValue2
-          + ") values (4, \"dummy4\"), (5, \"dummy5\"), (6, \"dummy6\")");
-    } else {
-      // create a table
-      stmt.execute(createTablePrefix + testDbName + "." + testTableName + " (id int, value string)");
-      // load data
-      stmt.execute("insert into " + testDbName + "." + testTableName
-          + " values (1, \"dummy1\"), (2, \"dummy2\"), (3, \"dummy3\")");
-    }
-    if (isAutopurge != null) {
-      stmt.execute("alter table " + testDbName + "." + testTableName
-          + " set tblproperties (\"auto.purge\"=\"" + isAutopurge + "\")");
-    }
-  }
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    conf = new HiveConf(TestAutoPurgeTables.class);
-    // enable trash so it can be tested
-    conf.setFloat("fs.trash.checkpoint.interval", 30);
-    conf.setFloat("fs.trash.interval", 30);
-    // Create test database and base tables once for all the test
-    miniHS2 = new MiniHS2.Builder().withConf(conf).build();
-    miniHS2.start(new HashMap<String, String>());
-    Class.forName(driverName);
-    con = getConnection(miniHS2.getBaseJdbcURL() + ";create=true");
-    try (Statement stmt = con.createStatement()) {
-      Assert.assertNotNull("Statement is null", stmt);
-      stmt.execute("set hive.support.concurrency = false");
-      stmt.execute("drop database if exists " + testDbName + " cascade");
-      stmt.execute("create database " + testDbName);
-    }
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() {
-    Statement stmt = null;
-    try {
-      stmt = con.createStatement();
-      // drop test db and its tables and views
-      stmt.execute("set hive.support.concurrency = false");
-      stmt.execute("drop database if exists " + testDbName + " cascade");
-      FileSystem fs = FileSystem.get(conf);
-      fs.deleteOnExit(ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs));
-    } catch (SQLException | IOException e) {
-      e.printStackTrace();
-    } finally {
-      if (stmt != null) {
-        try {
-          stmt.close();
-        } catch (SQLException e) {
-          //
-        }
-      }
-      if (con != null) {
-        try {
-          con.close();
-        } catch (SQLException e) {
-          //
-        }
-      }
-      if (miniHS2 != null) {
-        miniHS2.cleanup();
-        miniHS2.stop();
-        miniHS2 = null;
-      }
-    }
-  }
-
-  @Before
-  public void afterTest() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs);
-    fs.delete(trashDir, true);
-  }
-
-  /**
-   * Tests if previous table data skips trash when insert overwrite table .. is run against a table
-   * which has the auto.purge property set
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to an invalid string, trash should be used for insert overwrite
-   * queries
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurgeInvalid() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("invalid", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when the auto.purge property is not set. Data should be moved to trash for insert overwrite
-   * queries
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testAutoPurgeUnset() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(null, false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests if the auto.purge property works correctly for external tables. Old data should skip
-   * trash when insert overwrite table .. is run and auto.purge is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", true, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests auto.purge when a managed table is partitioned. Old data should skip trash when insert
-   * overwrite table .. is run and the auto.purge property is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", false, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests auto.purge for an external, partitioned table. Old partition data should skip trash when
-   * auto.purge is set to true
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalPartitionedTable() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("true", true, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false, older data is moved to Trash when insert overwrite table
-   * .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", false, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false on an external table, older data is moved to Trash when
-   * insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", true, false, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false on a partitioned table, older data is moved to Trash when
-   * insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", false, true, false, name.getMethodName());
-  }
-
-  /**
-   * Tests when auto.purge is set to false on a partitioned external table, older data is moved to
-   * Trash when insert overwrite table .. is run
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testPartitionedExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("false", true, true, false, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncatePartitionedExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), true, true, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncateExternalNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), true, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncatePartitionedNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), false, true, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateNoAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(false), false, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateInvalidAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil("invalid", false, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncateUnsetAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(null, false, false, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncatePartitionedExternalAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), true, true, true, name.getMethodName());
-  }
-
-  //truncate on external table is not allowed
-  @Test(expected = SQLException.class)
-  public void testTruncateExternalAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), true, false, true, name.getMethodName());
-  }
-
-  @Test
-  public void testTruncatePartitionedAutoPurge() throws Exception {
-    LOG.info("Running " + name.getMethodName());
-    testUtil(String.valueOf(true), false, true, true, name.getMethodName());
-  }
-
-  /**
-   * Test util method to run the insert overwrite table or truncate table test on a table
-   * 
-   * @param autoPurgePropValue - string value of the auto.purge property for the test table. Ignored
-   *          if null
-   * @param isExternal - if set creates an external table for the test
-   * @param isPartitioned - if set creates a partitioned table for the test
-   * @param isTruncateTest - if set uses truncate table command for the test. Otherwise uses Insert
-   *          overwrite table command for the test
-   * @param testTableName - test table name
-   * @throws Exception
-   */
-  private void testUtil(String autoPurgePropValue, boolean isExternal, boolean isPartitioned,
-      boolean isTruncateTest, String testTableName) throws Exception {
-    testUtil(autoPurgePropValue, isExternal, isPartitioned,
-        !"true".equalsIgnoreCase(autoPurgePropValue), isTruncateTest, testTableName);
-  }
-  /**
-   * Test util method to run the insert overwrite table or truncate table test on a table
-   * 
-   * @param isAutoPurge - If set, creates a table with auto.purge with the given value
-   * @param isExternal - if set creates an external table for the test
-   * @param isPartitioned - if set creates a partitioned table for the test
-   * @param purgeExpected - if set, the assert condition for the test is such that it expects old
-   *          table data to be moved to trash. If not, creates an assert condition to make sure that
-   *          data is not moved to trash
-   * @param isTruncateTest - if set uses truncate table command for the test. Otherwise uses Insert
-   *          overwrite table command for the test
-   * @param testTableName - table name for the test table
-   * @throws Exception
-   */
-  private void testUtil(String isAutoPurge, boolean isExternal, boolean isPartitioned,
-      boolean purgeExpected, boolean isTruncateTest, String testTableName) throws Exception {
-    try (Statement stmt = con.createStatement()) {
-      // create a test table with auto.purge = true
-      createTestTable(stmt, isAutoPurge, isExternal, isPartitioned, testTableName);
-      int numFilesInTrashBefore = getTrashFileCount();
-      String command = getCommand(isTruncateTest, isPartitioned, testTableName);
-      stmt.execute(command);
-      int numFilesInTrashAfter = getTrashFileCount();
-      if (purgeExpected) {
-        Assert.assertTrue(
-            String.format(
-                "Data should have been moved to trash. Number of files in trash: before : %d after %d",
-                numFilesInTrashBefore, numFilesInTrashAfter),
-            numFilesInTrashBefore < numFilesInTrashAfter);
-      } else {
-        Assert.assertEquals(
-            String.format(
-                "Data should not have been moved to trash. Number of files in trash: before : %d after %d",
-                numFilesInTrashBefore, numFilesInTrashAfter),
-            numFilesInTrashBefore, numFilesInTrashAfter);
-      }
-    }
-  }
-
-  private static String getCommand(boolean isTruncateTest, boolean isPartitioned, String testTableName) {
-    if (isTruncateTest) {
-      return String.format(TRUNCATE_TABLE_COMMAND_FORMAT, testTableName);
-    } else if (isPartitioned) {
-      return String.format(INSERT_OVERWRITE_COMMAND_PARTITIONED_FORMAT, testTableName);
-    } else {
-      return String.format(INSERT_OVERWRITE_COMMAND_FORMAT, testTableName);
-    }
-  }
-
-  private int getTrashFileCount() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(conf, fs);
-    return getFileCount(fs, trashDir);
-  }
-
-  private int getFileCount(FileSystem fs, Path path) throws Exception {
-    try {
-      int count = 0;
-      if (!fs.exists(path)) {
-        return count;
-      }
-      RemoteIterator<LocatedFileStatus> lfs = fs.listFiles(path, true);
-      while (lfs.hasNext()) {
-        LocatedFileStatus lf = lfs.next();
-        LOG.info(lf.getPath().toString());
-        if (lf.isFile()) {
-          count++;
-        }
-      }
-      return count;
-    } catch (IOException e) {
-      throw new Exception("Exception while list files on " + path, e);
-    }
-  }
-}
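
The suite deleted above pivots on the auto.purge table property: when it is "true", data displaced by insert overwrite or truncate bypasses the HDFS trash; when it is false, unset, or unparseable, the old files are moved to trash first. A condensed sketch of the scenario, assuming an open JDBC Statement against the test database:

    import java.sql.Statement;

    public class AutoPurgeSketch {
      static void overwriteSkippingTrash(Statement stmt) throws Exception {
        stmt.execute("create table auto_purge_test_db.t (id int, value string)");
        stmt.execute("alter table auto_purge_test_db.t"
            + " set tblproperties (\"auto.purge\"=\"true\")");
        // With auto.purge=true the files replaced here skip the trash;
        // without it, the trash file count would grow instead.
        stmt.execute("insert overwrite table auto_purge_test_db.t select 1, \"test\"");
      }
    }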

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index ce8fe60..bfb25aa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -52,7 +52,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
   private static final String Table4Name = "table4_nondefault_nn";
   private static final String Table5Name = "table5_nondefault_nn";
   private static final String Table6Name = "table6_nondefault_nn";
-  private static final String Table7Name = "table7_nondefault_nn";
   private static final String Index1Name = "index1_table1_nondefault_nn";
   private static final String Index2Name = "index2_table1_nondefault_nn";
   private static final String tmpdir = System.getProperty("test.tmp.dir");
@@ -198,27 +197,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     }
   }
 
-  private void alterPartitionAndCheck(Table table, String column,
-      String value, String location) throws CommandNeedRetryException, HiveException {
-    assertNotNull(location);
-    executeQuery("ALTER TABLE " + table.getTableName() +
-        " PARTITION (" + column + "='" + value + "')" +
-        " SET LOCATION '" + location + "'");
-    HashMap<String, String> partitions = new HashMap<String, String>();
-    partitions.put(column, value);
-    Partition partition = db.getPartition(table, partitions, false);
-    assertNotNull("Partition object is expected for " + table.getTableName() , partition);
-    String locationActual = partition.getLocation();
-    if (new Path(location).toUri().getScheme() != null) {
-      assertEquals("Partition should be located in the first filesystem",
-          fs.makeQualified(new Path(location)).toString(), locationActual);
-    }
-    else {
-      assertEquals("Partition should be located in the second filesystem",
-          fs2.makeQualified(new Path(location)).toString(), locationActual);
-    }
-  }
-
   private Table createTableAndCheck(String tableName, String tableLocation)
           throws CommandNeedRetryException, HiveException, URISyntaxException {
     return createTableAndCheck(null, tableName, tableLocation);
@@ -316,15 +294,6 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
     createTableAndCheck(table1, Table6Name, null);
   }
 
-  public void testAlterPartitionSetLocationNonDefaultNameNode() throws Exception {
-    assertTrue("Test suite should have been initialized", isInitialized);
-    String tableLocation = tmppathFs2 + "/" + "test_set_part_loc";
-    Table table = createTableAndCheck(Table7Name, tableLocation);
-
-    addPartitionAndCheck(table, "p", "p1", "/tmp/test/1");
-    alterPartitionAndCheck(table, "p", "p1", "/tmp/test/2");
-  }
-
   public void testCreateDatabaseWithTableNonDefaultNameNode() throws Exception {
     assertTrue("Test suite should be initialied", isInitialized );
     final String tableLocation = tmppathFs2 + "/" + Table3Name;

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
index c17ca10..0688846 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestLocationQueries.java
@@ -53,11 +53,10 @@ public class TestLocationQueries extends BaseTestQueries {
      * @return non-zero if it failed
      */
     @Override
-    public QTestProcessExecResult checkCliDriverResults(String tname) throws Exception {
+    public int checkCliDriverResults(String tname) throws Exception {
       File logFile = new File(logDir, tname + ".out");
 
       int failedCount = 0;
-      StringBuilder fileNames = new StringBuilder("Files failing the location check:");
       FileReader fr = new FileReader(logFile);
       BufferedReader in = new BufferedReader(fr);
       try {
@@ -70,20 +69,19 @@ public class TestLocationQueries extends BaseTestQueries {
             File f = new File(m.group(1));
             if (!f.getName().equals(locationSubdir)) {
               failedCount++;
-              fileNames.append(f.getName()).append("\r\n");
             }
             locationCount++;
           }
         }
        // we always have to find at least one location, otherwise the test is useless
         if (locationCount == 0) {
-          return QTestProcessExecResult.create(Integer.MAX_VALUE, "0 locations tested");
+          return Integer.MAX_VALUE;
         }
       } finally {
         in.close();
       }
 
-      return QTestProcessExecResult.create(failedCount, fileNames.toString());
+      return failedCount;
     }
 
     public CheckResults(String outDir, String logDir, MiniClusterType miniMr,

http://git-wip-us.apache.org/repos/asf/hive/blob/ed64a74e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
deleted file mode 100644
index 191d4a3..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.apache.hive.service.cli.HiveSQLException;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * This unit test is for testing HIVE-13884 with more complex queries and
- * hive.metastore.limit.partition.request enabled.
- * It covers cases when the query predicates can be pushed down and the
- * number of partitions can be retrieved via directSQL.
- * It also covers cases when the number of partitions cannot be retrieved
- * via directSQL, so it falls back to ORM.
- */
-public class TestMetaStoreLimitPartitionRequest {
-
-  private static final String DB_NAME = "max_partition_test_db";
-  private static final String TABLE_NAME = "max_partition_test_table";
-  private static int PARTITION_REQUEST_LIMIT = 4;
-  private static MiniHS2 miniHS2 = null;
-  private static HiveConf conf;
-  private Connection hs2Conn = null;
-  private Statement stmt;
-
-  @BeforeClass
-  public static void beforeTest() throws Exception {
-    Class.forName(MiniHS2.getJdbcDriverName());
-    conf = new HiveConf();
-    DriverManager.setLoginTimeout(0);
-
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, PARTITION_REQUEST_LIMIT);
-    conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true);
-    conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true);
-    conf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true);
-    conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false);
-
-    miniHS2 = new MiniHS2.Builder().withConf(conf).build();
-    Map<String, String> overlayProps = new HashMap<String, String>();
-    miniHS2.start(overlayProps);
-    createDb();
-  }
-
-  private static void createDb() throws Exception {
-    Connection conn =
-        DriverManager.getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), "bar");
-    Statement stmt2 = conn.createStatement();
-    stmt2.execute("DROP DATABASE IF EXISTS " + DB_NAME + " CASCADE");
-    stmt2.execute("CREATE DATABASE " + DB_NAME);
-    stmt2.close();
-    conn.close();
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL(DB_NAME),
-        System.getProperty("user.name"), "bar");
-    stmt = hs2Conn.createStatement();
-    stmt.execute("USE " + DB_NAME);
-    createTable();
-  }
-
-  private void createTable() throws Exception {
-    String tmpTableName = TABLE_NAME + "_tmp";
-    stmt.execute("CREATE TABLE " + tmpTableName
-        + " (id string, value string, num string, ds date) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LINES TERMINATED BY '\n' STORED AS TEXTFILE");
-    stmt.execute("INSERT OVERWRITE TABLE " + tmpTableName
-        + " VALUES ('1', 'value1', '25', '2008-04-09'), ('2', 'value2', '30', '2008-04-09'), "
-        + "('3', 'value3', '35', '2008-04-09'), ('4', 'value4', '40', '2008-04-09'), "
-        + "('5', 'value5', '25', '2008-05-09'), ('6', 'value6', '30', '2008-05-09'), "
-        + "('7', 'value7', '35', '2008-05-09'), ('8', 'value8', '40', '2008-05-09'), "
-        + "('9', 'value9', '25', '2009-04-09'), ('10', 'value10', '30', '2009-04-09'), "
-        + "('11', 'value11', '35', '2009-04-09'), ('12', 'value12', '40', '2009-04-09')");
-
-    stmt.execute("CREATE TABLE " + TABLE_NAME + " (id string, value string) PARTITIONED BY (num string, ds date)");
-    stmt.execute("INSERT OVERWRITE TABLE " + TABLE_NAME + " PARTITION (num, ds) SELECT id, value, num, ds FROM " + tmpTableName);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    String tmpTableName = TABLE_NAME + "_tmp";
-    stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME);
-    stmt.execute("DROP TABLE IF EXISTS " + tmpTableName);
-    stmt.execute("DROP TABLE IF EXISTS " + TABLE_NAME + "_num_tmp");
-
-    if (hs2Conn != null) {
-      hs2Conn.close();
-    }
-  }
-
-  @AfterClass
-  public static void afterTest() throws Exception {
-    if (miniHS2 != null && miniHS2.isStarted()) {
-      miniHS2.stop();
-    }
-  }
-
-  /* Tests with queries which can be pushed down and executed with directSQL */
-
-  @Test
-  public void testSimpleQueryWithDirectSql() throws Exception {
-    String queryString = "select value from %s where num='25' and ds='2008-04-09'";
-    executeQuery(queryString, "value1");
-  }
-
-  @Test
-  public void testMoreComplexQueryWithDirectSql() throws Exception {
-    String queryString = "select value from %s where (ds between '2009-01-01' and '2009-12-31' and num='25') or (ds between '2008-01-01' and '2008-12-31' and num='30')";
-    executeQuery(queryString, "value2", "value6", "value9");
-  }
-
-  /*
-   * Tests with queries which can be pushed down and executed with directSQL, but the number of
-   * partitions which should be fetched is bigger than the maximum set by the
-   * hive.metastore.limit.partition.request parameter.
-   */
-
-  @Test
-  public void testSimpleQueryWithDirectSqlTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where ds>'2008-04-20'";
-    executeQueryExceedPartitionLimit(queryString, 8);
-  }
-
-  @Test
-  public void testMoreComplexQueryWithDirectSqlTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where num='25' or (num='30' and ds between '2008-01-01' and '2008-12-31')";
-    executeQueryExceedPartitionLimit(queryString, 5);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because of type mismatch. The type
-   * of the num column is string, but the parameters used in the where clause are numbers. After
-   * falling back to ORM, the number of partitions can be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method.
-   */
-
-  @Test
-  public void testQueryWithFallbackToORM1() throws Exception {
-    String queryString = "select value from %s where num!=25 and num!=35 and num!=40";
-    executeQuery(queryString, "value2", "value6", "value10");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions1() throws Exception {
-    String queryString = "select value from %s where num=30 or num=25";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because of type mismatch. The type
-   * of the num column is string, but the parameters used in the where clause are numbers. After
-   * falling back to ORM the number of partitions cannot be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method. They are fetched by the
-   * ObjectStore.getPartitionNamesPrunedByExprNoTxn method.
-   */
-
-  @Test
-  public void testQueryWithFallbackToORM2() throws Exception {
-    String queryString = "select value from %s where num!=25 and ds='2008-04-09'";
-    executeQuery(queryString, "value2", "value3", "value4");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORM3() throws Exception {
-    String queryString = "select value from %s where num between 26 and 31";
-    executeQuery(queryString, "value2", "value6", "value10");
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions2() throws Exception {
-    String queryString = "select value from %s where num!=25 and (ds='2008-04-09' or ds='2008-05-09')";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions3() throws Exception {
-    String queryString = "select value from %s where num>=30";
-    executeQueryExceedPartitionLimit(queryString, 9);
-  }
-
-  @Test
-  public void testQueryWithFallbackToORMTooManyPartitions4() throws Exception {
-    String queryString = "select value from %s where num between 20 and 50";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  /*
-   * Tests with queries which cannot be executed with directSQL, because they contain LIKE or IN.
-   * After falling back to ORM the number of partitions cannot be fetched by the
-   * ObjectStore.getNumPartitionsViaOrmFilter method. They are fetched by the
-   * ObjectStore.getPartitionNamesPrunedByExprNoTxn method.
-   */
-
-  @Test
-  public void testQueryWithInWithFallbackToORM() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where ds='2008-04-09' and a.num in (select value from " + TABLE_NAME + "_num_tmp)";
-    executeQuery(queryString, "value1", "value2");
-  }
-
-  @Test
-  public void testQueryWithInWithFallbackToORMTooManyPartitions() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where a.num in (select value from " + TABLE_NAME + "_num_tmp)";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  @Test
-  public void testQueryWithInWithFallbackToORMTooManyPartitions2() throws Exception {
-    setupNumTmpTable();
-    String queryString = "select value from %s a where a.num in (select value from " + TABLE_NAME + "_num_tmp where value='25')";
-    executeQueryExceedPartitionLimit(queryString, 12);
-  }
-
-  @Test
-  public void testQueryWithLikeWithFallbackToORMTooManyPartitions() throws Exception {
-    String queryString = "select value from %s where num like '3%%'";
-    executeQueryExceedPartitionLimit(queryString, 6);
-  }
-
-  private void setupNumTmpTable() throws SQLException {
-    stmt.execute("CREATE TABLE " + TABLE_NAME + "_num_tmp (value string)");
-    stmt.execute("INSERT INTO " + TABLE_NAME + "_num_tmp VALUES ('25')");
-    stmt.execute("INSERT INTO " + TABLE_NAME + "_num_tmp VALUES ('30')");
-  }
-
-  private void executeQuery(String query, String... expectedValues) throws SQLException {
-    String queryStr = String.format(query, TABLE_NAME);
-    ResultSet result = stmt.executeQuery(queryStr);
-    assertTrue(result != null);
-    Set<String> expectedValueSet = new HashSet<>(Arrays.asList(expectedValues));
-    Set<String> resultValues = getResultValues(result);
-    String errorMsg = getWrongResultErrorMsg(queryStr, expectedValueSet.toString(), resultValues.toString());
-    assertTrue(errorMsg, resultValues.equals(expectedValueSet));
-  }
-
-  private Set<String> getResultValues(ResultSet result) throws SQLException {
-    Set<String> resultValues = new HashSet<>();
-    while(result.next()) {
-      resultValues.add(result.getString(1));
-    }
-    return resultValues;
-  }
-
-  private void executeQueryExceedPartitionLimit(String query, int expectedPartitionNumber) throws Exception {
-    try {
-      String queryStr = String.format(query, TABLE_NAME);
-      stmt.executeQuery(queryStr);
-      fail("The query should have failed, because the number of requested partitions are bigger than "
-              + PARTITION_REQUEST_LIMIT);
-    } catch (HiveSQLException e) {
-      String exceedLimitMsg = String.format(HiveMetaStore.PARTITION_NUMBER_EXCEED_LIMIT_MSG, expectedPartitionNumber,
-          TABLE_NAME, PARTITION_REQUEST_LIMIT, ConfVars.METASTORE_LIMIT_PARTITION_REQUEST.varname);
-      assertTrue(getWrongExceptionMessage(exceedLimitMsg, e.getMessage()),
-          e.getMessage().contains(exceedLimitMsg.toString()));
-    }
-  }
-
-  private String getWrongResultErrorMsg(String query, String expectedValues, String resultValues) {
-    StringBuilder errorMsg = new StringBuilder();
-    errorMsg.append("The query '");
-    errorMsg.append(query);
-    errorMsg.append("' returned wrong values. It returned the values ");
-    errorMsg.append(resultValues);
-    errorMsg.append(" instead of the expected ");
-    errorMsg.append(expectedValues);
-    return errorMsg.toString();
-  }
-
-  private String getWrongExceptionMessage(String exceedLimitMsg, String exceptionMessage) {
-    StringBuilder errorMsg = new StringBuilder();
-    errorMsg.append("The message of the exception doesn't contain the expected '");
-    errorMsg.append(exceedLimitMsg.toString());
-    errorMsg.append("'. It is: ");
-    errorMsg.append(exceptionMessage);
-    return errorMsg.toString();
-  }
-
-}
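
For context on what the deleted suite configured: hive.metastore.limit.partition.request caps how many partitions a single metastore call may fetch, so queries that prune to more than the cap fail fast instead of flooding the metastore. A minimal sketch of the setup the test used:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class PartitionLimitSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Reject any request that would fetch more than 4 partitions.
        conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, 4);
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true);
        System.out.println(
            conf.getIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST));
      }
    }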