Posted to commits@drill.apache.org by pr...@apache.org on 2017/11/15 01:46:53 UTC

[07/22] drill git commit: DRILL-5783, DRILL-5841, DRILL-5894: Rationalize test temp directories

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetMetadataCache.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetMetadataCache.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetMetadataCache.java
index 1be8352..463021f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetMetadataCache.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetMetadataCache.java
@@ -17,19 +17,15 @@
  */
 package org.apache.drill.exec.store.parquet;
 
-import com.google.common.io.Resources;
 import mockit.Mock;
 import mockit.MockUp;
-import mockit.integration.junit4.JMockit;
 import com.google.common.collect.Lists;
+import mockit.integration.junit4.JMockit;
 import org.apache.drill.PlanTestBase;
 import org.apache.drill.categories.UnlikelyTest;
-import org.apache.drill.common.util.TestTools;
 import org.apache.commons.io.FileUtils;
 import org.apache.drill.exec.store.dfs.MetadataContext;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
-import org.apache.hadoop.fs.Path;
-import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -37,7 +33,10 @@ import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 
 import java.io.File;
+import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.List;
 
 import static org.junit.Assert.assertEquals;
@@ -46,42 +45,23 @@ import static org.junit.Assert.assertTrue;
 
 @RunWith(JMockit.class)
 public class TestParquetMetadataCache extends PlanTestBase {
-  private static final String WORKING_PATH = TestTools.getWorkingPath();
-  private static final String TEST_RES_PATH = WORKING_PATH + "/src/test/resources";
-  private static final String tableName1 = "parquetTable1";
-  private static final String tableName2 = "parquetTable2";
-  private static File dataDir1;
-  private static File dataDir2;
-
+  private static final String TABLE_NAME_1 = "parquetTable1";
+  private static final String TABLE_NAME_2 = "parquetTable2";
 
   @BeforeClass
   public static void copyData() throws Exception {
-    // copy the data into the temporary location
-    String tmpLocation = getDfsTestTmpSchemaLocation();
-    dataDir1 = new File(tmpLocation, tableName1);
-    dataDir1.mkdir();
-    FileUtils.copyDirectory(new File(String.format(String.format("%s/multilevel/parquet", TEST_RES_PATH))),
-        dataDir1);
-
-    dataDir2 = new File(tmpLocation, tableName2);
-    dataDir2.mkdir();
-    FileUtils.copyDirectory(new File(String.format(String.format("%s/multilevel/parquet2", TEST_RES_PATH))),
-        dataDir2);
-  }
-
-  @AfterClass
-  public static void cleanupTestData() throws Exception {
-    FileUtils.deleteQuietly(dataDir1);
-    FileUtils.deleteQuietly(dataDir2);
+    dirTestWatcher.copyResourceToRoot(Paths.get("multilevel"));
+    dirTestWatcher.copyResourceToRoot(Paths.get("multilevel/parquet"), Paths.get(TABLE_NAME_1));
+    dirTestWatcher.copyResourceToRoot(Paths.get("multilevel/parquet2"), Paths.get(TABLE_NAME_2));
+    dirTestWatcher.copyResourceToRoot(Paths.get("parquet"));
   }
 
   @Test
   public void testPartitionPruningWithMetadataCache_1() throws Exception {
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName1));
-    checkForMetadataFile(tableName1);
-    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1994 and dir1 in ('Q1', 'Q2')",
-        getDfsTestTmpSchemaLocation(), tableName1);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_1);
+    checkForMetadataFile(TABLE_NAME_1);
+    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1994 and dir1 in ('Q1', 'Q2')", TABLE_NAME_1);
     int expectedRowCount = 20;
     int expectedNumFiles = 2;
 
@@ -89,7 +69,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1994", getDfsTestTmpSchemaLocation(), tableName1);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1994", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_1);
     PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
   }
@@ -97,11 +77,10 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test // DRILL-3917, positive test case for DRILL-4530
   @Category(UnlikelyTest.class)
   public void testPartitionPruningWithMetadataCache_2() throws Exception {
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName1));
-    checkForMetadataFile(tableName1);
-    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1994",
-        getDfsTestTmpSchemaLocation(), tableName1);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_1);
+    checkForMetadataFile(TABLE_NAME_1);
+    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1994", TABLE_NAME_1);
     int expectedRowCount = 40;
     int expectedNumFiles = 4;
 
@@ -109,7 +88,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1994", getDfsTestTmpSchemaLocation(), tableName1);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1994", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_1);
     PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {"Filter"});
   }
@@ -118,11 +97,10 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testPartitionPruningWithMetadataCache_3() throws Exception {
     String tableName = "orders_ctas_varchar";
-    test("use dfs_test.tmp");
-
-    test(String.format("create table %s (o_orderdate, o_orderpriority) partition by (o_orderpriority) "
-        + "as select o_orderdate, o_orderpriority from dfs_test.`%s/multilevel/parquet/1994/Q1`", tableName, TEST_RES_PATH));
-    test(String.format("refresh table metadata %s", tableName));
+    test("use dfs");
+    test("create table %s (o_orderdate, o_orderpriority) partition by (o_orderpriority) "
+        + "as select o_orderdate, o_orderpriority from dfs.`multilevel/parquet/1994/Q1`", tableName);
+    test("refresh table metadata %s", tableName);
     checkForMetadataFile(tableName);
     String query = String.format("select * from %s where o_orderpriority = '1-URGENT'", tableName);
     int expectedRowCount = 3;
@@ -140,12 +118,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testPartitionPruningWithMetadataCache_4() throws Exception {
     String tableName = "orders_ctas_binary";
-    test("use dfs_test.tmp");
+    test("use dfs");
 
-    test(String.format("create table %s (o_orderdate, o_orderpriority) partition by (o_orderpriority) "
+    test("create table %s (o_orderdate, o_orderpriority) partition by (o_orderpriority) "
         + "as select o_orderdate, convert_to(o_orderpriority, 'UTF8') as o_orderpriority "
-        + "from dfs_test.`%s/multilevel/parquet/1994/Q1`", tableName, TEST_RES_PATH));
-    test(String.format("refresh table metadata %s", tableName));
+        + "from dfs.`multilevel/parquet/1994/Q1`", tableName);
+    test("refresh table metadata %s", tableName);
     checkForMetadataFile(tableName);
     String query = String.format("select * from %s where o_orderpriority = '1-URGENT'", tableName);
     int expectedRowCount = 3;
@@ -162,10 +140,10 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test
   public void testCache() throws Exception {
     String tableName = "nation_ctas";
-    test("use dfs_test.tmp");
-    test(String.format("create table `%s/t1` as select * from cp.`tpch/nation.parquet`", tableName));
-    test(String.format("create table `%s/t2` as select * from cp.`tpch/nation.parquet`", tableName));
-    test(String.format("refresh table metadata %s", tableName));
+    test("use dfs");
+    test("create table `%s/t1` as select * from cp.`tpch/nation.parquet`", tableName);
+    test("create table `%s/t2` as select * from cp.`tpch/nation.parquet`", tableName);
+    test("refresh table metadata %s", tableName);
     checkForMetadataFile(tableName);
     String query = String.format("select * from %s", tableName);
     int rowCount = testSql(query);
@@ -176,12 +154,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test
   public void testUpdate() throws Exception {
     String tableName = "nation_ctas_update";
-    test("use dfs_test.tmp");
-    test(String.format("create table `%s/t1` as select * from cp.`tpch/nation.parquet`", tableName));
-    test(String.format("refresh table metadata %s", tableName));
+    test("use dfs");
+    test("create table `%s/t1` as select * from cp.`tpch/nation.parquet`", tableName);
+    test("refresh table metadata %s", tableName);
     checkForMetadataFile(tableName);
     Thread.sleep(1000);
-    test(String.format("create table `%s/t2` as select * from cp.`tpch/nation.parquet`", tableName));
+    test("create table `%s/t2` as select * from cp.`tpch/nation.parquet`", tableName);
     int rowCount = testSql(String.format("select * from %s", tableName));
     Assert.assertEquals(50, rowCount);
   }
@@ -189,23 +167,23 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test
   public void testCacheWithSubschema() throws Exception {
     String tableName = "nation_ctas_subschema";
-    test(String.format("create table dfs_test.tmp.`%s/t1` as select * from cp.`tpch/nation.parquet`", tableName));
-    test(String.format("refresh table metadata dfs_test.tmp.%s", tableName));
+    test("create table dfs.`%s/t1` as select * from cp.`tpch/nation.parquet`", tableName);
+    test("refresh table metadata dfs.%s", tableName);
     checkForMetadataFile(tableName);
-    int rowCount = testSql(String.format("select * from dfs_test.tmp.%s", tableName));
+    int rowCount = testSql(String.format("select * from dfs.%s", tableName));
     Assert.assertEquals(25, rowCount);
   }
 
   @Test
   public void testFix4449() throws Exception {
-    runSQL("CREATE TABLE dfs_test.tmp.`4449` PARTITION BY(l_discount) AS SELECT l_orderkey, l_discount FROM cp.`tpch/lineitem.parquet`");
-    runSQL("REFRESH TABLE METADATA dfs_test.tmp.`4449`");
+    runSQL("CREATE TABLE dfs.tmp.`4449` PARTITION BY(l_discount) AS SELECT l_orderkey, l_discount FROM cp.`tpch/lineitem.parquet`");
+    runSQL("REFRESH TABLE METADATA dfs.tmp.`4449`");
 
     testBuilder()
       .sqlQuery("SELECT COUNT(*) cnt FROM (" +
-        "SELECT l_orderkey FROM dfs_test.tmp.`4449` WHERE l_discount < 0.05" +
+        "SELECT l_orderkey FROM dfs.tmp.`4449` WHERE l_discount < 0.05" +
         " UNION ALL" +
-        " SELECT l_orderkey FROM dfs_test.tmp.`4449` WHERE l_discount > 0.02)")
+        " SELECT l_orderkey FROM dfs.tmp.`4449` WHERE l_discount > 0.02)")
       .unOrdered()
       .baselineColumns("cnt")
       .baselineValues(71159L)
@@ -215,10 +193,10 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test
   public void testAbsentPluginOrWorkspaceError() throws Exception {
     testBuilder()
-        .sqlQuery("refresh table metadata dfs_test.incorrect.table_name")
+        .sqlQuery("refresh table metadata dfs.incorrect.table_name")
         .unOrdered()
         .baselineColumns("ok", "summary")
-        .baselineValues(false, "Storage plugin or workspace does not exist [dfs_test.incorrect]")
+        .baselineValues(false, "Storage plugin or workspace does not exist [dfs.incorrect]")
         .go();
 
     testBuilder()
@@ -232,15 +210,17 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test //DRILL-4511
   @Category(UnlikelyTest.class)
   public void testTableDoesNotExistWithEmptyDirectory() throws Exception {
-    File path = new File(getTempDir("empty_directory"));
-    String pathString = path.toURI().getPath();
+    String dirName = "empty_directory";
+    File path = new File(dirTestWatcher.getRootDir(), dirName);
+    path.mkdirs();
+
     try {
       path.mkdir();
       testBuilder()
-          .sqlQuery("refresh table metadata dfs.`%s`", pathString)
+          .sqlQuery("refresh table metadata dfs.`%s`", dirName)
           .unOrdered()
           .baselineColumns("ok", "summary")
-          .baselineValues(false, String.format("Table %s does not exist.", pathString))
+          .baselineValues(false, String.format("Table %s does not exist.", dirName))
           .go();
     } finally {
       FileUtils.deleteQuietly(path);
@@ -274,13 +254,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4530_1() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1995 and dir1='Q3'",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1995 and dir1='Q3'", TABLE_NAME_2);
     int expectedRowCount = 20;
     int expectedNumFiles = 2;
 
@@ -288,7 +267,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995/Q3", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995/Q3", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {"Filter"});
   }
@@ -297,13 +276,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4530_2() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1995",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1995", TABLE_NAME_2);
     int expectedRowCount = 80;
     int expectedNumFiles = 8;
 
@@ -311,7 +289,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {"Filter"});
   }
@@ -320,13 +298,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4530_3() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir1='Q3'",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir1='Q3'", TABLE_NAME_2);
     int expectedRowCount = 40;
     int expectedNumFiles = 4;
 
@@ -334,7 +311,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
   }
@@ -343,13 +320,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4530_4() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1995 and dir1='Q6'",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1995 and dir1='Q6'", TABLE_NAME_2);
     int expectedRowCount = 0;
     int expectedNumFiles = 1;
 
@@ -357,7 +333,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/*/*", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/*/*", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
   }
@@ -365,11 +341,10 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test // DRILL-4794
   @Category(UnlikelyTest.class)
   public void testDrill4794() throws Exception {
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName1));
-    checkForMetadataFile(tableName1);
-    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1994 or dir1='Q3'",
-        getDfsTestTmpSchemaLocation(), tableName1);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_1);
+    checkForMetadataFile(TABLE_NAME_1);
+    String query = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1994 or dir1='Q3'", TABLE_NAME_1);
 
     int expectedRowCount = 60;
     int expectedNumFiles = 6;
@@ -378,7 +353,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), tableName1);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_1);
     PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
   }
@@ -387,13 +362,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4786_1() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0=1995 and dir1 in ('Q1', 'Q2')",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0=1995 and dir1 in ('Q1', 'Q2')", TABLE_NAME_2);
 
     int expectedRowCount = 40;
     int expectedNumFiles = 4;
@@ -402,7 +376,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s/1995", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
 
@@ -412,13 +386,12 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Category(UnlikelyTest.class)
   public void testDrill4786_2() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs_test.`%s/%s` " +
-            " where dir0 in (1994, 1995) and dir1 = 'Q3'",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select dir0, dir1, o_custkey, o_orderdate from dfs.`%s` " +
+            " where dir0 in (1994, 1995) and dir1 = 'Q3'", TABLE_NAME_2);
 
     int expectedRowCount = 40;
     int expectedNumFiles = 4;
@@ -427,22 +400,20 @@ public class TestParquetMetadataCache extends PlanTestBase {
     assertEquals(expectedRowCount, actualRowCount);
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
-
   }
 
   @Test // DRILL-4877
   @Category(UnlikelyTest.class)
   public void testDrill4877() throws Exception {
     // create metadata cache
-    test(String.format("refresh table metadata dfs_test.`%s/%s`", getDfsTestTmpSchemaLocation(), tableName2));
-    checkForMetadataFile(tableName2);
+    test("refresh table metadata dfs.`%s`", TABLE_NAME_2);
+    checkForMetadataFile(TABLE_NAME_2);
 
     // run query and check correctness
-    String query1 = String.format("select max(dir0) as max0, max(dir1) as max1 from dfs_test.`%s/%s` ",
-        getDfsTestTmpSchemaLocation(), tableName2);
+    String query1 = String.format("select max(dir0) as max0, max(dir1) as max1 from dfs.`%s` ", TABLE_NAME_2);
 
     testBuilder()
     .sqlQuery(query1)
@@ -455,7 +426,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
     String numFilesPattern = "numFiles=" + expectedNumFiles;
     String usedMetaPattern = "usedMetadataFile=true";
-    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), tableName2);
+    String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getRootDir().getCanonicalPath(), TABLE_NAME_2);
     PlanTestBase.testPlanMatchingPatterns(query1, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
         new String[] {});
 
@@ -466,13 +437,13 @@ public class TestParquetMetadataCache extends PlanTestBase {
     final String tableName = "nation_move";
     final String newTableName = "nation_moved";
     try {
-      test("use dfs_test.tmp");
+      test("use dfs");
       test("create table `%s/t1` as select * from cp.`tpch/nation.parquet`", tableName);
       test("create table `%s/t2` as select * from cp.`tpch/nation.parquet`", tableName);
       test("refresh table metadata %s", tableName);
       checkForMetadataFile(tableName);
-      File srcFile = new File(getDfsTestTmpSchemaLocation(), tableName);
-      File dstFile = new File(getDfsTestTmpSchemaLocation(), newTableName);
+      File srcFile = new File(dirTestWatcher.getRootDir(), tableName);
+      File dstFile = new File(dirTestWatcher.getRootDir(), newTableName);
       FileUtils.moveDirectory(srcFile, dstFile);
       assertFalse("Cache file was not moved successfully", srcFile.exists());
       int rowCount = testSql(String.format("select * from %s", newTableName));
@@ -484,28 +455,49 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test
   public void testOldMetadataVersions() throws Exception {
-    final String tablePath = "absolute_paths_metadata";
-    String rootMetadataPath =  new Path("parquet", "metadata_files_with_old_versions").toUri().getPath();
+    final Path tablePath = Paths.get("absolute_paths_metadata");
+    final Path rootMetadataPath = Paths.get("parquet", "metadata_files_with_old_versions");
     // gets folders with different metadata cache versions
-    String[] metadataPaths = new File(Resources.getResource(rootMetadataPath).getFile()).list();
+    String[] metadataPaths = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(rootMetadataPath)
+      .toFile()
+      .list();
+
     for (String metadataPath : metadataPaths) {
       try {
-        test("use dfs_test.tmp");
+        test("use dfs.tmp");
         // creating two inner directories to leverage METADATA_DIRECTORIES_FILENAME metadata file as well
-        final String absolutePathsMetadataT1 = new Path(tablePath, "t1").toUri().getPath();
-        final String absolutePathsMetadataT2 = new Path(tablePath, "t2").toUri().getPath();
+        final Path absolutePathsMetadataT1 = tablePath.resolve("t1");
+        final Path absolutePathsMetadataT2 = tablePath.resolve("t2");
+
         String createQuery = "create table `%s` as select * from cp.`tpch/nation.parquet`";
+
         test(createQuery, absolutePathsMetadataT1);
         test(createQuery, absolutePathsMetadataT2);
-        Path relativePath = new Path(rootMetadataPath, metadataPath);
-        copyMetaDataCacheToTempReplacingInternalPaths(new Path(relativePath, "metadata_directories.requires_replace.txt"),
-                                                      tablePath, Metadata.METADATA_DIRECTORIES_FILENAME);
-        copyMetaDataCacheToTempReplacingInternalPaths(new Path(relativePath, "metadata_table.requires_replace.txt"),
-                                                      tablePath, Metadata.METADATA_FILENAME);
-        copyMetaDataCacheToTempReplacingInternalPaths(new Path(relativePath, "metadata_table_t1.requires_replace.txt"),
-                                                      absolutePathsMetadataT1, Metadata.METADATA_FILENAME);
-        copyMetaDataCacheToTempReplacingInternalPaths(new Path(relativePath, "metadata_table_t2.requires_replace.txt"),
-                                                      absolutePathsMetadataT2, Metadata.METADATA_FILENAME);
+
+        Path relativePath = rootMetadataPath.resolve(metadataPath);
+
+        File metaFile = dirTestWatcher.copyResourceToTestTmp(
+          relativePath.resolve("metadata_directories.requires_replace.txt"),
+          tablePath.resolve(Metadata.METADATA_DIRECTORIES_FILENAME));
+        dirTestWatcher.replaceMetaDataContents(metaFile, dirTestWatcher.getDfsTestTmpDir(),
+          null);
+        metaFile = dirTestWatcher.copyResourceToTestTmp(
+          relativePath.resolve("metadata_table.requires_replace.txt"),
+          tablePath.resolve(Metadata.METADATA_FILENAME));
+        dirTestWatcher.replaceMetaDataContents(metaFile, dirTestWatcher.getDfsTestTmpDir(),
+          null);
+        metaFile = dirTestWatcher.copyResourceToTestTmp(
+          relativePath.resolve("metadata_table_t1.requires_replace.txt"),
+          absolutePathsMetadataT1.resolve(Metadata.METADATA_FILENAME));
+        dirTestWatcher.replaceMetaDataContents(metaFile, dirTestWatcher.getDfsTestTmpDir(),
+          null);
+        metaFile = dirTestWatcher.copyResourceToTestTmp(
+          relativePath.resolve("metadata_table_t2.requires_replace.txt"),
+          absolutePathsMetadataT2.resolve(Metadata.METADATA_FILENAME));
+        dirTestWatcher.replaceMetaDataContents(metaFile, dirTestWatcher.getDfsTestTmpDir(),
+          null);
         String query = String.format("select * from %s", tablePath);
         int expectedRowCount = 50;
         int expectedNumFiles = 1; // point to selectionRoot since no pruning is done in this query
@@ -514,7 +506,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
                       expectedRowCount, actualRowCount);
         String numFilesPattern = "numFiles=" + expectedNumFiles;
         String usedMetaPattern = "usedMetadataFile=true";
-        String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), tablePath);
+        String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getDfsTestTmpDir().getCanonicalPath(), tablePath);
         PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
                                               new String[]{"Filter"});
       } finally {
@@ -527,7 +519,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
   public void testSpacesInMetadataCachePath() throws Exception {
     final String pathWithSpaces = "path with spaces";
     try {
-      test("use dfs_test.tmp");
+      test("use dfs");
       // creating multilevel table to store path with spaces in both metadata files (METADATA and METADATA_DIRECTORIES)
       test("create table `%s` as select * from cp.`tpch/nation.parquet`", pathWithSpaces);
       test("create table `%1$s/%1$s` as select * from cp.`tpch/nation.parquet`", pathWithSpaces);
@@ -541,7 +533,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
           expectedRowCount, actualRowCount);
       String numFilesPattern = "numFiles=" + expectedNumFiles;
       String usedMetaPattern = "usedMetadataFile=true";
-      String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", getDfsTestTmpSchemaLocation(), pathWithSpaces);
+      String cacheFileRootPattern = String.format("cacheFileRoot=%s/%s", dirTestWatcher.getRootDir().getCanonicalPath(), pathWithSpaces);
       PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern, cacheFileRootPattern},
           new String[] {"Filter"});
     } finally {
@@ -553,13 +545,15 @@ public class TestParquetMetadataCache extends PlanTestBase {
   public void testFutureUnsupportedMetadataVersion() throws Exception {
     final String unsupportedMetadataVersion = "unsupported_metadata_version";
     try {
-      test("use dfs_test.tmp");
+      test("use dfs.tmp");
       test("create table `%s` as select * from cp.`tpch/nation.parquet`", unsupportedMetadataVersion);
       MetadataVersion lastVersion = MetadataVersion.Constants.SUPPORTED_VERSIONS.last();
       // Get the future version, which is absent in MetadataVersions.SUPPORTED_VERSIONS set
       String futureVersion = new MetadataVersion(lastVersion.getMajor() + 1, 0).toString();
-      copyMetaDataCacheToTempWithReplacements("parquet/unsupported_metadata/unsupported_metadata_version.requires_replace.txt",
-          unsupportedMetadataVersion, Metadata.METADATA_FILENAME, futureVersion);
+      File metaDataFile = dirTestWatcher.copyResourceToTestTmp(
+        Paths.get("parquet", "unsupported_metadata", "unsupported_metadata_version.requires_replace.txt"),
+        Paths.get(unsupportedMetadataVersion, Metadata.METADATA_FILENAME));
+      dirTestWatcher.replaceMetaDataContents(metaDataFile, dirTestWatcher.getDfsTestTmpDir(), futureVersion);
       String query = String.format("select * from %s", unsupportedMetadataVersion);
       int expectedRowCount = 25;
       int expectedNumFiles = 1;
@@ -579,10 +573,11 @@ public class TestParquetMetadataCache extends PlanTestBase {
   public void testCorruptedMetadataFile() throws Exception {
     final String corruptedMetadata = "corrupted_metadata";
     try {
-      test("use dfs_test.tmp");
+      test("use dfs.tmp");
       test("create table `%s` as select * from cp.`tpch/nation.parquet`", corruptedMetadata);
-      copyMetaDataCacheToTempReplacingInternalPaths("parquet/unsupported_metadata/" +
-          "corrupted_metadata.requires_replace.txt", corruptedMetadata, Metadata.METADATA_FILENAME);
+      dirTestWatcher.copyResourceToTestTmp(
+        Paths.get("parquet", "unsupported_metadata", "corrupted_metadata.requires_replace.txt"),
+        Paths.get(corruptedMetadata, Metadata.METADATA_FILENAME));
       String query = String.format("select * from %s", corruptedMetadata);
       int expectedRowCount = 25;
       int expectedNumFiles = 1;
@@ -602,10 +597,11 @@ public class TestParquetMetadataCache extends PlanTestBase {
   public void testEmptyMetadataFile() throws Exception {
     final String emptyMetadataFile = "empty_metadata_file";
     try {
-      test("use dfs_test.tmp");
+      test("use dfs.tmp");
       test("create table `%s` as select * from cp.`tpch/nation.parquet`", emptyMetadataFile);
-      copyMetaDataCacheToTempReplacingInternalPaths("parquet/unsupported_metadata/" +
-          "empty_metadata_file.requires_replace.txt", emptyMetadataFile, Metadata.METADATA_FILENAME);
+      dirTestWatcher.copyResourceToTestTmp(
+        Paths.get("parquet", "unsupported_metadata", "empty_metadata_file.requires_replace.txt"),
+        Paths.get(emptyMetadataFile, Metadata.METADATA_FILENAME));
       String query = String.format("select * from %s", emptyMetadataFile);
       int expectedRowCount = 25;
       int expectedNumFiles = 1;
@@ -623,40 +619,35 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test
   public void testRootMetadataFileIsAbsent() throws Exception {
-    final String tmpDir = getDfsTestTmpSchemaLocation();
     final String rootMetaCorruptedTable = "root_meta_corrupted_table";
-    File dataDir = new File(tmpDir, rootMetaCorruptedTable);
-    try {
-      // copy the data into the temporary location, delete root metadata file
-      dataDir.mkdir();
-      FileUtils.copyDirectory(new File(String.format(String.format("%s/multilevel/parquet", TEST_RES_PATH))), dataDir);
+    File dataDir = dirTestWatcher.copyResourceToRoot(
+      Paths.get("multilevel", "parquet"),
+      Paths.get(rootMetaCorruptedTable));
 
-      test("use dfs_test.tmp");
+    try {
+      test("use dfs");
       test("refresh table metadata `%s`", rootMetaCorruptedTable);
       checkForMetadataFile(rootMetaCorruptedTable);
-      File rootMetadataFile = FileUtils.getFile(tmpDir, rootMetaCorruptedTable,  Metadata.METADATA_FILENAME);
-      assertTrue(String.format("Metadata cache file '%s' isn't deleted", rootMetadataFile.getPath()),
-          rootMetadataFile.delete());
+      File rootMetadataFile = FileUtils.getFile(dataDir, Metadata.METADATA_FILENAME);
+      assertTrue(String.format("Metadata cache file '%s' isn't deleted", rootMetadataFile.getPath()), rootMetadataFile.delete());
 
       // mock Metadata tableModified method to avoid occasional metadata files updating
       new MockUp<Metadata>() {
         @Mock
-        boolean tableModified(List<String> directories, Path metaFilePath, Path parentDir, MetadataContext metaContext) {
+        boolean tableModified(List<String> directories, org.apache.hadoop.fs.Path metaFilePath,
+                              org.apache.hadoop.fs.Path parentDir, MetadataContext metaContext) {
           return false;
         }
       };
 
-      String query = String.format("select dir0, dir1, o_custkey, o_orderdate from `%s` " +
-          " where dir0=1994 or dir1='Q3'", rootMetaCorruptedTable);
+      String query = String.format("select dir0, dir1, o_custkey, o_orderdate from `%s` " + " where dir0=1994 or dir1='Q3'", rootMetaCorruptedTable);
       int expectedRowCount = 60;
       int expectedNumFiles = 6;
       int actualRowCount = testSql(query);
-      assertEquals("An incorrect result was obtained while querying a table with metadata cache files",
-          expectedRowCount, actualRowCount);
+      assertEquals("An incorrect result was obtained while querying a table with metadata cache files", expectedRowCount, actualRowCount);
       String numFilesPattern = "numFiles=" + expectedNumFiles;
       String usedMetaPattern = "usedMetadataFile=false";
-      PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern},
-          new String[] {"cacheFileRoot", "Filter"});
+      PlanTestBase.testPlanMatchingPatterns(query, new String[]{numFilesPattern, usedMetaPattern}, new String[]{"cacheFileRoot", "Filter"});
     } finally {
       FileUtils.deleteQuietly(dataDir);
     }
@@ -664,19 +655,17 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test
   public void testInnerMetadataFilesAreAbsent() throws Exception {
-    final String tmpDir = getDfsTestTmpSchemaLocation();
     final String innerMetaCorruptedTable = "inner_meta_corrupted_table";
-    File dataDir = new File(tmpDir, innerMetaCorruptedTable);
-    try {
-      // copy the data into the temporary location, delete a few inner metadata files
-      dataDir.mkdir();
-      FileUtils.copyDirectory(new File(String.format(String.format("%s/multilevel/parquet", TEST_RES_PATH))), dataDir);
+    File dataDir = dirTestWatcher.copyResourceToRoot(
+      Paths.get("multilevel", "parquet"),
+      Paths.get(innerMetaCorruptedTable));
 
-      test("use dfs_test.tmp");
+    try {
+      test("use dfs");
       test("refresh table metadata `%s`", innerMetaCorruptedTable);
       checkForMetadataFile(innerMetaCorruptedTable);
-      File firstInnerMetadataFile = FileUtils.getFile(tmpDir, innerMetaCorruptedTable, "1994", Metadata.METADATA_FILENAME);
-      File secondInnerMetadataFile = FileUtils.getFile(tmpDir, innerMetaCorruptedTable, "1994", "Q3", Metadata.METADATA_FILENAME);
+      File firstInnerMetadataFile = FileUtils.getFile(dataDir, "1994", Metadata.METADATA_FILENAME);
+      File secondInnerMetadataFile = FileUtils.getFile(dataDir, "1994", "Q3", Metadata.METADATA_FILENAME);
       assertTrue(String.format("Metadata cache file '%s' isn't deleted", firstInnerMetadataFile.getPath()),
           firstInnerMetadataFile.delete());
       assertTrue(String.format("Metadata cache file '%s' isn't deleted", secondInnerMetadataFile.getPath()),
@@ -685,7 +674,8 @@ public class TestParquetMetadataCache extends PlanTestBase {
       // mock Metadata tableModified method to avoid occasional metadata files updating
       new MockUp<Metadata>() {
         @Mock
-        boolean tableModified(List<String> directories, Path metaFilePath, Path parentDir, MetadataContext metaContext) {
+        boolean tableModified(List<String> directories, org.apache.hadoop.fs.Path metaFilePath,
+                              org.apache.hadoop.fs.Path parentDir, MetadataContext metaContext) {
           return false;
         }
       };
@@ -709,7 +699,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test // DRILL-4264
   @Category(UnlikelyTest.class)
   public void testMetadataCacheFieldWithDots() throws Exception {
-    final String tableWithDots = "dfs_test.tmp.`complex_table`";
+    final String tableWithDots = "dfs.tmp.`complex_table`";
     try {
       test("create table %s as\n" +
         "select cast(1 as int) as `column.with.dots`, t.`column`.`with.dots`\n" +
@@ -734,7 +724,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test // DRILL-4139
   public void testBooleanPartitionPruning() throws Exception {
-    final String boolPartitionTable = "dfs_test.tmp.`interval_bool_partition`";
+    final String boolPartitionTable = "dfs.tmp.`interval_bool_partition`";
     try {
       test("create table %s partition by (col_bln) as " +
         "select * from cp.`parquet/alltypes_required.parquet`", boolPartitionTable);
@@ -758,7 +748,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test // DRILL-4139
   public void testIntervalDayPartitionPruning() throws Exception {
-    final String intervalDayPartitionTable = "dfs_test.tmp.`interval_day_partition`";
+    final String intervalDayPartitionTable = "dfs.tmp.`interval_day_partition`";
     try {
       test("create table %s partition by (col_intrvl_day) as " +
         "select * from cp.`parquet/alltypes_optional.parquet`", intervalDayPartitionTable);
@@ -783,7 +773,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test // DRILL-4139
   public void testIntervalYearPartitionPruning() throws Exception {
-    final String intervalYearPartitionTable = "dfs_test.tmp.`interval_yr_partition`";
+    final String intervalYearPartitionTable = "dfs.tmp.`interval_yr_partition`";
     try {
       test("create table %s partition by (col_intrvl_yr) as " +
         "select * from cp.`parquet/alltypes_optional.parquet`", intervalYearPartitionTable);
@@ -808,7 +798,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
 
   @Test // DRILL-4139
   public void testVarCharWithNullsPartitionPruning() throws Exception {
-    final String intervalYearPartitionTable = "dfs_test.tmp.`varchar_optional_partition`";
+    final String intervalYearPartitionTable = "dfs.tmp.`varchar_optional_partition`";
     try {
       test("create table %s partition by (col_vrchr) as " +
         "select * from cp.`parquet/alltypes_optional.parquet`", intervalYearPartitionTable);
@@ -845,7 +835,7 @@ public class TestParquetMetadataCache extends PlanTestBase {
     ctasQueries.add("create table %s partition by (manager_id) as " +
       "select cast(manager_id as decimal(18, 6)) as manager_id, EMPLOYEE_ID, FIRST_NAME, LAST_NAME " +
       "from cp.`parquet/fixedlenDecimal.parquet`");
-    final String decimalPartitionTable = "dfs_test.tmp.`decimal_optional_partition`";
+    final String decimalPartitionTable = "dfs.tmp.`decimal_optional_partition`";
     for (String ctasQuery : ctasQueries) {
       try {
         test("alter session set `%s` = true", PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY);
@@ -873,76 +863,76 @@ public class TestParquetMetadataCache extends PlanTestBase {
   @Test // DRILL-4139
   public void testIntWithNullsPartitionPruning() throws Exception {
     try {
-      test("create table dfs_test.tmp.`t5/a` as\n" +
+      test("create table dfs.tmp.`t5/a` as\n" +
         "select 100 as mykey from cp.`tpch/nation.parquet`\n" +
         "union all\n" +
         "select col_notexist from cp.`tpch/region.parquet`");
 
-      test("create table dfs_test.tmp.`t5/b` as\n" +
+      test("create table dfs.tmp.`t5/b` as\n" +
         "select 200 as mykey from cp.`tpch/nation.parquet`\n" +
         "union all\n" +
         "select col_notexist from cp.`tpch/region.parquet`");
 
-      String query = "select mykey from dfs_test.tmp.`t5` where mykey = 100";
+      String query = "select mykey from dfs.tmp.`t5` where mykey = 100";
       int actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 25, actualRowCount);
 
-      test("refresh table metadata dfs_test.tmp.`t5`");
+      test("refresh table metadata dfs.tmp.`t5`");
 
       actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 25, actualRowCount);
     } finally {
-      test("drop table if exists dfs_test.tmp.`t5`");
+      test("drop table if exists dfs.tmp.`t5`");
     }
   }
 
   @Test // DRILL-4139
   public void testPartitionPruningWithIsNull() throws Exception {
     try {
-      test("create table dfs_test.tmp.`t6/a` as\n" +
+      test("create table dfs.tmp.`t6/a` as\n" +
         "select col_notexist as mykey from cp.`tpch/region.parquet`");
 
-      test("create table dfs_test.tmp.`t6/b` as\n" +
+      test("create table dfs.tmp.`t6/b` as\n" +
         "select 100 as mykey from cp.`tpch/region.parquet`");
 
-      String query = "select mykey from dfs_test.tmp.t6 where mykey is null";
+      String query = "select mykey from dfs.tmp.t6 where mykey is null";
 
       int actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 5, actualRowCount);
       PlanTestBase.testPlanMatchingPatterns(query, new String[]{"usedMetadataFile=false"}, new String[]{"Filter"});
 
-      test("refresh table metadata dfs_test.tmp.`t6`");
+      test("refresh table metadata dfs.tmp.`t6`");
 
       actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 5, actualRowCount);
       PlanTestBase.testPlanMatchingPatterns(query, new String[]{"usedMetadataFile=true"}, new String[]{"Filter"});
     } finally {
-      test("drop table if exists dfs_test.tmp.`t6`");
+      test("drop table if exists dfs.tmp.`t6`");
     }
   }
 
   @Test // DRILL-4139
   public void testPartitionPruningWithIsNotNull() throws Exception {
     try {
-      test("create table dfs_test.tmp.`t7/a` as\n" +
+      test("create table dfs.tmp.`t7/a` as\n" +
         "select col_notexist as mykey from cp.`tpch/region.parquet`");
 
-      test("create table dfs_test.tmp.`t7/b` as\n" +
+      test("create table dfs.tmp.`t7/b` as\n" +
         "select 100 as mykey from cp.`tpch/region.parquet`");
 
-      String query = "select mykey from dfs_test.tmp.t7 where mykey is null";
+      String query = "select mykey from dfs.tmp.t7 where mykey is null";
 
       int actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 5, actualRowCount);
       PlanTestBase.testPlanMatchingPatterns(query, new String[]{"usedMetadataFile=false"}, new String[]{"Filter"});
 
-      test("refresh table metadata dfs_test.tmp.`t7`");
+      test("refresh table metadata dfs.tmp.`t7`");
 
       actualRowCount = testSql(query);
       assertEquals("Row count does not match the expected value", 5, actualRowCount);
       PlanTestBase.testPlanMatchingPatterns(query, new String[]{"usedMetadataFile=true"}, new String[]{"Filter"});
     } finally {
-      test("drop table if exists dfs_test.tmp.`t7`");
+      test("drop table if exists dfs.tmp.`t7`");
     }
   }
 
@@ -952,7 +942,14 @@ public class TestParquetMetadataCache extends PlanTestBase {
    * @param table table name or table path
    */
   private void checkForMetadataFile(String table) {
-    String tmpDir = getDfsTestTmpSchemaLocation();
+    final String tmpDir;
+
+    try {
+      tmpDir = dirTestWatcher.getRootDir().getCanonicalPath();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+
     File metaFile = table.startsWith(tmpDir) ? FileUtils.getFile(table, Metadata.METADATA_FILENAME)
         : FileUtils.getFile(tmpDir, table, Metadata.METADATA_FILENAME);
     assertTrue(String.format("There is no metadata cache file for the %s table", table),

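The hunks above all converge on one pattern: test resources are staged through the dirTestWatcher rule exposed by the updated PlanTestBase, its root directory backs the dfs workspace, and queries address tables by workspace-relative paths instead of prefixing getDfsTestTmpSchemaLocation(). Below is a minimal sketch of that pattern, reusing only the calls shown in the diff; the class and table names are hypothetical. The copyResourceToTestTmp and replaceMetaDataContents calls seen in testOldMetadataVersions follow the same idea for the dfs.tmp workspace.

import java.nio.file.Paths;

import org.apache.drill.PlanTestBase;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleTempDirTest extends PlanTestBase {

  @BeforeClass
  public static void copyData() throws Exception {
    // Stage a classpath resource tree under the watcher's root directory,
    // which backs the dfs workspace in these tests.
    dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "parquet"), Paths.get("exampleTable"));
  }

  @Test
  public void queryByRelativePath() throws Exception {
    // The table is addressed relative to the workspace root; no absolute
    // temp-directory prefix is needed any more.
    test("select count(*) from dfs.`exampleTable`");
  }
}
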
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetScan.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetScan.java
index 39df4e0..c95b201 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetScan.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetScan.java
@@ -18,41 +18,29 @@
 package org.apache.drill.exec.store.parquet;
 
 import com.google.common.io.Resources;
-import org.apache.drill.BaseTestQuery;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.BeforeClass;
+import org.apache.commons.io.FileUtils;
+import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 
-public class TestParquetScan extends BaseTestQuery {
-
-  static FileSystem fs;
-
-  @BeforeClass
-  public static void initFs() throws Exception {
-    fs = getLocalFileSystem();
-  }
+import java.io.File;
+import java.nio.file.Path;
 
+public class TestParquetScan extends BaseTestQuery {
   @Test
   public void testSuccessFile() throws Exception {
-    Path p = new Path("/tmp/nation_test_parquet_scan");
-    if (fs.exists(p)) {
-      fs.delete(p, true);
-    }
-
-    fs.mkdirs(p);
+    final byte[] bytes = Resources.toByteArray(Resources.getResource("tpch/nation.parquet"));
 
-    byte[] bytes = Resources.toByteArray(Resources.getResource("tpch/nation.parquet"));
+    final Path rootPath = dirTestWatcher.getRootDir().toPath();
+    final File scanFile = rootPath.resolve("nation_test_parquet_scan").toFile();
+    final File successFile = rootPath.resolve("_SUCCESS").toFile();
+    final File logsFile = rootPath.resolve("_logs").toFile();
 
-    FSDataOutputStream os = fs.create(new Path(p, "nation.parquet"));
-    os.write(bytes);
-    os.close();
-    fs.create(new Path(p, "_SUCCESS")).close();
-    fs.create(new Path(p, "_logs")).close();
+    FileUtils.writeByteArrayToFile(scanFile, bytes);
+    successFile.createNewFile();
+    logsFile.createNewFile();
 
     testBuilder()
-        .sqlQuery("select count(*) c from dfs.tmp.nation_test_parquet_scan where 1 = 1")
+        .sqlQuery("select count(*) c from dfs.nation_test_parquet_scan where 1 = 1")
         .unOrdered()
         .baselineColumns("c")
         .baselineValues(25L)

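TestParquetScan shows the same idea on the write side: the Hadoop FileSystem round-trip is replaced by plain java.io.File and commons-io writes into the watcher's root. A minimal sketch of the staging logic, assuming the same dirTestWatcher field as above (StageNationFile is a hypothetical helper):

import java.io.File;
import java.io.IOException;

import com.google.common.io.Resources;
import org.apache.commons.io.FileUtils;

class StageNationFile {
  static void stage(File root) throws IOException {
    byte[] bytes = Resources.toByteArray(Resources.getResource("tpch/nation.parquet"));
    // The table is a single Parquet file directly under the workspace root...
    FileUtils.writeByteArrayToFile(new File(root, "nation_test_parquet_scan"), bytes);
    // ...plus the job marker files the scan is expected to tolerate.
    new File(root, "_SUCCESS").createNewFile();
    new File(root, "_logs").createNewFile();
  }
}

Called as StageNationFile.stage(dirTestWatcher.getRootDir()), after which the query against dfs.nation_test_parquet_scan resolves under the workspace root.
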
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestColumnReaderFactory.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestColumnReaderFactory.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestColumnReaderFactory.java
index bfd894d..cf11d5f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestColumnReaderFactory.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestColumnReaderFactory.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet.columnreaders;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestDateReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestDateReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestDateReader.java
index e95842c..b641707 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestDateReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/columnreaders/TestDateReader.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet.columnreaders;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 
 public class TestDateReader extends BaseTestQuery {

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet2/TestDrillParquetReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet2/TestDrillParquetReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet2/TestDrillParquetReader.java
index 477b825..ec2651d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet2/TestDrillParquetReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet2/TestDrillParquetReader.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet2;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -75,12 +75,12 @@ public class TestDrillParquetReader extends BaseTestQuery {
   @Test
   public void test4349() throws Exception {
     // start by creating a parquet file from the input csv file
-    runSQL("CREATE TABLE dfs_test.tmp.`4349` AS SELECT columns[0] id, CAST(NULLIF(columns[1], '') AS DOUBLE) val FROM cp.`parquet2/4349.csv.gz`");
+    runSQL("CREATE TABLE dfs.tmp.`4349` AS SELECT columns[0] id, CAST(NULLIF(columns[1], '') AS DOUBLE) val FROM cp.`parquet2/4349.csv.gz`");
 
     // querying the parquet file should return the same results found in the csv file
     testBuilder()
       .unOrdered()
-      .sqlQuery("SELECT * FROM dfs_test.tmp.`4349` WHERE id = 'b'")
+      .sqlQuery("SELECT * FROM dfs.tmp.`4349` WHERE id = 'b'")
       .sqlBaselineQuery("SELECT columns[0] id, CAST(NULLIF(columns[1], '') AS DOUBLE) val FROM cp.`parquet2/4349.csv.gz` WHERE columns[0] = 'b'")
       .go();
   }

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapDecoder.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapDecoder.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapDecoder.java
index 56c2dbe..6e58ccf 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapDecoder.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapDecoder.java
@@ -17,10 +17,9 @@
 package org.apache.drill.exec.store.pcap;
 
 import com.google.common.io.Resources;
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.store.pcap.decoder.Packet;
 import org.apache.drill.exec.store.pcap.decoder.PacketDecoder;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.BufferedInputStream;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapRecordReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapRecordReader.java
index ce2a4f8..706694d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapRecordReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/pcap/TestPcapRecordReader.java
@@ -16,33 +16,39 @@
  */
 package org.apache.drill.exec.store.pcap;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
 import org.junit.Assert;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
+import java.nio.file.Paths;
 import java.util.List;
 
 public class TestPcapRecordReader extends BaseTestQuery {
+  @BeforeClass
+  public static void setupTestFiles() {
+    dirTestWatcher.copyResourceToRoot(Paths.get("store", "pcap"));
+  }
 
   @Test
   public void testStarQuery() throws Exception {
-    runSQLVerifyCount("select * from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 16);
-    runSQLVerifyCount("select distinct DST_IP from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 1);
-    runSQLVerifyCount("select distinct DsT_IP from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 1);
-    runSQLVerifyCount("select distinct dst_ip from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 1);
+    runSQLVerifyCount("select * from dfs.`store/pcap/tcp-1.pcap`", 16);
+    runSQLVerifyCount("select distinct DST_IP from dfs.`store/pcap/tcp-1.pcap`", 1);
+    runSQLVerifyCount("select distinct DsT_IP from dfs.`store/pcap/tcp-1.pcap`", 1);
+    runSQLVerifyCount("select distinct dst_ip from dfs.`store/pcap/tcp-1.pcap`", 1);
   }
 
   @Test
   public void testCountQuery() throws Exception {
-    runSQLVerifyCount("select count(*) from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 1);
-    runSQLVerifyCount("select count(*) from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-2.pcap`", 1);
+    runSQLVerifyCount("select count(*) from dfs.`store/pcap/tcp-1.pcap`", 1);
+    runSQLVerifyCount("select count(*) from dfs.`store/pcap/tcp-2.pcap`", 1);
   }
 
   @Test
   public void testDistinctQuery() throws Exception {
-    runSQLVerifyCount("select distinct * from dfs.`${WORKING_PATH}/src/test/resources/store/pcap/tcp-1.pcap`", 1);
+    runSQLVerifyCount("select distinct * from dfs.`store/pcap/tcp-1.pcap`", 1);
   }
 
   private void runSQLVerifyCount(String sql, int expectedRowCount) throws Exception {

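The two-step pattern above is the core of the new test-temp-directory scheme: copy
classpath resources into the watcher-managed dfs root once per class, then query
them with workspace-relative paths instead of splicing ${WORKING_PATH} into the SQL.
A condensed sketch, assuming the dirTestWatcher field and test() helper inherited
from BaseTestQuery (plus java.nio.file.Paths and the JUnit annotations):

  @BeforeClass
  public static void setupTestFiles() {
    // Copy src/test/resources/store/pcap into the dfs workspace root.
    dirTestWatcher.copyResourceToRoot(Paths.get("store", "pcap"));
  }

  @Test
  public void smokeTest() throws Exception {
    // Paths are now relative to the dfs root.
    test("select * from dfs.`store/pcap/tcp-1.pcap`");
  }
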
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
index f48eb2c..6d3ce64 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sequencefile/TestSequenceFileReader.java
@@ -21,8 +21,7 @@ import java.io.DataOutputStream;
 import java.io.ByteArrayOutputStream;
 
 import org.junit.Test;
-import org.apache.drill.BaseTestQuery;
-import org.apache.drill.common.util.FileUtils;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.hadoop.io.BytesWritable;
 
 public class TestSequenceFileReader extends BaseTestQuery {
@@ -37,11 +36,9 @@ public class TestSequenceFileReader extends BaseTestQuery {
 
   @Test
   public void testSequenceFileReader() throws Exception {
-    String root = FileUtils.getResourceAsFile("/sequencefiles/simple.seq").toURI().toString();
-    final String query = String.format("select convert_from(t.binary_key, 'UTF8') as k, convert_from(t.binary_value, 'UTF8') as v " +
-      "from dfs_test.`%s` t", root);
     testBuilder()
-      .sqlQuery(query)
+      .sqlQuery("select convert_from(t.binary_key, 'UTF8') as k, convert_from(t.binary_value, 'UTF8') as v " +
+        "from cp.`sequencefiles/simple.seq` t")
       .ordered()
       .baselineColumns("k", "v")
       .baselineValues(byteWritableString("key0"), byteWritableString("value0"))

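Read-only fixtures get an even simpler migration, as shown above: query the file
straight off the classpath through the cp schema, and the
FileUtils.getResourceAsFile/String.format detour disappears. A one-line smoke-test
sketch, under the same assumption that the resource sits on the test classpath:

  // Read a classpath resource directly; no copy into dfs is needed.
  test("select convert_from(t.binary_key, 'UTF8') as k from cp.`sequencefiles/simple.seq` t");
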
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/PStoreTestUtil.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/PStoreTestUtil.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/PStoreTestUtil.java
index bcd2f5e..d3116c7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/PStoreTestUtil.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/PStoreTestUtil.java
@@ -41,7 +41,10 @@ public class PStoreTestUtil {
       expectedMap.put(keys[i], values[i]);
       store.put(keys[i], values[i]);
     }
-    // allow one second for puts to propagate back to cache
+
+    // Wait for the store caches to update; this is necessary because ZookeeperClient caches can update asynchronously in some cases.
+    waitForNumProps(store, keys.length);
+
     {
       Iterator<Map.Entry<String, String>> iter = store.getAll();
       for(int i =0; i < keys.length; i++){
@@ -61,8 +64,29 @@ public class PStoreTestUtil {
       }
     }
 
-    // allow one second for deletes to propagate back to cache
-
+    // Wait for the store caches to update; this is necessary because ZookeeperClient caches can update asynchronously in some cases.
+    waitForNumProps(store, 0);
     assertFalse(store.getAll().hasNext());
   }
+
+  private static void waitForNumProps(PersistentStore<String> store, int expected) throws InterruptedException {
+    for (int numProps = getNumProps(store);
+         numProps != expected;
+         numProps = getNumProps(store)) {
+      Thread.sleep(100L);
+    }
+  }
+
+  private static int getNumProps(PersistentStore<String> store) {
+    Iterator<Map.Entry<String, String>> iter = store.getAll();
+
+    int numProps = 0;
+
+    while (iter.hasNext()) {
+      iter.next();
+      numProps++;
+    }
+
+    return numProps;
+  }
 }

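One caveat with the polling helper above: it spins until the entry count matches
and never gives up, so a store that fails to converge hangs the test until the
harness times out. A deadline-bounded variant is sketched below; the 10-second
limit is an arbitrary illustrative choice, and getNumProps() is the helper defined
in the hunk above:

  private static void waitForNumProps(PersistentStore<String> store, int expected)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + 10_000L;
    while (getNumProps(store) != expected) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("store never reached " + expected + " entries");
      }
      Thread.sleep(100L); // same poll interval as the original helper
    }
  }
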
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestPStoreProviders.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestPStoreProviders.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestPStoreProviders.java
index e7b150c..7325fd7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestPStoreProviders.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestPStoreProviders.java
@@ -22,8 +22,9 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.drill.categories.SlowTest;
 import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.util.FileUtils;
+import org.apache.drill.common.util.DrillFileUtils;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.TestWithZookeeper;
 import org.apache.drill.exec.coord.zk.PathUtils;
@@ -32,22 +33,22 @@ import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.server.options.PersistedOptionValue;
 import org.apache.drill.exec.store.sys.store.provider.LocalPersistentStoreProvider;
 import org.apache.drill.exec.store.sys.store.provider.ZookeeperPersistentStoreProvider;
+import org.apache.drill.test.BaseDirTestWatcher;
 import org.apache.drill.test.ClientFixture;
 import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterFixtureBuilder;
-import org.apache.drill.testutils.DirTestWatcher;
 import org.apache.zookeeper.CreateMode;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import java.io.File;
 
+@Category(SlowTest.class)
 public class TestPStoreProviders extends TestWithZookeeper {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestPStoreProviders.class);
-
   @Rule
-  public DirTestWatcher dirTestWatcher = new DirTestWatcher();
+  public BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
 
   @Test
   public void verifyLocalStore() throws Exception {
@@ -79,18 +80,14 @@ public class TestPStoreProviders extends TestWithZookeeper {
   private void localLoadTestHelper(String propertiesDir) throws Exception {
     File localOptionsResources = new File(propertiesDir);
 
-    String optionsDirPath = String.format("%s/sys.options", dirTestWatcher.getDirPath());
-    File optionsDir = new File(optionsDirPath);
-    optionsDir.mkdirs();
-
-    org.apache.commons.io.FileUtils.copyDirectory(localOptionsResources, optionsDir);
-
-    ClusterFixtureBuilder builder = ClusterFixture.builder().
-      configProperty(ExecConstants.HTTP_ENABLE, false).
+    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher).
       configProperty(ExecConstants.SYS_STORE_PROVIDER_CLASS, LocalPersistentStoreProvider.class.getCanonicalName()).
-      configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_PATH, String.format("file://%s", dirTestWatcher.getDirPath())).
       configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, true);
 
+    File optionsDir = new File(dirTestWatcher.getStoreDir(), "sys.options");
+    optionsDir.mkdirs();
+    org.apache.commons.io.FileUtils.copyDirectory(localOptionsResources, optionsDir);
+
     try (ClusterFixture cluster = builder.build();
          ClientFixture client = cluster.clientFixture()) {
       String parquetPushdown = client.queryBuilder().
@@ -129,7 +126,7 @@ public class TestPStoreProviders extends TestWithZookeeper {
       try (ZookeeperClient zkClient = new ZookeeperClient(curator,
         PathUtils.join("/", storeConfig.getName()), CreateMode.PERSISTENT)) {
         zkClient.start();
-        String oldOptionJson = FileUtils.getResourceAsString("/options/old_booleanopt.json");
+        String oldOptionJson = DrillFileUtils.getResourceAsString("/options/old_booleanopt.json");
         zkClient.put(oldName, oldOptionJson.getBytes(), null);
       }
 

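The builder change above is the key simplification: passing the watcher to
ClusterFixture.builder(dirTestWatcher) wires the managed temp directories
(including the store directory) into the fixture, so the file:// store path no
longer needs to be assembled by hand. A minimal usage sketch, assuming the
fixture API as used above (the query is illustrative):

  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher).
    configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, true);

  try (ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    // Run any query against the fixture-managed Drillbit.
    client.queryBuilder().sql("SELECT * FROM sys.options").run();
  }
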
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestSystemTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestSystemTable.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestSystemTable.java
index dff1504..c6c5a05 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestSystemTable.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/sys/TestSystemTable.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.sys;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.categories.UnlikelyTest;
 import org.apache.drill.exec.ExecConstants;
 import org.junit.BeforeClass;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestCsvHeader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestCsvHeader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestCsvHeader.java
index cf54bb0..2685496 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestCsvHeader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestCsvHeader.java
@@ -18,13 +18,13 @@
 package org.apache.drill.exec.store.text;
 
 import com.google.common.collect.Lists;
-import org.apache.drill.BaseTestQuery;
-import org.apache.drill.TestBuilder;
-import org.apache.drill.common.util.FileUtils;
+import org.apache.drill.test.BaseTestQuery;
+import org.apache.drill.test.TestBuilder;
 
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
+import java.nio.file.Paths;
 import java.util.List;
 
 import org.junit.Before;
@@ -32,20 +32,20 @@ import org.junit.Test;
 
 public class TestCsvHeader extends BaseTestQuery{
 
-  private String root;
+  private static final String ROOT = "store/text/data/cars.csvh";
 
   @Before
   public void initialize() throws Exception {
-    root = FileUtils.getResourceAsFile("/store/text/data/cars.csvh").toURI().toString();
+    dirTestWatcher.copyResourceToRoot(Paths.get("store", "text", "data"));
+
     test("alter session set `exec.errors.verbose` = true ");
   }
 
   @Test //DRILL-951
   public void testCsvWithHeader() throws Exception {
-    //Pre DRILL-951: Qry: select * from dfs_test.`%s` LIMIT 2
-    String query = String.format("select * from dfs_test.`%s` LIMIT 2", root);
+    //Pre DRILL-951: Qry: select * from cp.`%s` LIMIT 2
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select * from cp.`%s` LIMIT 2", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Make", "Model", "Description", "Price")
             .baselineValues("1997", "Ford", "E350", "ac, abs, moon", "3000.00")
@@ -55,11 +55,9 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-951
   public void testCsvWhereWithHeader() throws Exception {
-    //Pre DRILL-951: Qry: select * from dfs_test.`%s` where columns[1] = 'Chevy'
-    String query = String.format("select * from dfs_test.`%s` where Make = 'Chevy'", root);
-
+    //Pre DRILL-951: Qry: select * from cp.`%s` where columns[1] = 'Chevy'
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select * from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Make", "Model", "Description", "Price")
             .baselineValues("1999", "Chevy", "Venture \"Extended Edition\"", "", "4900.00")
@@ -69,11 +67,9 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-951
   public void testCsvStarPlusWithHeader() throws Exception {
-    //Pre DRILL-951: Qry: select *,columns[1] from dfs_test.`%s` where columns[1] = 'Chevy'
-    String query = String.format("select *, Make from dfs_test.`%s` where Make = 'Chevy'", root);
-
+    //Pre DRILL-951: Qry: select *,columns[1] from cp.`%s` where columns[1] = 'Chevy'
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select *, Make from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Make", "Model", "Description", "Price", "Make0")
             .baselineValues("1999", "Chevy", "Venture \"Extended Edition\"", "", "4900.00", "Chevy")
@@ -83,10 +79,9 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-951
   public void testCsvWhereColumnsWithHeader() throws Exception {
-    //Pre DRILL-951: Qry: select columns[1] from dfs_test.`%s` where columns[1] = 'Chevy'
-    String query = String.format("select Make from dfs_test.`%s` where Make = 'Chevy'", root);
+    //Pre DRILL-951: Qry: select columns[1] from cp.`%s` where columns[1] = 'Chevy'
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select Make from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Make")
             .baselineValues("Chevy")
@@ -96,11 +91,9 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-951
   public void testCsvColumnsWithHeader() throws Exception {
-    //Pre DRILL-951: Qry: select columns[0],columns[2],columns[4] from dfs_test.`%s` where columns[1] = 'Chevy'
-    String query = String.format("select `Year`, Model, Price from dfs_test.`%s` where Make = 'Chevy'", root);
-
+    //Pre DRILL-951: Qry: select columns[0],columns[2],columns[4] from cp.`%s` where columns[1] = 'Chevy'
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select `Year`, Model, Price from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Model", "Price")
             .baselineValues("1999", "Venture \"Extended Edition\"", "4900.00")
@@ -110,10 +103,8 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-951
   public void testCsvHeaderShortCircuitReads() throws Exception {
-    String query = String.format("select `Year`, Model from dfs_test.`%s` where Make = 'Chevy'", root);
-
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select `Year`, Model from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Model")
             .baselineValues("1999", "Venture \"Extended Edition\"")
@@ -123,10 +114,8 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-4108
   public void testCsvHeaderNonExistingColumn() throws Exception {
-    String query = String.format("select `Year`, Model, Category from dfs_test.`%s` where Make = 'Chevy'", root);
-
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select `Year`, Model, Category from cp.`%s` where Make = 'Chevy'", ROOT)
             .unOrdered()
             .baselineColumns("Year", "Model", "Category")
             .baselineValues("1999", "Venture \"Extended Edition\"", "")
@@ -136,10 +125,8 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-4108
   public void testCsvHeaderMismatch() throws Exception {
-    String ddir = FileUtils.getResourceAsFile("/store/text/data/d2").toURI().toString();
-    String query = String.format("select `Year`, Model, Category from dfs_test.`%s` where Make = 'Chevy'", ddir);
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select `Year`, Model, Category from dfs.`store/text/data/d2` where Make = 'Chevy'")
             .unOrdered()
             .baselineColumns("Year", "Model", "Category")
             .baselineValues("1999", "", "Venture \"Extended Edition\"")
@@ -151,12 +138,8 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test //DRILL-4108
   public void testCsvHeaderSkipFirstLine() throws Exception {
-    // test that header is not skipped when skipFirstLine is true
-    // testing by defining new format plugin with skipFirstLine set to true and diff file extension
-    String dfile = FileUtils.getResourceAsFile("/store/text/data/cars.csvh-test").toURI().toString();
-    String query = String.format("select `Year`, Model from dfs_test.`%s` where Make = 'Chevy'", dfile);
     testBuilder()
-            .sqlQuery(query)
+            .sqlQuery("select `Year`, Model from cp.`store/text/data/cars.csvh-test` where Make = 'Chevy'")
             .unOrdered()
             .baselineColumns("Year", "Model")
             .baselineValues("1999", "Venture \"Extended Edition\"")
@@ -166,9 +149,7 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test
   public void testEmptyFinalColumn() throws Exception {
-    String dfs_temp = getDfsTestTmpSchemaLocation();
-    File table_dir = new File(dfs_temp, "emptyFinalColumn");
-    table_dir.mkdir();
+    File table_dir = dirTestWatcher.makeTestTmpSubDir(Paths.get("emptyFinalColumn"));
     BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(new File(table_dir, "a.csvh")));
     os.write("field1,field2\n".getBytes());
     for (int i = 0; i < 10000; i++) {
@@ -176,7 +157,7 @@ public class TestCsvHeader extends BaseTestQuery{
     }
     os.flush();
     os.close();
-    String query = "select * from dfs_test.tmp.emptyFinalColumn";
+    String query = "select * from dfs.tmp.emptyFinalColumn";
       TestBuilder builder = testBuilder()
               .sqlQuery(query)
               .ordered()
@@ -189,12 +170,12 @@ public class TestCsvHeader extends BaseTestQuery{
 
   @Test
   public void testCountOnCsvWithHeader() throws Exception {
-    final String query = "select count(%s) as cnt from %s.`%s`";
+    final String query = "select count(%s) as cnt from cp.`%s`";
     final List<Object> options = Lists.<Object>newArrayList("*", 1, "'A'");
 
     for (Object option : options) {
       testBuilder()
-          .sqlQuery(query, option, TEMP_SCHEMA, root)
+          .sqlQuery(query, option, ROOT)
           .unOrdered()
           .baselineColumns("cnt")
           .baselineValues(4L)
@@ -202,5 +183,4 @@ public class TestCsvHeader extends BaseTestQuery{
           .run();
     }
   }
-
 }

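Most of the churn in this file comes from one API affordance: testBuilder()'s
sqlQuery(String, Object...) overload accepts format arguments directly, so the
String.format boilerplate and the intermediate query strings can go. A small
sketch of the same idiom, reusing the ROOT constant defined in the class above:

  @Test
  public void countWithFormatArgs() throws Exception {
    testBuilder()
      .sqlQuery("select count(%s) as cnt from cp.`%s`", "*", ROOT)
      .unOrdered()
      .baselineColumns("cnt")
      .baselineValues(4L)
      .build()
      .run();
  }
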
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestNewTextReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestNewTextReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestNewTextReader.java
index bfc3cc2..8aa939e 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestNewTextReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestNewTextReader.java
@@ -21,10 +21,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.categories.UnlikelyTest;
 import org.apache.drill.common.exceptions.UserRemoteException;
-import org.apache.drill.common.util.FileUtils;
 import org.apache.drill.exec.proto.UserBitShared.DrillPBError.ErrorType;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -69,12 +68,9 @@ public class TestNewTextReader extends BaseTestQuery {
   @Test // see DRILL-3718
   @Category(UnlikelyTest.class)
   public void testTabSeparatedWithQuote() throws Exception {
-    final String root = FileUtils.getResourceAsFile("/store/text/WithQuote.tsv").toURI().toString();
-    final String query = String.format("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
-        "from dfs_test.`%s` ", root);
-
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
+          "from cp.`store/text/WithQuote.tsv`")
         .unOrdered()
         .baselineColumns("c0", "c1", "c2")
         .baselineValues("a", "a", "a")
@@ -87,12 +83,9 @@ public class TestNewTextReader extends BaseTestQuery {
   @Test // see DRILL-3718
   @Category(UnlikelyTest.class)
   public void testSpaceSeparatedWithQuote() throws Exception {
-    final String root = FileUtils.getResourceAsFile("/store/text/WithQuote.ssv").toURI().toString();
-    final String query = String.format("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
-        "from dfs_test.`%s` ", root);
-
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
+          "from cp.`store/text/WithQuote.ssv`")
         .unOrdered()
         .baselineColumns("c0", "c1", "c2")
         .baselineValues("a", "a", "a")
@@ -105,12 +98,9 @@ public class TestNewTextReader extends BaseTestQuery {
   @Test // see DRILL-3718
   @Category(UnlikelyTest.class)
   public void testPipSeparatedWithQuote() throws Exception {
-    final String root = FileUtils.getResourceAsFile("/store/text/WithQuote.tbl").toURI().toString();
-    final String query = String.format("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
-            "from dfs_test.`%s` ", root);
-
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
+          "from cp.`store/text/WithQuote.tbl`")
         .unOrdered()
         .baselineColumns("c0", "c1", "c2")
         .baselineValues("a", "a", "a")
@@ -123,12 +113,9 @@ public class TestNewTextReader extends BaseTestQuery {
   @Test // see DRILL-3718
   @Category(UnlikelyTest.class)
   public void testCrLfSeparatedWithQuote() throws Exception {
-    final String root = FileUtils.getResourceAsFile("/store/text/WithQuotedCrLf.tbl").toURI().toString();
-    final String query = String.format("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
-        "from dfs_test.`%s` ", root);
-
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select columns[0] as c0, columns[1] as c1, columns[2] as c2 \n" +
+          "from cp.`store/text/WithQuotedCrLf.tbl`")
         .unOrdered()
         .baselineColumns("c0", "c1", "c2")
         .baselineValues("a\n1", "a", "a")

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestTextColumn.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestTextColumn.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestTextColumn.java
index 1ff6818..f106cca 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestTextColumn.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TestTextColumn.java
@@ -14,24 +14,21 @@ package org.apache.drill.exec.store.text;
 
 import java.util.List;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.rpc.user.QueryDataBatch;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
 public class TestTextColumn extends BaseTestQuery {
-
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestTextColumn.class);
-
   @Test
   public void testCsvColumnSelection() throws Exception {
-    test("select columns[0] as region_id, columns[1] as country from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
+    test("select columns[0] as region_id, columns[1] as country from cp.`store/text/data/regions.csv`");
   }
 
   @Test
   public void testDefaultDelimiterColumnSelection() throws Exception {
     List<QueryDataBatch> actualResults = testSqlWithResults("SELECT columns[0] as entire_row "
-        + "from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/letters.txt`");
+        + "from cp.`store/text/data/letters.txt`");
 
     final TestResultSet expectedResultSet = new TestResultSet();
     expectedResultSet.addRow("a, b,\",\"c\",\"d,, \\n e");
@@ -45,7 +42,7 @@ public class TestTextColumn extends BaseTestQuery {
   @Test
   public void testCsvColumnSelectionCommasInsideQuotes() throws Exception {
     List<QueryDataBatch> actualResults = testSqlWithResults("SELECT columns[0] as col1, columns[1] as col2, columns[2] as col3,"
-        + "columns[3] as col4 from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/letters.csv`");
+        + "columns[3] as col4 from cp.`store/text/data/letters.csv`");
 
     final TestResultSet expectedResultSet = new TestResultSet();
     expectedResultSet.addRow("a, b,", "c", "d,, \\n e", "f\\\"g");

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
index 5e781d2..d4098f9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
@@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals;
 
 import java.util.List;
 
-import org.apache.drill.common.util.FileUtils;
+import org.apache.drill.common.util.DrillFileUtils;
 import org.apache.drill.exec.client.DrillClient;
 import org.apache.drill.exec.pop.PopUnitTestBase;
 import org.apache.drill.exec.record.RecordBatchLoader;
@@ -47,8 +47,8 @@ public class TextRecordReaderTest extends PopUnitTestBase {
       client.connect();
       List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
               Files.toString(
-                      FileUtils.getResourceAsFile("/store/text/test.json"), Charsets.UTF_8)
-                      .replace("#{DATA_FILE}", FileUtils.getResourceAsFile("/store/text/data/regions.csv").toURI().toString()));
+                      DrillFileUtils.getResourceAsFile("/store/text/test.json"), Charsets.UTF_8)
+                      .replace("#{DATA_FILE}", DrillFileUtils.getResourceAsFile("/store/text/data/regions.csv").toURI().toString()));
       int count = 0;
       RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
       for(QueryDataBatch b : results) {

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestCountDownLatchInjection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestCountDownLatchInjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestCountDownLatchInjection.java
index 407cb7c..6404e27 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestCountDownLatchInjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestCountDownLatchInjection.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.common.concurrent.ExtendedLatch;
 import org.apache.drill.exec.ops.QueryContext;
 import org.apache.drill.exec.proto.UserBitShared.QueryId;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestExceptionInjection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestExceptionInjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestExceptionInjection.java
index 22da20b..44f8727 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestExceptionInjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestExceptionInjection.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ZookeeperHelper;
 import org.apache.drill.exec.exception.DrillbitStartupException;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
index c618739..2e8aa02 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.fail;
 
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.common.concurrent.ExtendedLatch;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ZookeeperHelper;