Posted to commits@drill.apache.org by pr...@apache.org on 2017/11/15 01:46:54 UTC

[08/22] drill git commit: DRILL-5783, DRILL-5841, DRILL-5894: Rationalize test temp directories

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
index cca2bd0..194b78b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
@@ -17,8 +17,8 @@
  */
 package org.apache.drill.exec.store.dfs;
 
-import org.apache.drill.BaseTestQuery;
-import org.apache.drill.common.util.TestTools;
+import org.apache.drill.test.BaseTestQuery;
+import org.apache.drill.test.TestTools;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
@@ -27,7 +27,7 @@ public class TestSchemaNotFoundException extends BaseTestQuery {
 
     @Test(expected = Exception.class)
     public void testSchemaNotFoundForWrongStoragePlgn() throws Exception {
-        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String table = String.format("%s/empty", TestTools.WORKING_PATH.resolve(TestTools.TEST_RESOURCES));
         final String query = String.format("select * from dfs1.`%s`", table);
         try {
             testNoResult(query);
@@ -41,7 +41,7 @@ public class TestSchemaNotFoundException extends BaseTestQuery {
 
     @Test(expected = Exception.class)
     public void testSchemaNotFoundForWrongWorkspace() throws Exception {
-        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String table = String.format("%s/empty", TestTools.WORKING_PATH.resolve(TestTools.TEST_RESOURCES));
         final String query = String.format("select * from dfs.tmp1.`%s`", table);
         try {
             testNoResult(query);
@@ -55,7 +55,7 @@ public class TestSchemaNotFoundException extends BaseTestQuery {
 
     @Test(expected = Exception.class)
     public void testSchemaNotFoundForWrongWorkspaceUsingDefaultWorkspace() throws Exception {
-        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String table = String.format("%s/empty", TestTools.WORKING_PATH.resolve(TestTools.TEST_RESOURCES));
         final String query = String.format("select * from tmp1.`%s`", table);
         try {
             testNoResult("use dfs");
@@ -70,7 +70,7 @@ public class TestSchemaNotFoundException extends BaseTestQuery {
 
     @Test(expected = Exception.class)
     public void testTableNotFoundException() throws Exception {
-        final String table = String.format("%s/empty1", TestTools.getTestResourcesPath());
+        final String table = String.format("%s/empty1", TestTools.WORKING_PATH.resolve(TestTools.TEST_RESOURCES));
         final String query = String.format("select * from tmp.`%s`", table);
         try {
             testNoResult("use dfs");

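[Editor's note: the pattern this hunk applies, shown in isolation. A minimal sketch, assuming TestTools.WORKING_PATH is a java.nio.file.Path for the module working directory and TestTools.TEST_RESOURCES is a relative path under it, which is what the replacement call implies:]

    import java.nio.file.Path;
    import org.apache.drill.test.TestTools;

    // Resolve src/test/resources against the working directory instead of
    // calling the removed TestTools.getTestResourcesPath() helper.
    Path resourcesPath = TestTools.WORKING_PATH.resolve(TestTools.TEST_RESOURCES);
    String table = String.format("%s/empty", resourcesPath);
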
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsv.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsv.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsv.java
index b7bc9fd..c792233 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsv.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/easy/text/compliant/TestCsv.java
@@ -29,13 +29,16 @@ import java.io.PrintWriter;
 import org.apache.drill.common.types.TypeProtos.MinorType;
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.store.easy.text.TextFormatPlugin.TextFormatConfig;
+import org.apache.drill.test.BaseDirTestWatcher;
 import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterTest;
 import org.apache.drill.test.rowSet.RowSet;
 import org.apache.drill.test.rowSet.RowSetBuilder;
 import org.apache.drill.test.rowSet.RowSetComparison;
 import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.apache.drill.test.DirTestWatcher;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 
 /**
@@ -49,20 +52,21 @@ public class TestCsv extends ClusterTest {
 
   private static File testDir;
 
+  @ClassRule
+  public static final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
+
   @BeforeClass
   public static void setup() throws Exception {
-    startCluster(ClusterFixture.builder()
-        .maxParallelization(1)
-        );
+    startCluster(ClusterFixture.builder(dirTestWatcher).maxParallelization(1));
 
     // Set up CSV storage plugin using headers.
 
-    testDir = cluster.makeTempDir("csv");
     TextFormatConfig csvFormat = new TextFormatConfig();
     csvFormat.fieldDelimiter = ',';
     csvFormat.skipFirstLine = false;
     csvFormat.extractHeader = true;
-    cluster.defineWorkspace("dfs", "data", testDir.getAbsolutePath(), "csv", csvFormat);
+
+    testDir = cluster.makeDataDir("data", "csv", csvFormat);
   }
 
   String emptyHeaders[] = {
@@ -136,8 +140,8 @@ public class TestCsv extends ClusterTest {
   public void testCsvHeadersCaseInsensitive() throws IOException {
     String fileName = "case2.csv";
     buildFile(fileName, validHeaders);
-    String sql = "SELECT A, b, C FROM `dfs.data`.`" + fileName + "`";
-    RowSet actual = client.queryBuilder().sql(sql).rowSet();
+    String sql = "SELECT A, b, C FROM `dfs.data`.`%s`";
+    RowSet actual = client.queryBuilder().sql(sql, fileName).rowSet();
 
     BatchSchema expectedSchema = new SchemaBuilder()
         .add("A", MinorType.VARCHAR)

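[Editor's note: this hunk replaces the hand-rolled temp directory (cluster.makeTempDir plus defineWorkspace) with a watcher-managed one. A minimal sketch of the resulting setup, using only the BaseDirTestWatcher and ClusterFixture calls that appear in the diff:]

    @ClassRule
    public static final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();

    @BeforeClass
    public static void setup() throws Exception {
      // The watcher owns this test class's directory tree for its lifetime.
      startCluster(ClusterFixture.builder(dirTestWatcher).maxParallelization(1));

      TextFormatConfig csvFormat = new TextFormatConfig();
      csvFormat.fieldDelimiter = ',';
      csvFormat.skipFirstLine = false;
      csvFormat.extractHeader = true;

      // makeDataDir both creates the directory under the watcher's root and
      // registers it as the dfs.data workspace with the given format,
      // collapsing the old makeTempDir + defineWorkspace pair.
      testDir = cluster.makeDataDir("data", "csv", csvFormat);
    }

The same hunk also folds String.format into the query builder: sql(sql, fileName) formats its arguments itself.
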
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/Drill2283InfoSchemaVarchar1BugTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/Drill2283InfoSchemaVarchar1BugTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/Drill2283InfoSchemaVarchar1BugTest.java
index 6322224..5061346 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/Drill2283InfoSchemaVarchar1BugTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/Drill2283InfoSchemaVarchar1BugTest.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.ischema;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.categories.UnlikelyTest;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/json/TestJsonRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/json/TestJsonRecordReader.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/json/TestJsonRecordReader.java
index 7f787b8..7eb7259 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/json/TestJsonRecordReader.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/json/TestJsonRecordReader.java
@@ -17,35 +17,35 @@
  */
 package org.apache.drill.exec.store.json;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.categories.UnlikelyTest;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.proto.UserBitShared;
 import org.apache.drill.exec.ExecConstants;
-import org.junit.Ignore;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Assert;
 import org.junit.experimental.categories.Category;
 
+import java.nio.file.Paths;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestJsonRecordReader extends BaseTestQuery {
-  // private static final org.slf4j.Logger logger =
-  // org.slf4j.LoggerFactory.getLogger(TestJsonRecordReader.class);
+  @BeforeClass
+  public static void setupTestFiles() {
+    dirTestWatcher.copyResourceToRoot(Paths.get("jsoninput/drill_3353"));
+  }
 
   @Test
   public void testComplexJsonInput() throws Exception {
-    // test("select z[0]['orange']  from cp.`jsoninput/input2.json` limit 10");
     test("select `integer`, x['y'] as x1, x['y'] as x2, z[0], z[0]['orange'], z[1]['pink']  from cp.`jsoninput/input2.json` limit 10 ");
-    // test("select x from cp.`jsoninput/input2.json`");
-
-    // test("select z[0]  from cp.`jsoninput/input2.json` limit 10");
   }
 
   @Test
   public void testContainingArray() throws Exception {
-    test("select * from dfs.`${WORKING_PATH}/src/test/resources/store/json/listdoc.json`");
+    test("select * from cp.`store/json/listdoc.json`");
   }
 
   @Test
@@ -110,7 +110,7 @@ public class TestJsonRecordReader extends BaseTestQuery {
   @Category(UnlikelyTest.class)
   // DRILL-1832
   public void testJsonWithNulls2() throws Exception {
-    final String query = "select SUM(1) as `sum_Number_of_Records_ok` from cp.`/jsoninput/twitter_43.json` having (COUNT(1) > 0)";
+    final String query = "select SUM(1) as `sum_Number_of_Records_ok` from cp.`jsoninput/twitter_43.json` having (COUNT(1) > 0)";
     testBuilder().sqlQuery(query).unOrdered()
         .jsonBaselineFile("jsoninput/drill-1832-2-result.json").go();
   }
@@ -160,8 +160,8 @@ public class TestJsonRecordReader extends BaseTestQuery {
   public void drill_3353() throws Exception {
     try {
       testNoResult("alter session set `store.json.all_text_mode` = true");
-      test("create table dfs_test.tmp.drill_3353 as select a from dfs.`${WORKING_PATH}/src/test/resources/jsoninput/drill_3353` where e = true");
-      String query = "select t.a.d cnt from dfs_test.tmp.drill_3353 t where t.a.d is not null";
+      test("create table dfs.tmp.drill_3353 as select a from dfs.`jsoninput/drill_3353` where e = true");
+      String query = "select t.a.d cnt from dfs.tmp.drill_3353 t where t.a.d is not null";
       test(query);
       testBuilder().sqlQuery(query).unOrdered().baselineColumns("cnt")
           .baselineValues("1").go();

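[Editor's note: the key move in this file is copying test resources into the watcher-managed dfs root up front, so queries use workspace-relative paths instead of interpolating ${WORKING_PATH}. A sketch built only from the calls shown in the hunk:]

    @BeforeClass
    public static void setupTestFiles() {
      // Copy src/test/resources/jsoninput/drill_3353 into the temporary
      // dfs root so dfs.`jsoninput/drill_3353` resolves inside it.
      dirTestWatcher.copyResourceToRoot(Paths.get("jsoninput/drill_3353"));
    }

    // Queries then address the copy relative to the workspace:
    test("create table dfs.tmp.drill_3353 as select a from dfs.`jsoninput/drill_3353` where e = true");
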
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
index 0f7c213..5a5207b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetInternalsTest.java
@@ -17,7 +17,8 @@
  */
 package org.apache.drill.exec.store.parquet;
 
-import org.apache.drill.TestBuilder;
+import org.apache.drill.test.BaseDirTestWatcher;
+import org.apache.drill.test.TestBuilder;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos;
 import org.apache.drill.common.types.Types;
@@ -25,6 +26,7 @@ import org.apache.drill.test.ClusterFixture;
 import org.apache.drill.test.ClusterTest;
 import org.apache.drill.test.ClusterFixtureBuilder;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Test;
 
 import java.util.HashMap;
@@ -32,9 +34,12 @@ import java.util.Map;
 
 public class ParquetInternalsTest extends ClusterTest {
 
+  @ClassRule
+  public static BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();
+
   @BeforeClass
   public static void setup( ) throws Exception {
-    ClusterFixtureBuilder builder = ClusterFixture.builder()
+    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
       // Set options, etc.
       ;
     startCluster(builder);
@@ -44,7 +49,6 @@ public class ParquetInternalsTest extends ClusterTest {
   public void testFixedWidth() throws Exception {
     String sql = "SELECT l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity\n" +
                  "FROM `cp`.`tpch/lineitem.parquet` LIMIT 20";
-//    client.queryBuilder().sql(sql).printCsv();
 
     Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
     typeMap.put(TestBuilder.parsePath("l_orderkey"), Types.required(TypeProtos.MinorType.INT));
@@ -66,7 +70,6 @@ public class ParquetInternalsTest extends ClusterTest {
   public void testVariableWidth() throws Exception {
     String sql = "SELECT s_name, s_address, s_phone, s_comment\n" +
                  "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
-//    client.queryBuilder().sql(sql).printCsv();
 
     Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
     typeMap.put(TestBuilder.parsePath("s_name"), Types.required(TypeProtos.MinorType.VARCHAR));
@@ -87,7 +90,6 @@ public class ParquetInternalsTest extends ClusterTest {
   public void testMixedWidth() throws Exception {
     String sql = "SELECT s_suppkey, s_name, s_address, s_phone, s_acctbal\n" +
                  "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
-//    client.queryBuilder().sql(sql).printCsv();
 
     Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
     typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
@@ -109,7 +111,6 @@ public class ParquetInternalsTest extends ClusterTest {
   public void testStar() throws Exception {
     String sql = "SELECT *\n" +
                  "FROM `cp`.`tpch/supplier.parquet` LIMIT 20";
-//    client.queryBuilder().sql(sql).printCsv();
 
     Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
     typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
@@ -140,19 +141,17 @@ public class ParquetInternalsTest extends ClusterTest {
     // TODO: Once the "row set" fixture is available, use that to verify
     // that all rows are null.
 
-//    client.queryBuilder().sql(sql).printCsv();
-
     // Can't handle nulls this way...
-//    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
-//    typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
-//    typeMap.put(TestBuilder.parsePath("bogus"), Types.optional(TypeProtos.MinorType.INT));
-//    client.testBuilder()
-//      .sqlQuery(sql)
-//      .unOrdered()
-//      .csvBaselineFile("parquet/expected/bogus.csv")
-//      .baselineColumns("s_suppkey", "bogus")
-//      .baselineTypes(typeMap)
-//      .build()
-//      .run();
+    //    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
+    //    typeMap.put(TestBuilder.parsePath("s_suppkey"), Types.required(TypeProtos.MinorType.INT));
+    //    typeMap.put(TestBuilder.parsePath("bogus"), Types.optional(TypeProtos.MinorType.INT));
+    //    client.testBuilder()
+    //      .sqlQuery(sql)
+    //      .unOrdered()
+    //      .csvBaselineFile("parquet/expected/bogus.csv")
+    //      .baselineColumns("s_suppkey", "bogus")
+    //      .baselineTypes(typeMap)
+    //      .build()
+    //      .run();
   }
 }

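[Editor's note: the same conversion as TestCsv above — the class gains its own BaseDirTestWatcher and passes it to the fixture builder. Stripped to its essentials, assuming the same framework signatures:]

    @ClassRule
    public static BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();

    @BeforeClass
    public static void setup() throws Exception {
      // Without a watcher the builder has no per-test root to manage;
      // passing one keeps this test's temp files isolated.
      ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher);
      startCluster(builder);
    }
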
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
index 375ab75..397c512 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/ParquetRecordReaderTest.java
@@ -32,12 +32,12 @@ import java.util.concurrent.TimeUnit;
 
 import mockit.Injectable;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.expression.ExpressionPosition;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.util.FileUtils;
+import org.apache.drill.common.util.DrillFileUtils;
 import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.memory.BufferAllocator;
@@ -90,7 +90,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   private static final int numberRowGroups = 1;
   private static final int recordsPerRowGroup = 300;
   private static int DEFAULT_BYTES_PER_PAGE = 1024 * 1024 * 1;
-  private static final String fileName = "/tmp/parquet_test_file_many_types";
+  private static final String fileName = "tmp/parquet_test_file_many_types";
 
   @BeforeClass
   public static void generateFile() throws Exception {
@@ -103,15 +103,14 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     }
   }
 
-
   @Test
   public void testMultipleRowGroupsAndReads3() throws Exception {
-    final String planName = "/parquet/parquet_scan_screen.json";
+    final String planName = "parquet/parquet_scan_screen.json";
     testParquetFullEngineLocalPath(planName, fileName, 2, numberRowGroups, recordsPerRowGroup);
   }
 
   public String getPlanForFile(String pathFileName, String parquetFileName) throws IOException {
-    return Files.toString(FileUtils.getResourceAsFile(pathFileName), Charsets.UTF_8)
+    return Files.toString(DrillFileUtils.getResourceAsFile(pathFileName), Charsets.UTF_8)
         .replaceFirst("&REPLACED_IN_PARQUET_TEST&", parquetFileName);
   }
 
@@ -129,22 +128,21 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
       }
     }
 
-    final String planText = Files.toString(FileUtils.getResourceAsFile(
-        "/parquet/parquet_scan_screen_read_entry_replace.json"), Charsets.UTF_8).replaceFirst(
+    final String planText = Files.toString(DrillFileUtils.getResourceAsFile(
+        "parquet/parquet_scan_screen_read_entry_replace.json"), Charsets.UTF_8).replaceFirst(
             "&REPLACED_IN_PARQUET_TEST&", readEntries.toString());
     testParquetFullEngineLocalText(planText, fileName, i, numberRowGroups, recordsPerRowGroup, true);
   }
 
   @Test
-
   public void testDictionaryError() throws Exception {
-    testFull(QueryType.SQL, "select L_RECEIPTDATE from dfs.`/tmp/lineitem_null_dict.parquet`", "", 1, 1, 100000, false);
+    testFull(QueryType.SQL, "select L_RECEIPTDATE from dfs.`tmp/lineitem_null_dict.parquet`", "", 1, 1, 100000, false);
   }
 
   @Test
   public void testNullableAgg() throws Exception {
     final List<QueryDataBatch> result = testSqlWithResults(
-        "select sum(a) as total_sum from dfs.`/tmp/parquet_with_nulls_should_sum_100000_nulls_first.parquet`");
+        "select sum(a) as total_sum from dfs.`tmp/parquet_with_nulls_should_sum_100000_nulls_first.parquet`");
     assertEquals("Only expected one batch with data, and then the empty finishing batch.", 2, result.size());
     final RecordBatchLoader loader = new RecordBatchLoader(getDrillbitContext().getAllocator());
 
@@ -163,7 +161,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   @Test
   public void testNullableFilter() throws Exception {
     final List<QueryDataBatch> result = testSqlWithResults(
-        "select count(wr_return_quantity) as row_count from dfs.`/tmp/web_returns` where wr_return_quantity = 1");
+        "select count(wr_return_quantity) as row_count from dfs.`tmp/web_returns` where wr_return_quantity = 1");
     assertEquals("Only expected one batch with data, and then the empty finishing batch.", 2, result.size());
     final RecordBatchLoader loader = new RecordBatchLoader(getDrillbitContext().getAllocator());
 
@@ -182,9 +180,9 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
 
   @Test
   public void testFixedBinary() throws Exception {
-    final String readEntries = "\"/tmp/drilltest/fixed_binary.parquet\"";
-    final String planText = Files.toString(FileUtils.getResourceAsFile(
-        "/parquet/parquet_scan_screen_read_entry_replace.json"), Charsets.UTF_8)
+    final String readEntries = "\"tmp/drilltest/fixed_binary.parquet\"";
+    final String planText = Files.toString(DrillFileUtils.getResourceAsFile(
+        "parquet/parquet_scan_screen_read_entry_replace.json"), Charsets.UTF_8)
           .replaceFirst( "&REPLACED_IN_PARQUET_TEST&", readEntries);
     testParquetFullEngineLocalText(planText, fileName, 1, 1, 1000000, false);
   }
@@ -192,35 +190,35 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   @Test
   public void testNonNullableDictionaries() throws Exception {
     testFull(QueryType.SQL,
-        "select * from dfs.`/tmp/drilltest/non_nullable_dictionary.parquet`", "", 1, 1, 30000000, false);
+        "select * from dfs.`tmp/drilltest/non_nullable_dictionary.parquet`", "", 1, 1, 30000000, false);
   }
 
   @Test
   public void testNullableVarCharMemory() throws Exception {
     testFull(QueryType.SQL,
-        "select s_comment,s_suppkey from dfs.`/tmp/sf100_supplier.parquet`", "", 1, 1, 1000, false);
+        "select s_comment,s_suppkey from dfs.`tmp/sf100_supplier.parquet`", "", 1, 1, 1000, false);
   }
 
   @Test
   public void testReadVoter() throws Exception {
-    testFull(QueryType.SQL, "select * from dfs.`/tmp/voter.parquet`", "", 1, 1, 1000, false);
+    testFull(QueryType.SQL, "select * from dfs.`tmp/voter.parquet`", "", 1, 1, 1000, false);
   }
 
   @Test
   public void testDrill_1314() throws Exception {
     testFull(QueryType.SQL, "select l_partkey " +
-        "from dfs.`/tmp/drill_1314.parquet`", "", 1, 1, 10000, false);
+        "from dfs.`tmp/drill_1314.parquet`", "", 1, 1, 10000, false);
   }
 
   @Test
   public void testDrill_1314_all_columns() throws Exception {
-    testFull(QueryType.SQL, "select * from dfs.`/tmp/drill_1314.parquet`", "", 1, 1, 10000, false);
+    testFull(QueryType.SQL, "select * from dfs.`tmp/drill_1314.parquet`", "", 1, 1, 10000, false);
   }
 
   @Test
   public void testDictionaryError_419() throws Exception {
     testFull(QueryType.SQL,
-        "select c_address from dfs.`/tmp/customer_snappyimpala_drill_419.parquet`", "", 1, 1, 150000, false);
+        "select c_address from dfs.`tmp/customer_snappyimpala_drill_419.parquet`", "", 1, 1, 150000, false);
   }
 
   @Test
@@ -234,14 +232,14 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
 
   public void testNonExistentColumnLargeFile() throws Exception {
     testFull(QueryType.SQL,
-        "select non_existent_column, non_existent_col_2 from dfs.`/tmp/customer.dict.parquet`", "", 1, 1, 150000, false);
+        "select non_existent_column, non_existent_col_2 from dfs.`tmp/customer.dict.parquet`", "", 1, 1, 150000, false);
   }
 
   @Test
 
   public void testNonExistentColumnsSomePresentColumnsLargeFile() throws Exception {
     testFull(QueryType.SQL,
-        "select cust_key, address,  non_existent_column, non_existent_col_2 from dfs.`/tmp/customer.dict.parquet`",
+        "select cust_key, address,  non_existent_column, non_existent_col_2 from dfs.`tmp/customer.dict.parquet`",
         "", 1, 1, 150000, false);
   }
 
@@ -249,19 +247,19 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   @Test
   public void testTPCHPerformace_SF1() throws Exception {
     testFull(QueryType.SQL,
-        "select * from dfs.`/tmp/orders_part-m-00001.parquet`", "", 1, 1, 150000, false);
+        "select * from dfs.`tmp/orders_part-m-00001.parquet`", "", 1, 1, 150000, false);
   }
 
   @Test
   public void testLocalDistributed() throws Exception {
-    final String planName = "/parquet/parquet_scan_union_screen_physical.json";
+    final String planName = "parquet/parquet_scan_union_screen_physical.json";
     testParquetFullEngineLocalTextDistributed(planName, fileName, 1, numberRowGroups, recordsPerRowGroup);
   }
 
   @Test
   @Ignore
   public void testRemoteDistributed() throws Exception {
-    final String planName = "/parquet/parquet_scan_union_screen_physical.json";
+    final String planName = "parquet/parquet_scan_union_screen_physical.json";
     testParquetFullEngineRemote(planName, fileName, 1, numberRowGroups, recordsPerRowGroup);
   }
 
@@ -269,7 +267,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   public void testParquetFullEngineLocalPath(String planFileName, String filename,
       int numberOfTimesRead /* specified in json plan */,
       int numberOfRowGroups, int recordsPerRowGroup) throws Exception {
-    testParquetFullEngineLocalText(Files.toString(FileUtils.getResourceAsFile(planFileName), Charsets.UTF_8), filename,
+    testParquetFullEngineLocalText(Files.toString(DrillFileUtils.getResourceAsFile(planFileName), Charsets.UTF_8), filename,
         numberOfTimesRead, numberOfRowGroups, recordsPerRowGroup, true);
   }
 
@@ -302,7 +300,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
   public void testParquetFullEngineLocalTextDistributed(String planName, String filename,
       int numberOfTimesRead /* specified in json plan */,
       int numberOfRowGroups, int recordsPerRowGroup) throws Exception {
-    String planText = Files.toString(FileUtils.getResourceAsFile(planName), Charsets.UTF_8);
+    String planText = Files.toString(DrillFileUtils.getResourceAsFile(planName), Charsets.UTF_8);
     testFull(QueryType.PHYSICAL, planText, filename, numberOfTimesRead, numberOfRowGroups, recordsPerRowGroup, true);
   }
 
@@ -330,7 +328,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     TestFileGenerator.populateFieldInfoMap(props);
     final ParquetResultListener resultListener =
         new ParquetResultListener(getAllocator(), props, numberOfTimesRead, true);
-    testWithListener(QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile(plan), Charsets.UTF_8), resultListener);
+    testWithListener(QueryType.PHYSICAL, Files.toString(DrillFileUtils.getResourceAsFile(plan), Charsets.UTF_8), resultListener);
     resultListener.getResults();
   }
 
@@ -399,7 +397,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     HashMap<String, FieldInfo> fields = new HashMap<>();
     ParquetTestProperties props = new ParquetTestProperties(2, 300, DEFAULT_BYTES_PER_PAGE, fields);
     populateFieldInfoMap(props);
-    testParquetFullEngineEventBased(true, "/parquet/parquet_scan_screen.json", "/tmp/test.parquet", 1, props);
+    testParquetFullEngineEventBased(true, "parquet/parquet_scan_screen.json", "/tmp/test.parquet", 1, props);
   }
 
   // TODO - Test currently marked ignore to prevent breaking of the build process, requires a binary file that was
@@ -411,7 +409,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     ParquetTestProperties props = new ParquetTestProperties(1, 1500000, DEFAULT_BYTES_PER_PAGE, fields);
     Object[] boolVals = {true, null, null};
     props.fields.put("a", new FieldInfo("boolean", "a", 1, boolVals, TypeProtos.MinorType.BIT, props));
-    testParquetFullEngineEventBased(false, "/parquet/parquet_nullable.json", "/tmp/nullable_test.parquet", 1, props);
+    testParquetFullEngineEventBased(false, "parquet/parquet_nullable.json", "/tmp/nullable_test.parquet", 1, props);
   }
 
 
@@ -429,14 +427,14 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     byte[] val4 = { 'l','o','n','g','e','r',' ','s','t','r','i','n','g'};
     Object[] byteArrayVals = { val, val2, val4};
     props.fields.put("a", new FieldInfo("boolean", "a", 1, byteArrayVals, TypeProtos.MinorType.BIT, props));
-    testParquetFullEngineEventBased(false, "/parquet/parquet_nullable_varlen.json", "/tmp/nullable_varlen.parquet", 1, props);
+    testParquetFullEngineEventBased(false, "parquet/parquet_nullable_varlen.json", "/tmp/nullable_varlen.parquet", 1, props);
     HashMap<String, FieldInfo> fields2 = new HashMap<>();
     // pass strings instead of byte arrays
     Object[] textVals = { new org.apache.drill.exec.util.Text("b"), new org.apache.drill.exec.util.Text("b2"),
         new org.apache.drill.exec.util.Text("b3") };
     ParquetTestProperties props2 = new ParquetTestProperties(1, 30000, DEFAULT_BYTES_PER_PAGE, fields2);
     props2.fields.put("a", new FieldInfo("boolean", "a", 1, textVals, TypeProtos.MinorType.BIT, props2));
-    testParquetFullEngineEventBased(false, "/parquet/parquet_scan_screen_read_entry_replace.json",
+    testParquetFullEngineEventBased(false, "parquet/parquet_scan_screen_read_entry_replace.json",
         "\"/tmp/varLen.parquet/a\"", "unused", 1, props2);
 
   }
@@ -449,7 +447,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     // actually include null values
     Object[] valuesWithNull = {new Text(""), new Text("longer string"), null};
     props3.fields.put("a", new FieldInfo("boolean", "a", 1, valuesWithNull, TypeProtos.MinorType.BIT, props3));
-    testParquetFullEngineEventBased(false, "/parquet/parquet_scan_screen_read_entry_replace.json",
+    testParquetFullEngineEventBased(false, "parquet/parquet_scan_screen_read_entry_replace.json",
         "\"/tmp/nullable_with_nulls.parquet\"", "unused", 1, props3);
 
   }
@@ -464,7 +462,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     props.fields.put("n_nationkey", null);
     props.fields.put("n_regionkey", null);
     props.fields.put("n_comment", null);
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json",
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json",
         "\"/tmp/nation_dictionary_fail.parquet\"", "unused", 1, props, QueryType.LOGICAL);
 
     fields = new HashMap<>();
@@ -477,7 +475,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     props.fields.put("gender_male", null);
     props.fields.put("height", null);
     props.fields.put("hair_thickness", null);
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json",
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json",
         "\"/tmp/employees_5_16_14.parquet\"", "unused", 1, props, QueryType.LOGICAL);
   }
 
@@ -495,17 +493,17 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
         readEntries += ",";
       }
     }
-    testParquetFullEngineEventBased(true, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    testParquetFullEngineEventBased(true, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "/tmp/test.parquet", i, props);
   }
 
 
   @Test
   public void testReadError_Drill_901() throws Exception {
-    // select cast( L_COMMENT as varchar) from  dfs_test.`/tmp/drilltest/employee_parquet`
+    // select cast( L_COMMENT as varchar) from  dfs.`/tmp/drilltest/employee_parquet`
     HashMap<String, FieldInfo> fields = new HashMap<>();
     ParquetTestProperties props = new ParquetTestProperties(1, 60175, DEFAULT_BYTES_PER_PAGE, fields);
-    testParquetFullEngineEventBased(false, false, "/parquet/par_writer_test.json", null,
+    testParquetFullEngineEventBased(false, false, "parquet/par_writer_test.json", null,
         "unused, no file is generated", 1, props, QueryType.PHYSICAL);
   }
 
@@ -516,7 +514,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     HashMap<String, FieldInfo> fields = new HashMap<>();
     ParquetTestProperties props = new ParquetTestProperties(1, 150000, DEFAULT_BYTES_PER_PAGE, fields);
     String readEntries = "\"/tmp/customer_nonull.parquet\"";
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "unused, no file is generated", 1, props, QueryType.LOGICAL);
   }
 
@@ -527,7 +525,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     ParquetTestProperties props = new ParquetTestProperties(1, 150000, DEFAULT_BYTES_PER_PAGE, fields);
     TestFileGenerator.populateDrill_418_fields(props);
     String readEntries = "\"/tmp/customer.plain.parquet\"";
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "unused, no file is generated", 1, props, QueryType.LOGICAL);
   }
 
@@ -539,21 +537,21 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     ParquetTestProperties props = new ParquetTestProperties(1, 1500000, DEFAULT_BYTES_PER_PAGE, fields);
     TestFileGenerator.populatePigTPCHCustomerFields(props);
     String readEntries = "\"/tmp/tpc-h/customer\"";
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "unused, no file is generated", 1, props, QueryType.LOGICAL);
 
     fields = new HashMap<>();
     props = new ParquetTestProperties(1, 100000, DEFAULT_BYTES_PER_PAGE, fields);
     TestFileGenerator.populatePigTPCHSupplierFields(props);
-    readEntries = "\"/tmp/tpc-h/supplier\"";
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    readEntries = "\"tmp/tpc-h/supplier\"";
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "unused, no file is generated", 1, props, QueryType.LOGICAL);
   }
 
   @Test
   public void test958_sql() throws Exception {
     // testFull(QueryType.SQL, "select ss_ext_sales_price from dfs.`/tmp/store_sales`", "", 1, 1, 30000000, false);
-    testFull(QueryType.SQL, "select * from dfs.`/tmp/store_sales`", "", 1, 1, 30000000, false);
+    testFull(QueryType.SQL, "select * from dfs.`tmp/store_sales`", "", 1, 1, 30000000, false);
   }
 
   @Test
@@ -561,8 +559,8 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     HashMap<String, FieldInfo> fields = new HashMap<>();
     ParquetTestProperties props = new ParquetTestProperties(1, 2880404, DEFAULT_BYTES_PER_PAGE, fields);
     TestFileGenerator.populatePigTPCHCustomerFields(props);
-    String readEntries = "\"/tmp/store_sales\"";
-    testParquetFullEngineEventBased(false, false, "/parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
+    String readEntries = "\"tmp/store_sales\"";
+    testParquetFullEngineEventBased(false, false, "parquet/parquet_scan_screen_read_entry_replace.json", readEntries,
         "unused, no file is generated", 1, props, QueryType.LOGICAL);
   }
 
@@ -571,7 +569,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
     HashMap<String, FieldInfo> fields = new HashMap<>();
     ParquetTestProperties props = new ParquetTestProperties(2, 300, DEFAULT_BYTES_PER_PAGE, fields);
     populateFieldInfoMap(props);
-    testParquetFullEngineEventBased(true, "/parquet/parquet_scan_screen.json", "/tmp/test.parquet", 1, props);
+    testParquetFullEngineEventBased(true, "parquet/parquet_scan_screen.json", "tmp/test.parquet", 1, props);
   }
 
 
@@ -681,7 +679,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
 
     final ParquetResultListener resultListener = new ParquetResultListener(getAllocator(), props, numberOfTimesRead, testValues);
     final long C = System.nanoTime();
-    String planText = Files.toString(FileUtils.getResourceAsFile(plan), Charsets.UTF_8);
+    String planText = Files.toString(DrillFileUtils.getResourceAsFile(plan), Charsets.UTF_8);
     // substitute in the string for the read entries, allows reuse of the plan file for several tests
     if (readEntries != null) {
       planText = planText.replaceFirst( "&REPLACED_IN_PARQUET_TEST&", readEntries);
@@ -694,7 +692,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
 
   @Test
   public void testLimit() throws Exception {
-    List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`/parquet/tpch/nation/01.parquet` LIMIT 1");
+    List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet` LIMIT 1");
 
     int recordsInOutput = 0;
     for (QueryDataBatch batch : results) {
@@ -707,7 +705,7 @@ public class ParquetRecordReaderTest extends BaseTestQuery {
 
   @Test
   public void testLimitBeyondRowCount() throws Exception {
-    List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`/parquet/tpch/nation/01.parquet` LIMIT 100");
+    List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet` LIMIT 100");
 
     int recordsInOutput = 0;
     for (QueryDataBatch batch : results) {

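[Editor's note: two mechanical renames run through this file: org.apache.drill.common.util.FileUtils becomes DrillFileUtils (presumably to avoid colliding with org.apache.commons.io.FileUtils, which other tests in this commit import), and classpath/dfs paths lose their leading slash. A sketch of the resulting resource read, using the Guava helpers the test already imports:]

    import com.google.common.base.Charsets;
    import com.google.common.io.Files;
    import org.apache.drill.common.util.DrillFileUtils;

    // Leading slash dropped: the resource name is now given relative to
    // the classpath root.
    String planText = Files.toString(
        DrillFileUtils.getResourceAsFile("parquet/parquet_scan_screen.json"),
        Charsets.UTF_8);
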
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestFixedlenDecimal.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestFixedlenDecimal.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestFixedlenDecimal.java
index 124a8c8..7635551 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestFixedlenDecimal.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestFixedlenDecimal.java
@@ -17,9 +17,8 @@
  */
 package org.apache.drill.exec.store.parquet;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
-import org.apache.drill.exec.proto.UserBitShared;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -34,9 +33,8 @@ public class TestFixedlenDecimal extends BaseTestQuery {
 
   @Test
   public void testNullCount() throws Exception {
-    String query = String.format("select count(*) as c from %s where department_id is null", DATAFILE);
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select count(*) as c from %s where department_id is null", DATAFILE)
         .unOrdered()
         .baselineColumns("c")
         .baselineValues(1L)
@@ -46,9 +44,8 @@ public class TestFixedlenDecimal extends BaseTestQuery {
 
   @Test
   public void testNotNullCount() throws Exception {
-    String query = String.format("select count(*) as c from %s where department_id is not null", DATAFILE);
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select count(*) as c from %s where department_id is not null", DATAFILE)
         .unOrdered()
         .baselineColumns("c")
         .baselineValues(106L)
@@ -58,9 +55,8 @@ public class TestFixedlenDecimal extends BaseTestQuery {
 
   @Test
   public void testSimpleQueryWithCast() throws Exception {
-    String query = String.format("select cast(department_id as bigint) as c from %s where cast(employee_id as decimal) = 170", DATAFILE);
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select cast(department_id as bigint) as c from %s where cast(employee_id as decimal) = 170", DATAFILE)
         .unOrdered()
         .baselineColumns("c")
         .baselineValues(80L)
@@ -70,9 +66,8 @@ public class TestFixedlenDecimal extends BaseTestQuery {
 
   @Test
   public void testSimpleQueryDrill4704Fix() throws Exception {
-    String query = String.format("select cast(department_id as bigint) as c from %s where employee_id = 170", DATAFILE);
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select cast(department_id as bigint) as c from %s where employee_id = 170", DATAFILE)
         .unOrdered()
         .baselineColumns("c")
         .baselineValues(80L)

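[Editor's note: every hunk in this file folds a separate String.format call into sqlQuery, which accepts format arguments directly, as the diff's own replacements confirm. In isolation:]

    // sqlQuery(String, Object...) formats its arguments itself, so the
    // intermediate String.format variable is redundant.
    testBuilder()
        .sqlQuery("select count(*) as c from %s where department_id is null", DATAFILE)
        .unOrdered()
        .baselineColumns("c")
        .baselineValues(1L)
        .build()
        .run();
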
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetComplex.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetComplex.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetComplex.java
index 301374f..7c148cb 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetComplex.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetComplex.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 
 public class TestParquetComplex extends BaseTestQuery {

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
index cb5000f..85501d1 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetFilterPushDown.java
@@ -15,60 +15,87 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.drill.exec.store.parquet;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.drill.PlanTestBase;
 import org.apache.drill.common.expression.LogicalExpression;
-import org.apache.drill.common.util.TestTools;
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
 import org.apache.drill.exec.proto.BitControl;
-import org.apache.drill.test.OperatorFixture;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.metadata.ParquetMetadata;
-import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestWatcher;
+import org.junit.runner.Description;
 
+import java.io.File;
 import java.io.IOException;
+import java.nio.file.Paths;
 
 import static org.junit.Assert.assertEquals;
 
 public class TestParquetFilterPushDown extends PlanTestBase {
-
-  private static final String WORKING_PATH = TestTools.getWorkingPath();
-  private static final String TEST_RES_PATH = WORKING_PATH + "/src/test/resources";
+  private static final String CTAS_TABLE = "order_ctas";
   private static FragmentContext fragContext;
 
-  private static Configuration conf;
   private static FileSystem fs;
 
   @BeforeClass
   public static void initFSAndCreateFragContext() throws Exception {
+    fs = getLocalFileSystem();
     fragContext = new FragmentContext(bits[0].getContext(),
         BitControl.PlanFragment.getDefaultInstance(), null, bits[0].getContext().getFunctionImplementationRegistry());
 
-    fs = getLocalFileSystem();
-    conf = fs.getConf();
+    dirTestWatcher.copyResourceToRoot(Paths.get("parquetFilterPush"));
+    dirTestWatcher.copyResourceToRoot(Paths.get("parquet", "multirowgroup.parquet"));
   }
 
   @AfterClass
-  public static void close() throws Exception {
-    fragContext.close();
+  public static void teardown() throws IOException {
     fs.close();
   }
 
+  @Rule
+  public final TestWatcher ctasWatcher = new TestWatcher() {
+    @Override
+    protected void failed(Throwable e, Description description) {
+      deleteCtasTable();
+    }
+
+    @Override
+    protected void starting(Description description) {
+      deleteCtasTable();
+    }
+
+    @Override
+    protected void finished(Description description) {
+      deleteCtasTable();
+    }
+
+    private void deleteCtasTable() {
+      FileUtils.deleteQuietly(new File(dirTestWatcher.getDfsTestTmpDir(), CTAS_TABLE));
+    }
+  };
+
   @Test
   // Test filter evaluation directly without go through SQL queries.
   public void testIntPredicateWithEval() throws Exception {
     // intTbl.parquet has only one int column
     //    intCol : [0, 100].
-    final String filePath = String.format("%s/parquetFilterPush/intTbl/intTbl.parquet", TEST_RES_PATH);
-    ParquetMetadata footer = getParquetMetaData(filePath);
+    final File file = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(Paths.get("parquetFilterPush", "intTbl", "intTbl.parquet"))
+      .toFile();
+    ParquetMetadata footer = getParquetMetaData(file);
 
     testParquetRowGroupFilterEval(footer, "intCol = 100", false);
     testParquetRowGroupFilterEval(footer, "intCol = 0", false);
@@ -143,8 +170,11 @@ public class TestParquetFilterPushDown extends PlanTestBase {
   public void testIntPredicateAgainstAllNullColWithEval() throws Exception {
     // intAllNull.parquet has only one int column with all values being NULL.
     // column values statistics: num_nulls: 25, min/max is not defined
-    final String filePath = String.format("%s/parquetFilterPush/intTbl/intAllNull.parquet", TEST_RES_PATH);
-    ParquetMetadata footer = getParquetMetaData(filePath);
+    final File file = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(Paths.get("parquetFilterPush", "intTbl", "intAllNull.parquet"))
+      .toFile();
+    ParquetMetadata footer = getParquetMetaData(file);
 
     testParquetRowGroupFilterEval(footer, "intCol = 100", true);
     testParquetRowGroupFilterEval(footer, "intCol = 0", true);
@@ -162,8 +192,11 @@ public class TestParquetFilterPushDown extends PlanTestBase {
     // The parquet file is created on drill 1.8.0 with DRILL CTAS:
     //   create table dfs.tmp.`dateTblCorrupted/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03';
 
-    final String filePath = String.format("%s/parquetFilterPush/dateTblCorrupted/t1/0_0_0.parquet", TEST_RES_PATH);
-    ParquetMetadata footer = getParquetMetaData(filePath);
+    final File file = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(Paths.get("parquetFilterPush", "dateTblCorrupted", "t1", "0_0_0.parquet"))
+      .toFile();
+    ParquetMetadata footer = getParquetMetaData(file);
 
     testDatePredicateAgainstDrillCTASHelper(footer);
   }
@@ -173,13 +206,15 @@ public class TestParquetFilterPushDown extends PlanTestBase {
     // The parquet file is created on drill 1.9.0-SNAPSHOT (commit id:03e8f9f3e01c56a9411bb4333e4851c92db6e410) with DRILL CTAS:
     //   create table dfs.tmp.`dateTbl1_9/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03';
 
-    final String filePath = String.format("%s/parquetFilterPush/dateTbl1_9/t1/0_0_0.parquet", TEST_RES_PATH);
-    ParquetMetadata footer = getParquetMetaData(filePath);
+    final File file = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(Paths.get("parquetFilterPush", "dateTbl1_9", "t1", "0_0_0.parquet"))
+      .toFile();
+    ParquetMetadata footer = getParquetMetaData(file);
 
     testDatePredicateAgainstDrillCTASHelper(footer);
   }
 
-
   private void testDatePredicateAgainstDrillCTASHelper(ParquetMetadata footer) throws Exception{
     testParquetRowGroupFilterEval(footer, "o_orderdate = cast('1992-01-01' as date)", false);
     testParquetRowGroupFilterEval(footer, "o_orderdate = cast('1991-12-31' as date)", true);
@@ -202,8 +237,11 @@ public class TestParquetFilterPushDown extends PlanTestBase {
   public void testTimeStampPredicateWithEval() throws Exception {
     // Table dateTblCorrupted is created by CTAS in drill 1.8.0.
     //    create table dfs.tmp.`tsTbl/t1` as select DATE_ADD(cast(o_orderdate as date), INTERVAL '0 10:20:30' DAY TO SECOND) as o_ordertimestamp from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03';
-    final String filePath = String.format("%s/parquetFilterPush/tsTbl/t1/0_0_0.parquet", TEST_RES_PATH);
-    ParquetMetadata footer = getParquetMetaData(filePath);
+    final File file = dirTestWatcher.getRootDir()
+      .toPath()
+      .resolve(Paths.get("parquetFilterPush", "tsTbl", "t1", "0_0_0.parquet"))
+      .toFile();
+    ParquetMetadata footer = getParquetMetaData(file);
 
     testParquetRowGroupFilterEval(footer, "o_ordertimestamp = cast('1992-01-01 10:20:30' as timestamp)", false);
     testParquetRowGroupFilterEval(footer, "o_ordertimestamp = cast('1992-01-01 10:20:29' as timestamp)", true);
@@ -226,97 +264,82 @@ public class TestParquetFilterPushDown extends PlanTestBase {
   @Test
   // Test against parquet files from Drill CTAS post 1.8.0 release.
   public void testDatePredicateAgaistDrillCTASPost1_8() throws  Exception {
-    String tableName = "order_ctas";
-
-    try {
-      deleteTableIfExists(tableName);
-
-      test("use dfs_test.tmp");
-      test(String.format("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03'", tableName));
-      test(String.format("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and date '1992-01-06'", tableName));
-      test(String.format("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and date '1992-01-09'", tableName));
-
-      final String query1 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate = date '1992-01-01'";
-      testParquetFilterPD(query1, 9, 1, false);
-
-      final String query2 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate < date '1992-01-01'";
-      testParquetFilterPD(query2, 0, 1, false);
-
-      final String query3 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-03'";
-      testParquetFilterPD(query3, 22, 1, false);
-
-      final String query4 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-04'";
-      testParquetFilterPD(query4, 33, 2, false);
-
-      final String query5 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-06'";
-      testParquetFilterPD(query5, 49, 2, false);
-
-      final String query6 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate > date '1992-01-10'";
-      testParquetFilterPD(query6, 0, 1, false);
-
-      // Test parquet files with metadata cache files available.
-      // Now, create parquet metadata cache files, and run the above queries again. Flag "usedMetadataFile" should be true.
-      test(String.format("refresh table metadata %s", tableName));
+    test("use dfs.tmp");
+    test("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and " +
+      "date '1992-01-03'", CTAS_TABLE);
+    test("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and " +
+      "date '1992-01-06'", CTAS_TABLE);
+    test("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and " +
+      "date '1992-01-09'", CTAS_TABLE);
+
+    final String query1 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate = date '1992-01-01'";
+    testParquetFilterPD(query1, 9, 1, false);
 
-      testParquetFilterPD(query1, 9, 1, true);
+    final String query2 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate < date '1992-01-01'";
+    testParquetFilterPD(query2, 0, 1, false);
 
-      testParquetFilterPD(query2, 0, 1, true);
+    final String query3 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-03'";
+    testParquetFilterPD(query3, 22, 1, false);
 
-      testParquetFilterPD(query3, 22, 1, true);
+    final String query4 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-04'";
+    testParquetFilterPD(query4, 33, 2, false);
 
-      testParquetFilterPD(query4, 33, 2, true);
+    final String query5 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate between date '1992-01-01' and date '1992-01-06'";
+    testParquetFilterPD(query5, 49, 2, false);
 
-      testParquetFilterPD(query5, 49, 2, true);
+    final String query6 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate > date '1992-01-10'";
+    testParquetFilterPD(query6, 0, 1, false);
 
-      testParquetFilterPD(query6, 0, 1, true);
-    } finally {
-      deleteTableIfExists(tableName);
-    }
+    // Test parquet files with metadata cache files available.
+    // Now, create parquet metadata cache files, and run the above queries again. Flag "usedMetadataFile" should be true.
+    test(String.format("refresh table metadata %s", CTAS_TABLE));
+    testParquetFilterPD(query1, 9, 1, true);
+    testParquetFilterPD(query2, 0, 1, true);
+    testParquetFilterPD(query3, 22, 1, true);
+    testParquetFilterPD(query4, 33, 2, true);
+    testParquetFilterPD(query5, 49, 2, true);
+    testParquetFilterPD(query6, 0, 1, true);
   }
 
   @Test
   public void testParquetFilterPDOptionsDisabled() throws Exception {
-    final String tableName = "order_ctas";
-
     try {
-      deleteTableIfExists(tableName);
-
-      test("alter session set `" + PlannerSettings.PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY  + "` = false");
+      test("alter session set `%s` = false", PlannerSettings.PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY);
 
-      test("use dfs_test.tmp");
-      test(String.format("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03'", tableName));
-      test(String.format("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and date '1992-01-06'", tableName));
-      test(String.format("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and date '1992-01-09'", tableName));
+      test("use dfs.tmp");
+      test("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and " +
+        "date '1992-01-03'", CTAS_TABLE);
+      test("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and " +
+        "date '1992-01-06'", CTAS_TABLE);
+      test("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and " +
+        "date '1992-01-09'", CTAS_TABLE);
 
-      final String query1 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate = date '1992-01-01'";
+      final String query1 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate = date '1992-01-01'";
       testParquetFilterPD(query1, 9, 3, false);
 
     } finally {
       resetSessionOption(PlannerSettings.PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_KEY);
-      deleteTableIfExists(tableName);
     }
   }
 
   @Test
   public void testParquetFilterPDOptionsThreshold() throws Exception {
-    final String tableName = "order_ctas";
-
     try {
-      deleteTableIfExists(tableName);
-
       test("alter session set `" + PlannerSettings.PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_THRESHOLD_KEY  + "` = 2 ");
 
-      test("use dfs_test.tmp");
-      test(String.format("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and date '1992-01-03'", tableName));
-      test(String.format("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and date '1992-01-06'", tableName));
-      test(String.format("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and date '1992-01-09'", tableName));
+      test("use dfs.tmp");
+      test("create table `%s/t1` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-01' and " +
+        "date '1992-01-03'", CTAS_TABLE);
+      test("create table `%s/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and " +
+        "date '1992-01-06'", CTAS_TABLE);
+      test("create table `%s/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and " +
+        "date '1992-01-09'", CTAS_TABLE);
 
-      final String query1 = "select o_orderdate from dfs_test.tmp.order_ctas where o_orderdate = date '1992-01-01'";
+      final String query1 = "select o_orderdate from dfs.tmp.order_ctas where o_orderdate = date '1992-01-01'";
       testParquetFilterPD(query1, 9, 3, false);
 
     } finally {
       resetSessionOption(PlannerSettings.PARQUET_ROWGROUP_FILTER_PUSHDOWN_PLANNING_THRESHOLD_KEY);
-      deleteTableIfExists(tableName);
     }
   }
 
@@ -328,22 +351,22 @@ public class TestParquetFilterPushDown extends PlanTestBase {
     //    create table dfs.tmp.`dateTblCorrupted/t2` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and date '1992-01-06';
     //    create table dfs.tmp.`dateTblCorrupted/t3` as select cast(o_orderdate as date) as o_orderdate from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and date '1992-01-09';
 
-    final String query1 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate = date '1992-01-01'", TEST_RES_PATH);
+    final String query1 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate = date '1992-01-01'";
     testParquetFilterPD(query1, 9, 1, false);
 
-    final String query2 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate < date '1992-01-01'", TEST_RES_PATH);
+    final String query2 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate < date '1992-01-01'";
     testParquetFilterPD(query2, 0, 1, false);
 
-    final String query3 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-03'", TEST_RES_PATH);
+    final String query3 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-03'";
     testParquetFilterPD(query3, 22, 1, false);
 
-    final String query4 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-04'", TEST_RES_PATH);
+    final String query4 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-04'";
     testParquetFilterPD(query4, 33, 2, false);
 
-    final String query5 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-06'", TEST_RES_PATH);
+    final String query5 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate between date '1992-01-01' and date '1992-01-06'";
     testParquetFilterPD(query5, 49, 2, false);
 
-    final String query6 = String.format("select o_orderdate from dfs_test.`%s/parquetFilterPush/dateTblCorrupted` where o_orderdate > date '1992-01-10'", TEST_RES_PATH);
+    final String query6 = "select o_orderdate from dfs.`parquetFilterPush/dateTblCorrupted` where o_orderdate > date '1992-01-10'";
 
     testParquetFilterPD(query6, 0, 1, false);
   }
@@ -355,18 +378,18 @@ public class TestParquetFilterPushDown extends PlanTestBase {
     //    create table dfs.tmp.`tsTbl/t2` as select DATE_ADD(cast(o_orderdate as date), INTERVAL '0 10:20:30' DAY TO SECOND) as o_ordertimestamp from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-04' and date '1992-01-06';
     //    create table dfs.tmp.`tsTbl/t3` as select DATE_ADD(cast(o_orderdate as date), INTERVAL '0 10:20:30' DAY TO SECOND) as o_ordertimestamp from cp.`tpch/orders.parquet` where o_orderdate between date '1992-01-07' and date '1992-01-09';
 
-    final String query1 = String.format("select o_ordertimestamp from dfs_test.`%s/parquetFilterPush/tsTbl` where o_ordertimestamp = timestamp '1992-01-01 10:20:30'", TEST_RES_PATH);
+    final String query1 = "select o_ordertimestamp from dfs.`parquetFilterPush/tsTbl` where o_ordertimestamp = timestamp '1992-01-01 10:20:30'";
     testParquetFilterPD(query1, 9, 1, false);
 
-    final String query2 = String.format("select o_ordertimestamp from dfs_test.`%s/parquetFilterPush/tsTbl` where o_ordertimestamp < timestamp '1992-01-01 10:20:30'", TEST_RES_PATH);
+    final String query2 = "select o_ordertimestamp from dfs.`parquetFilterPush/tsTbl` where o_ordertimestamp < timestamp '1992-01-01 10:20:30'";
     testParquetFilterPD(query2, 0, 1, false);
 
-    final String query3 = String.format("select o_ordertimestamp from dfs_test.`%s/parquetFilterPush/tsTbl` where o_ordertimestamp between timestamp '1992-01-01 00:00:00' and timestamp '1992-01-06 10:20:30'", TEST_RES_PATH);
+    final String query3 = "select o_ordertimestamp from dfs.`parquetFilterPush/tsTbl` where o_ordertimestamp between timestamp '1992-01-01 00:00:00' and timestamp '1992-01-06 10:20:30'";
     testParquetFilterPD(query3, 49, 2, false);
   }
 
   @Test // DRILL-5359
-  public void testFilterWithItemFlatten() throws  Exception {
+  public void testFilterWithItemFlatten() throws Exception {
     final String sql = "select n_regionkey\n"
         + "from (select n_regionkey, \n"
         + "            flatten(nation.cities) as cities \n"
@@ -384,7 +407,7 @@ public class TestParquetFilterPushDown extends PlanTestBase {
   public void testMultiRowGroup() throws Exception {
     // multirowgroup is a parquet file with 2 rowgroups inside. One with a = 1 and the other with a = 2;
     // FilterPushDown should be able to remove the rowgroup with a = 1 from the scan operator.
-    final String sql = String.format("select * from dfs_test.`%s/parquet/multirowgroup.parquet` where a > 1", TEST_RES_PATH);
+    final String sql = "select * from dfs.`parquet/multirowgroup.parquet` where a > 1";
     final String[] expectedPlan = {"numRowGroups=1"};
     final String[] excludedPlan = {};
     PlanTestBase.testPlanMatchingPatterns(sql, expectedPlan, excludedPlan);
@@ -416,19 +439,7 @@ public class TestParquetFilterPushDown extends PlanTestBase {
     Assert.assertEquals(canDropExpected, canDrop);
   }
 
-  private ParquetMetadata getParquetMetaData(String filePathStr) throws IOException{
-    return ParquetFileReader.readFooter(new Configuration(conf), new Path(filePathStr));
+  private ParquetMetadata getParquetMetaData(File file) throws IOException {
+    return ParquetFileReader.readFooter(new Configuration(fs.getConf()), new Path(file.toURI()));
   }
-
-  private static void deleteTableIfExists(String tableName) {
-    try {
-      Path path = new Path(getDfsTestTmpSchemaLocation(), tableName);
-      if (fs.exists(path)) {
-        fs.delete(path, true);
-      }
-    } catch (Exception e) {
-      // ignore exceptions.
-    }
-  }
-
 }
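
Note: testParquetFilterPD is the assertion helper called throughout the tests above. Its
body lies outside this hunk; below is a minimal sketch of what it plausibly looks like,
inferred from the call sites and from the "numRowGroups" and "usedMetadataFile" plan
attributes used in this file (the parameter names are assumptions, not taken from the diff):

    // Sketch only: run the query, verify the row count, then verify that the physical
    // plan reports the expected row-group count and metadata-cache usage.
    private void testParquetFilterPD(final String query, int expectedRowCount,
        int expectedNumRowGroups, boolean usedMetadataFile) throws Exception {
      int actualRowCount = testSql(query); // BaseTestQuery helper that returns the row count
      assertEquals(expectedRowCount, actualRowCount);
      final String numRowGroupsPattern = "numRowGroups=" + expectedNumRowGroups;
      final String usedMetaPattern = "usedMetadataFile=" + usedMetadataFile;
      testPlanMatchingPatterns(query,
          new String[] {numRowGroupsPattern, usedMetaPattern}, new String[] {});
    }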

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetGroupScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetGroupScan.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetGroupScan.java
index 21d6285..d7ccea4 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetGroupScan.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/parquet/TestParquetGroupScan.java
@@ -17,7 +17,7 @@
  */
 package org.apache.drill.exec.store.parquet;
 
-import org.apache.drill.BaseTestQuery;
+import org.apache.drill.test.BaseTestQuery;
 import org.apache.drill.common.exceptions.UserRemoteException;
 import org.junit.Test;
 
@@ -28,27 +28,27 @@ public class TestParquetGroupScan extends BaseTestQuery {
 
   private void prepareTables(final String tableName, boolean refreshMetadata) throws Exception {
     // first create some parquet subfolders
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s`      AS SELECT employee_id FROM cp.`employee.json` LIMIT 1", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/501`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 2", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/502`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 4", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/503`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 8", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/504`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 16", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/505`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 32", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/60`   AS SELECT employee_id FROM cp.`employee.json` LIMIT 64", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/602`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 128", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/6031` AS SELECT employee_id FROM cp.`employee.json` LIMIT 256", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/6032` AS SELECT employee_id FROM cp.`employee.json` LIMIT 512", tableName);
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/6033` AS SELECT employee_id FROM cp.`employee.json` LIMIT 1024", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s`      AS SELECT employee_id FROM cp.`employee.json` LIMIT 1", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/501`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 2", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/502`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 4", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/503`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 8", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/504`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 16", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/505`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 32", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/60`   AS SELECT employee_id FROM cp.`employee.json` LIMIT 64", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/602`  AS SELECT employee_id FROM cp.`employee.json` LIMIT 128", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/6031` AS SELECT employee_id FROM cp.`employee.json` LIMIT 256", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/6032` AS SELECT employee_id FROM cp.`employee.json` LIMIT 512", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/6033` AS SELECT employee_id FROM cp.`employee.json` LIMIT 1024", tableName);
 
     // we need an empty subfolder `4376/20160401`
     // to do this we first create a table inside that subfolder
-    testNoResult("CREATE TABLE dfs_test.tmp.`%s/6041/a` AS SELECT * FROM cp.`employee.json` LIMIT 1", tableName);
+    testNoResult("CREATE TABLE dfs.tmp.`%s/6041/a` AS SELECT * FROM cp.`employee.json` LIMIT 1", tableName);
     // then we delete the table, leaving the parent subfolder empty
-    testNoResult("DROP TABLE   dfs_test.tmp.`%s/6041/a`", tableName);
+    testNoResult("DROP TABLE   dfs.tmp.`%s/6041/a`", tableName);
 
     if (refreshMetadata) {
       // build the metadata cache file
-      testNoResult("REFRESH TABLE METADATA dfs_test.tmp.`%s`", tableName);
+      testNoResult("REFRESH TABLE METADATA dfs.tmp.`%s`", tableName);
     }
   }
 
@@ -57,7 +57,7 @@ public class TestParquetGroupScan extends BaseTestQuery {
     prepareTables("4376_1", true);
 
     testBuilder()
-      .sqlQuery("SELECT COUNT(*) AS `count` FROM dfs_test.tmp.`4376_1/60*`")
+      .sqlQuery("SELECT COUNT(*) AS `count` FROM dfs.tmp.`4376_1/60*`")
       .ordered()
       .baselineColumns("count").baselineValues(1984L)
       .go();
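
Aside: the 1984L baseline follows from the glob. A hypothetical illustration of how
`4376_1/60*` resolves, using the Hadoop FileSystem globStatus API (the path below is a
placeholder, not the test's actual temp location):

    // Matches 60, 602, 6031, 6032, 6033, plus the empty 6041 created by prepareTables.
    FileStatus[] matches = fs.globStatus(new Path("/tmp/drill/4376_1/60*"));
    // The non-empty matches were created with LIMIT 64, 128, 256, 512 and 1024,
    // so the count query returns 64 + 128 + 256 + 512 + 1024 = 1984 rows.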
@@ -68,7 +68,7 @@ public class TestParquetGroupScan extends BaseTestQuery {
     prepareTables("4376_2", true);
 
     try {
-      runSQL("SELECT COUNT(*) AS `count` FROM dfs_test.tmp.`4376_2/604*`");
+      runSQL("SELECT COUNT(*) AS `count` FROM dfs.tmp.`4376_2/604*`");
       fail("Query should've failed!");
     } catch (UserRemoteException uex) {
       final String expectedMsg = "The table you tried to query is empty";
@@ -82,10 +82,10 @@ public class TestParquetGroupScan extends BaseTestQuery {
     prepareTables("4376_3", false);
 
     try {
-      runSQL("SELECT COUNT(*) AS `count` FROM dfs_test.tmp.`4376_3/604*`");
+      runSQL("SELECT COUNT(*) AS `count` FROM dfs.tmp.`4376_3/604*`");
       fail("Query should've failed!");
     } catch (UserRemoteException uex) {
-      final String expectedMsg = "Table 'dfs_test.tmp.4376_3/604*' not found";
+      final String expectedMsg = "Table 'dfs.tmp.4376_3/604*' not found";
       assertTrue(String.format("Error message should contain \"%s\" but was instead \"%s\"", expectedMsg,
         uex.getMessage()), uex.getMessage().contains(expectedMsg));
     }
@@ -96,7 +96,7 @@ public class TestParquetGroupScan extends BaseTestQuery {
     prepareTables("4376_4", true);
 
     try {
-      runSQL("SELECT COUNT(*) AS `count` FROM dfs_test.tmp.`4376_4/6041`");
+      runSQL("SELECT COUNT(*) AS `count` FROM dfs.tmp.`4376_4/6041`");
       fail("Query should've failed!");
     } catch (UserRemoteException uex) {
       final String expectedMsg = "The table you tried to query is empty";
@@ -109,10 +109,10 @@ public class TestParquetGroupScan extends BaseTestQuery {
   public void testSelectEmptyNoCache() throws Exception {
     prepareTables("4376_5", false);
     try {
-      runSQL("SELECT COUNT(*) AS `count` FROM dfs_test.tmp.`4376_5/6041`");
+      runSQL("SELECT COUNT(*) AS `count` FROM dfs.tmp.`4376_5/6041`");
       fail("Query should've failed!");
     } catch (UserRemoteException uex) {
-      final String expectedMsg = "Table 'dfs_test.tmp.4376_5/6041' not found";
+      final String expectedMsg = "Table 'dfs.tmp.4376_5/6041' not found";
       assertTrue(String.format("Error message should contain \"%s\" but was instead \"%s\"", expectedMsg,
         uex.getMessage()), uex.getMessage().contains(expectedMsg));
     }
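
A note on the two failure modes exercised above: after REFRESH TABLE METADATA, the metadata
cache lets Drill resolve the empty `6041` directory, so the error is "The table you tried to
query is empty"; without the cache the empty directory cannot be resolved as a table at all,
so the error is "Table ... not found". The repeated try/catch blocks follow one pattern,
condensed here into a hypothetical helper (the helper name is illustrative, not part of the
diff):

    // Illustrative only: run a query expected to fail and assert on the error text.
    private void assertQueryFails(String sql, String expectedMsg) throws Exception {
      try {
        runSQL(sql);
        fail("Query should've failed!");
      } catch (UserRemoteException uex) {
        assertTrue(String.format("Error message should contain \"%s\" but was instead \"%s\"",
            expectedMsg, uex.getMessage()), uex.getMessage().contains(expectedMsg));
      }
    }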