Posted to commits@drill.apache.org by pr...@apache.org on 2017/11/15 01:47:06 UTC

[20/22] drill git commit: DRILL-5783, DRILL-5841, DRILL-5894: Rationalize test temp directories

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java b/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
deleted file mode 100644
index 05ca6ea..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/BaseTestQuery.java
+++ /dev/null
@@ -1,713 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill;
-
-import static org.hamcrest.core.StringContains.containsString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.net.URL;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.drill.DrillTestWrapper.TestServices;
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.config.DrillProperties;
-import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.scanner.ClassPathScanner;
-import org.apache.drill.common.scanner.persistence.ScanResult;
-import org.apache.drill.common.util.TestTools;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.memory.RootAllocatorFactory;
-import org.apache.drill.exec.proto.UserBitShared;
-import org.apache.drill.exec.proto.UserBitShared.QueryId;
-import org.apache.drill.exec.proto.UserBitShared.QueryResult.QueryState;
-import org.apache.drill.exec.proto.UserBitShared.QueryType;
-import org.apache.drill.exec.proto.UserProtos.PreparedStatementHandle;
-import org.apache.drill.exec.record.RecordBatchLoader;
-import org.apache.drill.exec.rpc.ConnectionThrottle;
-import org.apache.drill.exec.rpc.user.AwaitableUserResultsListener;
-import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserResultsListener;
-import org.apache.drill.exec.server.Drillbit;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.server.RemoteServiceSet;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.apache.drill.exec.util.TestUtilities;
-import org.apache.drill.exec.util.VectorUtil;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.rules.TestRule;
-import org.junit.rules.TestWatcher;
-import org.junit.runner.Description;
-
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-import com.google.common.io.Resources;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.drill.exec.record.VectorWrapper;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.test.ClusterFixture;
-
-public class BaseTestQuery extends ExecTest {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BaseTestQuery.class);
-
-  public static final String TEST_SCHEMA = "dfs_test";
-  public static final String TEMP_SCHEMA = TEST_SCHEMA + ".tmp";
-
-  private static final int MAX_WIDTH_PER_NODE = 2;
-
-  @SuppressWarnings("serial")
-  private static final Properties TEST_CONFIGURATIONS = new Properties() {
-    {
-      put(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, "false");
-      put(ExecConstants.HTTP_ENABLE, "false");
-      // Increasing retry attempts for testing
-      put(ExecConstants.UDF_RETRY_ATTEMPTS, "10");
-      put(ExecConstants.SSL_USE_HADOOP_CONF, "false");
-    }
-  };
-
-  public final TestRule resetWatcher = new TestWatcher() {
-    @Override
-    protected void failed(Throwable e, Description description) {
-      try {
-        resetClientAndBit();
-      } catch (Exception e1) {
-        throw new RuntimeException("Failure while resetting client.", e1);
-      }
-    }
-  };
-
-  protected static DrillClient client;
-  protected static Drillbit[] bits;
-  protected static RemoteServiceSet serviceSet;
-  protected static DrillConfig config;
-  protected static BufferAllocator allocator;
-
-  /**
-   * Number of Drillbits in test cluster. Default is 1.
-   *
-   * Tests can update the cluster size through {@link #updateTestCluster(int, DrillConfig)}
-   */
-  private static int drillbitCount = 1;
-
-  /**
-   * Location of the dfs_test.tmp schema on local filesystem.
-   */
-  private static String dfsTestTmpSchemaLocation;
-
-  private int[] columnWidths = new int[] { 8 };
-
-  private static ScanResult classpathScan;
-
-  private static FileSystem fs;
-
-  @BeforeClass
-  public static void setupDefaultTestCluster() throws Exception {
-    config = DrillConfig.create(TEST_CONFIGURATIONS);
-    classpathScan = ClassPathScanner.fromPrescan(config);
-    openClient();
-    // turn on verbose errors in tests:
-    // server-side stack traces are added to the message before it is sent back to the client
-    test("ALTER SESSION SET `exec.errors.verbose` = true");
-    fs = getLocalFileSystem();
-  }
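-  // A typical test simply extends BaseTestQuery and drives queries through the
-  // static helpers. A minimal sketch (illustrative only, not part of this file):
-  //
-  //   public class TestExample extends BaseTestQuery {
-  //     @Test
-  //     public void simpleQuery() throws Exception {
-  //       test("SELECT * FROM cp.`employee.json` LIMIT 1");
-  //     }
-  //   }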
-
-  protected static void updateTestCluster(int newDrillbitCount, DrillConfig newConfig) {
-    updateTestCluster(newDrillbitCount, newConfig, null);
-  }
-
-  protected static void updateTestCluster(int newDrillbitCount, DrillConfig newConfig, Properties properties) {
-    Preconditions.checkArgument(newDrillbitCount > 0, "Number of Drillbits must be at least one");
-    if (drillbitCount != newDrillbitCount || config != null) {
-      // TODO: Currently we have to shutdown the existing Drillbit cluster before starting a new one with the given
-      // Drillbit count. Revisit later to avoid stopping the cluster.
-      try {
-        closeClient();
-        drillbitCount = newDrillbitCount;
-        if (newConfig != null) {
-          // For next test class, updated DrillConfig will be replaced by default DrillConfig in BaseTestQuery as part
-          // of the @BeforeClass method of test class.
-          config = newConfig;
-        }
-        openClient(properties);
-      } catch(Exception e) {
-        throw new RuntimeException("Failure while updating the test Drillbit cluster.", e);
-      }
-    }
-  }
-
-  /**
-   * Useful for tests that require a DrillbitContext to get/add storage plugins, options etc.
-   *
-   * @return DrillbitContext of first Drillbit in the cluster.
-   */
-  protected static DrillbitContext getDrillbitContext() {
-    Preconditions.checkState(bits != null && bits[0] != null, "Drillbits are not setup.");
-    return bits[0].getContext();
-  }
-
-  protected static Properties cloneDefaultTestConfigProperties() {
-    final Properties props = new Properties();
-    for(String propName : TEST_CONFIGURATIONS.stringPropertyNames()) {
-      props.put(propName, TEST_CONFIGURATIONS.getProperty(propName));
-    }
-
-    return props;
-  }
-
-  protected static String getDfsTestTmpSchemaLocation() {
-    return dfsTestTmpSchemaLocation;
-  }
-
-  private static void resetClientAndBit() throws Exception{
-    closeClient();
-    openClient();
-  }
-
-  private static void openClient() throws Exception {
-    openClient(null);
-  }
-
-  private static void openClient(Properties properties) throws Exception {
-    if (properties == null) {
-      properties = new Properties();
-    }
-
-    allocator = RootAllocatorFactory.newRoot(config);
-    serviceSet = RemoteServiceSet.getLocalServiceSet();
-
-    dfsTestTmpSchemaLocation = TestUtilities.createTempDir();
-
-    bits = new Drillbit[drillbitCount];
-    for(int i = 0; i < drillbitCount; i++) {
-      bits[i] = new Drillbit(config, serviceSet, classpathScan);
-      bits[i].run();
-
-      @SuppressWarnings("resource")
-      final StoragePluginRegistry pluginRegistry = bits[i].getContext().getStorage();
-      TestUtilities.updateDfsTestTmpSchemaLocation(pluginRegistry, dfsTestTmpSchemaLocation);
-      TestUtilities.makeDfsTmpSchemaImmutable(pluginRegistry);
-    }
-
-    if (!properties.containsKey(DrillProperties.DRILLBIT_CONNECTION)) {
-      properties.setProperty(DrillProperties.DRILLBIT_CONNECTION,
-          String.format("localhost:%s", bits[0].getUserPort()));
-    }
-
-    DrillConfig clientConfig = DrillConfig.forClient();
-    client = QueryTestUtil.createClient(clientConfig,  serviceSet, MAX_WIDTH_PER_NODE, properties);
-  }
-
-  /**
-   * Close the current <i>client</i> and open a new client using the given <i>properties</i>. All tests executed
-   * after this method call use the new <i>client</i>.
-   *
-   * @param properties connection properties for the new client
-   */
-  public static void updateClient(Properties properties) throws Exception {
-    Preconditions.checkState(bits != null && bits[0] != null, "Drillbits are not setup.");
-    if (client != null) {
-      client.close();
-      client = null;
-    }
-
-    DrillConfig clientConfig = DrillConfig.forClient();
-    client = QueryTestUtil.createClient(clientConfig, serviceSet, MAX_WIDTH_PER_NODE, properties);
-  }
-
-  /**
-   * Close the current <i>client</i> and open a new client for the given user. All tests executed
-   * after this method call use the new <i>client</i>.
-   * @param user user name
-   */
-  public static void updateClient(String user) throws Exception {
-    updateClient(user, null);
-  }
-
-  /**
-   * Close the current <i>client</i> and open a new client for the given user and password credentials. Tests
-   * executed after this method call use the new <i>client</i>.
-   * @param user user name
-   * @param password password; may be null to skip setting the password property
-   */
-  public static void updateClient(final String user, final String password) throws Exception {
-    final Properties props = new Properties();
-    props.setProperty(DrillProperties.USER, user);
-    if (password != null) {
-      props.setProperty(DrillProperties.PASSWORD, password);
-    }
-    updateClient(props);
-  }
-
-  protected static BufferAllocator getAllocator() {
-    return allocator;
-  }
-
-  public static int getUserPort() {
-    return bits[0].getUserPort();
-  }
-
-  public static TestBuilder newTest() {
-    return testBuilder();
-  }
-
-
-  public static class ClassicTestServices implements TestServices {
-    @Override
-    public BufferAllocator allocator() {
-      return allocator;
-    }
-
-    @Override
-    public void test(String query) throws Exception {
-      BaseTestQuery.test(query);
-    }
-
-    @Override
-    public List<QueryDataBatch> testRunAndReturn(final QueryType type, final Object query) throws Exception {
-      return BaseTestQuery.testRunAndReturn(type, query);
-    }
-  }
-
-  public static TestBuilder testBuilder() {
-    return new TestBuilder(new ClassicTestServices());
-  }
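-  // A minimal sketch of the fluent TestBuilder API (the query, columns and
-  // values here are illustrative, not taken from this file):
-  //
-  //   testBuilder()
-  //       .sqlQuery("SELECT employee_id FROM cp.`employee.json` LIMIT 1")
-  //       .unOrdered()
-  //       .baselineColumns("employee_id")
-  //       .baselineValues(1L)
-  //       .go();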
-
-  @AfterClass
-  public static void closeClient() throws Exception {
-    if (client != null) {
-      client.close();
-    }
-
-    if (bits != null) {
-      for(final Drillbit bit : bits) {
-        if (bit != null) {
-          bit.close();
-        }
-      }
-    }
-
-    if(serviceSet != null) {
-      serviceSet.close();
-    }
-    if (allocator != null) {
-      allocator.close();
-    }
-  }
-
-  @AfterClass
-  public static void resetDrillbitCount() {
-    // some test classes assume this value to be 1 and will fail if run alongside other tests that increase it
-    drillbitCount = 1;
-  }
-
-  protected static void runSQL(String sql) throws Exception {
-    final AwaitableUserResultsListener listener = new AwaitableUserResultsListener(new SilentListener());
-    testWithListener(QueryType.SQL, sql, listener);
-    listener.await();
-  }
-
-  protected static List<QueryDataBatch> testSqlWithResults(String sql) throws Exception{
-    return testRunAndReturn(QueryType.SQL, sql);
-  }
-
-  protected static List<QueryDataBatch> testLogicalWithResults(String logical) throws Exception{
-    return testRunAndReturn(QueryType.LOGICAL, logical);
-  }
-
-  protected static List<QueryDataBatch> testPhysicalWithResults(String physical) throws Exception{
-    return testRunAndReturn(QueryType.PHYSICAL, physical);
-  }
-
-  public static List<QueryDataBatch>  testRunAndReturn(QueryType type, Object query) throws Exception{
-    if (type == QueryType.PREPARED_STATEMENT) {
-      Preconditions.checkArgument(query instanceof PreparedStatementHandle,
-          "Expected an instance of PreparedStatement as input query");
-      return testPreparedStatement((PreparedStatementHandle)query);
-    } else {
-      Preconditions.checkArgument(query instanceof String, "Expected a string as input query");
-      query = QueryTestUtil.normalizeQuery((String)query);
-      return client.runQuery(type, (String)query);
-    }
-  }
-
-  public static List<QueryDataBatch> testPreparedStatement(PreparedStatementHandle handle) throws Exception {
-    return client.executePreparedStatement(handle);
-  }
-
-  public static int testRunAndPrint(final QueryType type, final String query) throws Exception {
-    return QueryTestUtil.testRunAndPrint(client, type, query);
-  }
-
-  protected static void testWithListener(QueryType type, String query, UserResultsListener resultListener) {
-    QueryTestUtil.testWithListener(client, type, query, resultListener);
-  }
-
-  public static void testNoResult(String query, Object... args) throws Exception {
-    testNoResult(1, query, args);
-  }
-
-  public static void alterSession(String option, Object value) {
-    String valueStr = ClusterFixture.stringify(value);
-    try {
-      test("ALTER SESSION SET `%s` = %s", option, valueStr);
-    } catch(final Exception e) {
-      fail(String.format("Failed to set session option `%s` = %s, Error: %s",
-          option, valueStr, e.toString()));
-    }
-  }
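-  // For example, a test can pin an option for its duration and restore it afterwards
-  // (a hedged sketch; ExecConstants.SLICE_TARGET is the standard planner.slice_target key):
-  //
-  //   alterSession(ExecConstants.SLICE_TARGET, 1);
-  //   try {
-  //     // ... run queries ...
-  //   } finally {
-  //     resetSessionOption(ExecConstants.SLICE_TARGET);
-  //   }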
-
-  public static void resetSessionOption(String option) {
-    try {
-      test("ALTER SESSION RESET `%s`", option);
-    } catch(final Exception e) {
-      fail(String.format("Failed to reset session option `%s`, Error: %s",
-          option, e.toString()));
-    }
-  }
-
-  protected static void testNoResult(int iterations, String query, Object... args) throws Exception {
-    query = String.format(query, args);
-    logger.debug("Running query:\n--------------\n" + query);
-    for (int i = 0; i < iterations; i++) {
-      final List<QueryDataBatch> results = client.runQuery(QueryType.SQL, query);
-      for (final QueryDataBatch queryDataBatch : results) {
-        queryDataBatch.release();
-      }
-    }
-  }
-
-  public static void test(String query, Object... args) throws Exception {
-    QueryTestUtil.test(client, String.format(query, args));
-  }
-
-  public static void test(final String query) throws Exception {
-    QueryTestUtil.test(client, query);
-  }
-
-  protected static int testLogical(String query) throws Exception{
-    return testRunAndPrint(QueryType.LOGICAL, query);
-  }
-
-  protected static int testPhysical(String query) throws Exception{
-    return testRunAndPrint(QueryType.PHYSICAL, query);
-  }
-
-  protected static int testSql(String query) throws Exception{
-    return testRunAndPrint(QueryType.SQL, query);
-  }
-
-  protected static void testPhysicalFromFile(String file) throws Exception{
-    testPhysical(getFile(file));
-  }
-
-  protected static List<QueryDataBatch> testPhysicalFromFileWithResults(String file) throws Exception {
-    return testRunAndReturn(QueryType.PHYSICAL, getFile(file));
-  }
-
-  protected static void testLogicalFromFile(String file) throws Exception{
-    testLogical(getFile(file));
-  }
-
-  protected static void testSqlFromFile(String file) throws Exception{
-    test(getFile(file));
-  }
-
-  /**
-   * Utility method which tests that the given query produces a {@link UserException} whose message contains
-   * the given expected message.
-   * @param testSqlQuery Test query
-   * @param expectedErrorMsg Expected error message.
-   */
-  protected static void errorMsgTestHelper(final String testSqlQuery, final String expectedErrorMsg) throws Exception {
-    try {
-      test(testSqlQuery);
-      fail("Expected a UserException when running " + testSqlQuery);
-    } catch (final UserException actualException) {
-      try {
-        assertThat("message of UserException when running " + testSqlQuery, actualException.getMessage(), containsString(expectedErrorMsg));
-      } catch (AssertionError e) {
-        e.addSuppressed(actualException);
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * Utility method which tests that the given query produces a {@link UserException}
-   * whose {@link org.apache.drill.exec.proto.UserBitShared.DrillPBError.ErrorType} is
-   * {@code DrillPBError.ErrorType.PARSE}.
-   * @param testSqlQuery Test query
-   */
-  protected static void parseErrorHelper(final String testSqlQuery) throws Exception {
-    errorMsgTestHelper(testSqlQuery, UserBitShared.DrillPBError.ErrorType.PARSE.name());
-  }
-
-  public static String getFile(String resource) throws IOException{
-    final URL url = Resources.getResource(resource);
-    if (url == null) {
-      throw new IOException(String.format("Unable to find path %s.", resource));
-    }
-    return Resources.toString(url, Charsets.UTF_8);
-  }
-
-  /**
-   * Copy the resource (e.g. a file on the classpath) to a physical file on the local FileSystem.
-   * @param resource path of the resource on the classpath
-   * @return the file path
-   * @throws IOException if the resource cannot be read or the file cannot be written
-   */
-  public static String getPhysicalFileFromResource(final String resource) throws IOException {
-    final File file = File.createTempFile("tempfile", ".txt");
-    file.deleteOnExit();
-    final PrintWriter printWriter = new PrintWriter(file);
-    printWriter.write(BaseTestQuery.getFile(resource));
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  protected static void setSessionOption(final String option, final boolean value) {
-    alterSession(option, value);
-  }
-
-  protected static void setSessionOption(final String option, final long value) {
-    alterSession(option, value);
-  }
-
-  protected static void setSessionOption(final String option, final double value) {
-    alterSession(option, value);
-  }
-
-  protected static void setSessionOption(final String option, final String value) {
-    alterSession(option, value);
-  }
-
-  public static class SilentListener implements UserResultsListener {
-    private final AtomicInteger count = new AtomicInteger();
-
-    @Override
-    public void submissionFailed(UserException ex) {
-      logger.debug("Query failed: " + ex.getMessage());
-    }
-
-    @Override
-    public void queryCompleted(QueryState state) {
-      logger.debug("Query completed successfully with row count: " + count.get());
-    }
-
-    @Override
-    public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
-      final int rows = result.getHeader().getRowCount();
-      if (result.getData() != null) {
-        count.addAndGet(rows);
-      }
-      result.release();
-    }
-
-    @Override
-    public void queryIdArrived(QueryId queryId) {}
-
-  }
-
-  protected void setColumnWidth(int columnWidth) {
-    this.columnWidths = new int[] { columnWidth };
-  }
-
-  protected void setColumnWidths(int[] columnWidths) {
-    this.columnWidths = columnWidths;
-  }
-
-  protected int printResult(List<QueryDataBatch> results) throws SchemaChangeException {
-    int rowCount = 0;
-    final RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-    for(final QueryDataBatch result : results) {
-      rowCount += result.getHeader().getRowCount();
-      loader.load(result.getHeader().getDef(), result.getData());
-      // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
-      // SchemaChangeException, so check/clean throw clause above.
-      VectorUtil.showVectorAccessibleContent(loader, columnWidths);
-      loader.clear();
-      result.release();
-    }
-    System.out.println("Total record count: " + rowCount);
-    return rowCount;
-  }
-
-  protected static String getResultString(List<QueryDataBatch> results, String delimiter)
-      throws SchemaChangeException {
-    final StringBuilder formattedResults = new StringBuilder();
-    boolean includeHeader = true;
-    final RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-    for(final QueryDataBatch result : results) {
-      loader.load(result.getHeader().getDef(), result.getData());
-      if (loader.getRecordCount() <= 0) {
-        continue;
-      }
-      VectorUtil.appendVectorAccessibleContent(loader, formattedResults, delimiter, includeHeader);
-      includeHeader = false; // append the header only before the first non-empty batch
-      loader.clear();
-      result.release();
-    }
-
-    return formattedResults.toString();
-  }
-
-
-  public class TestResultSet {
-
-    private final List<List<String>> rows;
-
-    public TestResultSet() {
-      rows = new ArrayList<>();
-    }
-
-    public TestResultSet(List<QueryDataBatch> batches) throws SchemaChangeException {
-      rows = new ArrayList<>();
-      convert(batches);
-    }
-
-    public void addRow(String... cells) {
-      List<String> newRow = Arrays.asList(cells);
-      rows.add(newRow);
-    }
-
-    public int size() {
-      return rows.size();
-    }
-
-    @Override public boolean equals(Object o) {
-      boolean result = false;
-
-      if (this == o) {
-        result = true;
-      } else if (o instanceof TestResultSet) {
-        TestResultSet that = (TestResultSet) o;
-        assertEquals(this.size(), that.size());
-        for (int i = 0; i < this.rows.size(); i++) {
-          assertEquals(this.rows.get(i).size(), that.rows.get(i).size());
-          for (int j = 0; j < this.rows.get(i).size(); ++j) {
-            assertEquals(this.rows.get(i).get(j), that.rows.get(i).get(j));
-          }
-        }
-        result = true;
-      }
-
-      return result;
-    }
-
-    private void convert(List<QueryDataBatch> batches) throws SchemaChangeException {
-      RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-      for (QueryDataBatch batch : batches) {
-        int rc = batch.getHeader().getRowCount();
-        if (batch.getData() != null) {
-          loader.load(batch.getHeader().getDef(), batch.getData());
-          for (int i = 0; i < rc; ++i) {
-            List<String> newRow = new ArrayList<>();
-            rows.add(newRow);
-            for (VectorWrapper<?> vw : loader) {
-              ValueVector.Accessor accessor = vw.getValueVector().getAccessor();
-              Object o = accessor.getObject(i);
-              newRow.add(o == null ? null : o.toString());
-            }
-          }
-        }
-        loader.clear();
-        batch.release();
-      }
-    }
-  }
-
-  private static String replaceWorkingPathInString(String orig) {
-    return orig.replaceAll(Pattern.quote("[WORKING_PATH]"), Matcher.quoteReplacement(TestTools.getWorkingPath()));
-  }
-
-  protected static void copyDirectoryIntoTempSpace(String resourcesDir) throws IOException {
-    copyDirectoryIntoTempSpace(resourcesDir, null);
-  }
-
-  protected static void copyDirectoryIntoTempSpace(String resourcesDir, String destinationSubDir) throws IOException {
-    Path destination = destinationSubDir != null ? new Path(getDfsTestTmpSchemaLocation(), destinationSubDir)
-        : new Path(getDfsTestTmpSchemaLocation());
-    fs.copyFromLocalFile(
-        new Path(replaceWorkingPathInString(resourcesDir)),
-        destination);
-  }
-
-  protected static void copyMetaDataCacheToTempReplacingInternalPaths(String srcFileOnClassPath, String destFolderInTmp,
-      String metaFileName) throws IOException {
-    copyMetaDataCacheToTempWithReplacements(srcFileOnClassPath, destFolderInTmp, metaFileName, null);
-  }
-
-  protected static void copyMetaDataCacheToTempReplacingInternalPaths(Path srcFileOnClassPath, String destFolderInTmp,
-                                                                      String metaFileName) throws IOException {
-    copyMetaDataCacheToTempReplacingInternalPaths(srcFileOnClassPath.toUri().getPath(), destFolderInTmp, metaFileName);
-  }
-
-  /**
-   * Old metadata cache files include full paths to the files that have been scanned.
-   * <p>
-   * There is no way to generate a metadata cache file with absolute paths that
-   * will be guaranteed to be available on an arbitrary test machine.
-   * <p>
-   * To enable testing older metadata cache files, they were generated manually
-   * using older Drill versions, and the absolute path up to the folder where
-   * the metadata cache file appeared was manually replaced with the string
-   * REPLACED_IN_TEST. Here the file is re-written into the given temporary
-   * location after the REPLACED_IN_TEST string has been replaced by the actual
-   * location generated during this run of the tests.
-   *
-   * @param srcFileOnClassPath the source path of metadata cache file, which should be replaced
-   * @param destFolderInTmp  the parent folder name of the metadata cache file
-   * @param metaFileName the name of metadata cache file depending on the type of the metadata
-   * @param customStringReplacement custom string that replaces the "CUSTOM_STRING_REPLACEMENT" target string in the metadata file
-   * @throws IOException if a create or write error occurs
-   */
-  protected static void copyMetaDataCacheToTempWithReplacements(String srcFileOnClassPath,
-      String destFolderInTmp, String metaFileName, String customStringReplacement) throws IOException {
-    String metadataFileContents = getFile(srcFileOnClassPath);
-    Path rootMeta = new Path(dfsTestTmpSchemaLocation, destFolderInTmp);
-    Path newMetaCache = new Path(rootMeta, metaFileName);
-    try (FSDataOutputStream outStream = fs.create(newMetaCache)) {
-      if (customStringReplacement != null) {
-        metadataFileContents = metadataFileContents.replace("CUSTOM_STRING_REPLACEMENT", customStringReplacement);
-      }
-      outStream.writeBytes(metadataFileContents.replace("REPLACED_IN_TEST", dfsTestTmpSchemaLocation));
-    }
-  }
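-  // Illustrative usage only (the resource and folder names below are hypothetical):
-  //
-  //   copyMetaDataCacheToTempReplacingInternalPaths(
-  //       "parquet/metadata_files/sample_cache.requires_replace.txt",
-  //       "parquet_cache_dir", ".drill.parquet_metadata");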
-
- }

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/DrillTestWrapper.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/DrillTestWrapper.java b/exec/java-exec/src/test/java/org/apache/drill/DrillTestWrapper.java
deleted file mode 100644
index 990a24d..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/DrillTestWrapper.java
+++ /dev/null
@@ -1,808 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.io.UnsupportedEncodingException;
-import java.lang.reflect.Array;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.types.TypeProtos;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.HyperVectorValueIterator;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.proto.UserBitShared;
-import org.apache.drill.exec.proto.UserBitShared.QueryType;
-import org.apache.drill.exec.record.BatchSchema;
-import org.apache.drill.exec.record.HyperVectorWrapper;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.record.RecordBatchLoader;
-import org.apache.drill.exec.record.VectorAccessible;
-import org.apache.drill.exec.record.VectorWrapper;
-import org.apache.drill.exec.record.selection.SelectionVector2;
-import org.apache.drill.exec.record.selection.SelectionVector4;
-import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.util.Text;
-import org.apache.drill.exec.vector.ValueVector;
-
-/**
- * An object to encapsulate the options for a Drill unit test, as well as the execution methods to perform the tests and
- * validation of results.
- *
- * To construct an instance easily, look at the TestBuilder class. From an implementation of
- * the BaseTestQuery class, an instance of the builder is accessible through the testBuilder() method.
- */
-public class DrillTestWrapper {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillTestWrapper.class);
-
-  public interface TestServices {
-    BufferAllocator allocator();
-
-    void test(String query) throws Exception;
-
-    List<QueryDataBatch> testRunAndReturn(QueryType type, Object query) throws Exception;
-  }
-
-  // TODO - when in JSON, read baseline in all text mode to avoid precision loss for decimal values
-
-  // This flag enables logging of every value that is validated. For large validations this is time consuming,
-  // so it is not exposed in a way that it can be enabled for an individual test. It can be changed here while
-  // debugging a test to see all of the output, but as this framework performs full validation anyway, there is
-  // no reason to keep it on; it only makes the tests slower.
-  private static boolean VERBOSE_DEBUG = false;
-
-  // Unit test doesn't expect any specific batch count
-  public static final int EXPECTED_BATCH_COUNT_NOT_SET = -1;
-
-  // The motivation behind the TestBuilder was to provide a clean API for test writers. The model is mostly designed to
-  // prepare all of the components necessary for running the tests before the TestWrapper is initialized. There is,
-  // however, one case where the setup for the baseline is driven by the test query results: implicit type enforcement
-  // for the baseline data. In this case there needs to be a callback into the TestBuilder once we know the type
-  // information from the test query.
-  private TestBuilder testBuilder;
-  /**
-   * Test query to run. Type of object depends on the {@link #queryType}
-   */
-  private Object query;
-  // The type of query provided
-  private UserBitShared.QueryType queryType;
-  // The type of query provided for the baseline
-  private UserBitShared.QueryType baselineQueryType;
-  // should ordering be enforced in the baseline check
-  private boolean ordered;
-  private TestServices services;
-  // queries to run before the baseline or test queries, can be used to set options
-  private String baselineOptionSettingQueries;
-  private String testOptionSettingQueries;
-  // Two different methods are available for comparing ordered results. The default reads all of the records
-  // into giant lists of objects, like one giant on-heap batch of 'vectors'.
-  // This flag enables the other approach, which iterates through a hyper batch for the test query results and
-  // baseline. While this runs faster and uses less memory, it can be harder to debug because the elements are
-  // not in a single list.
-  private boolean highPerformanceComparison;
-  // As one option for the baseline, test writers can provide the baseline values and columns directly,
-  // without creating a file; these are provided to the builder via the baselineValues() and baselineColumns()
-  // methods and translated into a map in the builder.
-  private List<Map<String, Object>> baselineRecords;
-
-  private int expectedNumBatches;
-
-  public DrillTestWrapper(TestBuilder testBuilder, TestServices services, Object query, QueryType queryType,
-      String baselineOptionSettingQueries, String testOptionSettingQueries,
-      QueryType baselineQueryType, boolean ordered, boolean highPerformanceComparison,
-      List<Map<String, Object>> baselineRecords, int expectedNumBatches) {
-    this.testBuilder = testBuilder;
-    this.services = services;
-    this.query = query;
-    this.queryType = queryType;
-    this.baselineQueryType = baselineQueryType;
-    this.ordered = ordered;
-    this.baselineOptionSettingQueries = baselineOptionSettingQueries;
-    this.testOptionSettingQueries = testOptionSettingQueries;
-    this.highPerformanceComparison = highPerformanceComparison;
-    this.baselineRecords = baselineRecords;
-    this.expectedNumBatches = expectedNumBatches;
-  }
-
-  public void run() throws Exception {
-    if (testBuilder.getExpectedSchema() != null) {
-      compareSchemaOnly();
-    } else {
-      if (ordered) {
-        compareOrderedResults();
-      } else {
-        compareUnorderedResults();
-      }
-    }
-  }
-
-  private BufferAllocator getAllocator() {
-    return services.allocator();
-  }
-
-  private void compareHyperVectors(Map<String, HyperVectorValueIterator> expectedRecords,
-      Map<String, HyperVectorValueIterator> actualRecords) throws Exception {
-    for (String s : expectedRecords.keySet()) {
-      assertNotNull("Expected column '" + s + "' not found.", actualRecords.get(s));
-      assertEquals(expectedRecords.get(s).getTotalRecords(), actualRecords.get(s).getTotalRecords());
-      HyperVectorValueIterator expectedValues = expectedRecords.get(s);
-      HyperVectorValueIterator actualValues = actualRecords.get(s);
-      int i = 0;
-      while (expectedValues.hasNext()) {
-        compareValuesErrorOnMismatch(expectedValues.next(), actualValues.next(), i, s);
-        i++;
-      }
-    }
-    cleanupHyperValueIterators(expectedRecords.values());
-    cleanupHyperValueIterators(actualRecords.values());
-  }
-
-  private void cleanupHyperValueIterators(Collection<HyperVectorValueIterator> hyperBatches) {
-    for (HyperVectorValueIterator hvi : hyperBatches) {
-      for (ValueVector vv : hvi.getHyperVector().getValueVectors()) {
-        vv.clear();
-      }
-    }
-  }
-
-  public static void compareMergedVectors(Map<String, List<Object>> expectedRecords, Map<String, List<Object>> actualRecords) throws Exception {
-    for (String s : actualRecords.keySet()) {
-      assertNotNull("Unexpected extra column " + s + " returned by query.", expectedRecords.get(s));
-      assertEquals("Incorrect number of rows returned by query.", expectedRecords.get(s).size(), actualRecords.get(s).size());
-      List<?> expectedValues = expectedRecords.get(s);
-      List<?> actualValues = actualRecords.get(s);
-      assertEquals("Different number of records returned", expectedValues.size(), actualValues.size());
-
-      for (int i = 0; i < expectedValues.size(); i++) {
-        try {
-          compareValuesErrorOnMismatch(expectedValues.get(i), actualValues.get(i), i, s);
-        } catch (Exception ex) {
-          throw new Exception(ex.getMessage() + "\n\n" + printNearbyRecords(expectedRecords, actualRecords, i), ex);
-        }
-      }
-    }
-    if (actualRecords.size() < expectedRecords.size()) {
-      throw new Exception(findMissingColumns(expectedRecords.keySet(), actualRecords.keySet()));
-    }
-  }
-
-  private static String printNearbyRecords(Map<String, List<Object>> expectedRecords, Map<String, List<Object>> actualRecords, int offset) {
-    StringBuilder expected = new StringBuilder();
-    StringBuilder actual = new StringBuilder();
-    expected.append("Expected Records near verification failure:\n");
-    actual.append("Actual Records near verification failure:\n");
-    int firstRecordToPrint = Math.max(0, offset - 5);
-    List<?> expectedValuesInFirstColumn = expectedRecords.get(expectedRecords.keySet().iterator().next());
-    List<?> actualValuesInFirstColumn = actualRecords.get(actualRecords.keySet().iterator().next());
-    int numberOfRecordsToPrint = Math.min(Math.min(10, expectedValuesInFirstColumn.size()), actualValuesInFirstColumn.size());
-    for (int i = firstRecordToPrint; i < numberOfRecordsToPrint; i++) {
-      expected.append("Record Number: ").append(i).append(" { ");
-      actual.append("Record Number: ").append(i).append(" { ");
-      for (String s : actualRecords.keySet()) {
-        List<?> actualValues = actualRecords.get(s);
-        actual.append(s).append(" : ").append(actualValues.get(i)).append(",");
-      }
-      for (String s : expectedRecords.keySet()) {
-        List<?> expectedValues = expectedRecords.get(s);
-        expected.append(s).append(" : ").append(expectedValues.get(i)).append(",");
-      }
-      expected.append(" }\n");
-      actual.append(" }\n");
-    }
-
-    return expected.append("\n\n").append(actual).toString();
-
-  }
-
-  private Map<String, HyperVectorValueIterator> addToHyperVectorMap(final List<QueryDataBatch> records,
-      final RecordBatchLoader loader)
-      throws SchemaChangeException, UnsupportedEncodingException {
-    // TODO - this does not handle schema changes
-    Map<String, HyperVectorValueIterator> combinedVectors = new TreeMap<>();
-
-    long totalRecords = 0;
-    QueryDataBatch batch;
-    int size = records.size();
-    for (int i = 0; i < size; i++) {
-      batch = records.get(i);
-      loader.load(batch.getHeader().getDef(), batch.getData());
-      logger.debug("reading batch with " + loader.getRecordCount() + " rows, total read so far " + totalRecords);
-      totalRecords += loader.getRecordCount();
-      for (VectorWrapper<?> w : loader) {
-        String field = SchemaPath.getSimplePath(w.getField().getName()).toExpr();
-        if (!combinedVectors.containsKey(field)) {
-          MaterializedField mf = w.getField();
-          ValueVector[] vvList = (ValueVector[]) Array.newInstance(mf.getValueClass(), 1);
-          vvList[0] = w.getValueVector();
-          combinedVectors.put(field, new HyperVectorValueIterator(mf, new HyperVectorWrapper<>(mf, vvList)));
-        } else {
-          combinedVectors.get(field).getHyperVector().addVector(w.getValueVector());
-        }
-
-      }
-    }
-    for (HyperVectorValueIterator hvi : combinedVectors.values()) {
-      hvi.determineTotalSize();
-    }
-    return combinedVectors;
-  }
-
-  private static class BatchIterator implements Iterable<VectorAccessible>, AutoCloseable {
-    private final List<QueryDataBatch> dataBatches;
-    private final RecordBatchLoader batchLoader;
-
-    public BatchIterator(List<QueryDataBatch> dataBatches, RecordBatchLoader batchLoader) {
-      this.dataBatches = dataBatches;
-      this.batchLoader = batchLoader;
-    }
-
-    @Override
-    public Iterator<VectorAccessible> iterator() {
-      return new Iterator<VectorAccessible>() {
-
-        int index = -1;
-
-        @Override
-        public boolean hasNext() {
-          return index < dataBatches.size() - 1;
-        }
-
-        @Override
-        public VectorAccessible next() {
-          index++;
-          if (index == dataBatches.size()) {
-            throw new RuntimeException("Tried to call next when iterator had no more items.");
-          }
-          batchLoader.clear();
-          QueryDataBatch batch = dataBatches.get(index);
-          try {
-            batchLoader.load(batch.getHeader().getDef(), batch.getData());
-          } catch (SchemaChangeException e) {
-            throw new RuntimeException(e);
-          }
-          return batchLoader;
-        }
-
-        @Override
-        public void remove() {
-          throw new UnsupportedOperationException("Removing is not supported");
-        }
-      };
-    }
-
-    @Override
-    public void close() throws Exception {
-      batchLoader.clear();
-    }
-
-  }
-
-  /**
-   * Iterate over batches, and combine the batches into a map, where key is schema path, and value is
-   * the list of column values across all the batches.
-   * @param batches batches to iterate over
-   * @return map from column schema path to the list of that column's values across all batches
-   * @throws SchemaChangeException
-   * @throws UnsupportedEncodingException
-   */
-  public static Map<String, List<Object>> addToCombinedVectorResults(Iterable<VectorAccessible> batches)
-      throws SchemaChangeException, UnsupportedEncodingException {
-    Map<String, List<Object>> combinedVectors = new TreeMap<>();
-    addToCombinedVectorResults(batches, null, combinedVectors);
-    return combinedVectors;
-  }
-
-  /**
-   * Add to result vectors and compare batch schema against expected schema while iterating batches.
-   * @param batches batches to iterate over
-   * @param expectedSchema the expected schema the batches should contain; a SchemaChangeException
-   *                       is thrown if a batch with a different schema is encountered
-   * @param combinedVectors the map that accumulates the column values while iterating the batches
-   *
-   * @return number of batches
-   * @throws SchemaChangeException
-   * @throws UnsupportedEncodingException
-   */
-  public static int addToCombinedVectorResults(Iterable<VectorAccessible> batches, BatchSchema expectedSchema, Map<String, List<Object>> combinedVectors)
-       throws SchemaChangeException, UnsupportedEncodingException {
-    // TODO - this does not handle schema changes
-    int numBatch = 0;
-    long totalRecords = 0;
-    BatchSchema schema = null;
-    for (VectorAccessible loader : batches)  {
-      numBatch++;
-      if (expectedSchema != null) {
-        if (! expectedSchema.equals(loader.getSchema())) {
-          throw new SchemaChangeException(String.format("Batch schema does not match expected schema\n" +
-                  "Actual schema: %s.  Expected schema : %s",
-              loader.getSchema(), expectedSchema));
-        }
-      }
-
-      // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
-      // SchemaChangeException, so check/clean throws clause above.
-      if (schema == null) {
-        schema = loader.getSchema();
-        for (MaterializedField mf : schema) {
-          combinedVectors.put(SchemaPath.getSimplePath(mf.getName()).toExpr(), new ArrayList<>());
-        }
-      } else {
-        // TODO - actually handle schema changes, this is just to get access to the SelectionVectorMode
-        // of the current batch, the check for a null schema is used to only mutate the schema once
-        // need to add new vectors and null fill for previous batches? distinction between null and non-existence important?
-        schema = loader.getSchema();
-      }
-      logger.debug("reading batch with " + loader.getRecordCount() + " rows, total read so far " + totalRecords);
-      totalRecords += loader.getRecordCount();
-      for (VectorWrapper<?> w : loader) {
-        String field = SchemaPath.getSimplePath(w.getField().getName()).toExpr();
-        ValueVector[] vectors;
-        if (w.isHyper()) {
-          vectors = w.getValueVectors();
-        } else {
-          vectors = new ValueVector[] {w.getValueVector()};
-        }
-        SelectionVector2 sv2 = null;
-        SelectionVector4 sv4 = null;
-        switch(schema.getSelectionVectorMode()) {
-          case TWO_BYTE:
-            sv2 = loader.getSelectionVector2();
-            break;
-          case FOUR_BYTE:
-            sv4 = loader.getSelectionVector4();
-            break;
-        }
-        if (sv4 != null) {
-          for (int j = 0; j < sv4.getCount(); j++) {
-            int complexIndex = sv4.get(j);
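-            // an SV4 entry packs the batch index into the upper 16 bits and the
-            // record offset within that batch into the lower 16 bits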
-            int batchIndex = complexIndex >> 16;
-            int recordIndexInBatch = complexIndex & 65535;
-            Object obj = vectors[batchIndex].getAccessor().getObject(recordIndexInBatch);
-            if (obj != null) {
-              if (obj instanceof Text) {
-                obj = obj.toString();
-              }
-            }
-            combinedVectors.get(field).add(obj);
-          }
-        }
-        else {
-          for (ValueVector vv : vectors) {
-            for (int j = 0; j < loader.getRecordCount(); j++) {
-              int index;
-              if (sv2 != null) {
-                index = sv2.getIndex(j);
-              } else {
-                index = j;
-              }
-              Object obj = vv.getAccessor().getObject(index);
-              if (obj != null) {
-                if (obj instanceof Text) {
-                  obj = obj.toString();
-                }
-              }
-              combinedVectors.get(field).add(obj);
-            }
-          }
-        }
-      }
-    }
-    return numBatch;
-  }
-
-  protected void compareSchemaOnly() throws Exception {
-    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-    List<QueryDataBatch> actual = null;
-    QueryDataBatch batch = null;
-    try {
-      test(testOptionSettingQueries);
-      actual = testRunAndReturn(queryType, query);
-      batch = actual.get(0);
-      loader.load(batch.getHeader().getDef(), batch.getData());
-
-      final BatchSchema schema = loader.getSchema();
-      final List<Pair<SchemaPath, TypeProtos.MajorType>> expectedSchema = testBuilder.getExpectedSchema();
-      if (schema.getFieldCount() != expectedSchema.size()) {
-        throw new Exception("Expected and actual numbers of columns do not match.");
-      }
-
-      for (int i = 0; i < schema.getFieldCount(); ++i) {
-        final String actualSchemaPath = schema.getColumn(i).getName();
-        final TypeProtos.MajorType actualMajorType = schema.getColumn(i).getType();
-
-        final String expectedSchemaPath = expectedSchema.get(i).getLeft().getRootSegmentPath();
-        final TypeProtos.MajorType expectedMajorType = expectedSchema.get(i).getValue();
-
-        if (!actualSchemaPath.equals(expectedSchemaPath)
-            || !actualMajorType.equals(expectedMajorType)) {
-          throw new Exception(String.format("Schema path or type mismatch for column #%d:\n" +
-                  "Expected schema path: %s\nActual   schema path: %s\nExpected type: %s\nActual   type: %s",
-              i, expectedSchemaPath, actualSchemaPath, Types.toString(expectedMajorType),
-              Types.toString(actualMajorType)));
-        }
-      }
-
-    } finally {
-      if (actual != null) {
-        for (QueryDataBatch b : actual) {
-          b.release();
-        }
-      }
-      loader.clear();
-    }
-  }
-
-  /**
-   * Use this method only if necessary to validate one query against another. If you are just validating against a
-   * baseline file use one of the simpler interfaces that will write the validation query for you.
-   *
-   * @throws Exception
-   */
-  protected void compareUnorderedResults() throws Exception {
-    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-
-    List<QueryDataBatch> actual = Collections.emptyList();
-    List<QueryDataBatch> expected = Collections.emptyList();
-    List<Map<String, Object>> expectedRecords = new ArrayList<>();
-    List<Map<String, Object>> actualRecords = new ArrayList<>();
-
-    try {
-      test(testOptionSettingQueries);
-      actual = testRunAndReturn(queryType, query);
-
-      checkNumBatches(actual);
-
-      addTypeInfoIfMissing(actual.get(0), testBuilder);
-      addToMaterializedResults(actualRecords, actual, loader);
-
-      // If baseline data was not provided to the test builder directly, we must run a query for the baseline, this includes
-      // the cases where the baseline is stored in a file.
-      if (baselineRecords == null) {
-        test(baselineOptionSettingQueries);
-        expected = testRunAndReturn(baselineQueryType, testBuilder.getValidationQuery());
-        addToMaterializedResults(expectedRecords, expected, loader);
-      } else {
-        expectedRecords = baselineRecords;
-      }
-
-      compareResults(expectedRecords, actualRecords);
-    } finally {
-      cleanupBatches(actual, expected);
-    }
-  }
-
-  /**
-   * Use this method only if necessary to validate one query against another. If you are just validating against a
-   * baseline file use one of the simpler interfaces that will write the validation query for you.
-   *
-   * @throws Exception
-   */
-  protected void compareOrderedResults() throws Exception {
-    if (highPerformanceComparison) {
-      if (baselineQueryType == null) {
-        throw new Exception("Cannot do a high performance comparison without using a baseline file");
-      }
-      compareResultsHyperVector();
-    } else {
-      compareMergedOnHeapVectors();
-    }
-  }
-
-  public void compareMergedOnHeapVectors() throws Exception {
-    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-
-    List<QueryDataBatch> actual = Collections.emptyList();
-    List<QueryDataBatch> expected = Collections.emptyList();
-    Map<String, List<Object>> actualSuperVectors;
-    Map<String, List<Object>> expectedSuperVectors;
-
-    try {
-      test(testOptionSettingQueries);
-      actual = testRunAndReturn(queryType, query);
-
-      checkNumBatches(actual);
-
-      // To avoid extra work for test writers, types can optionally be inferred from the test query
-      addTypeInfoIfMissing(actual.get(0), testBuilder);
-
-      BatchIterator batchIter = new BatchIterator(actual, loader);
-      actualSuperVectors = addToCombinedVectorResults(batchIter);
-      batchIter.close();
-
-      // If baseline data was not provided to the test builder directly, we must run a query for the baseline, this includes
-      // the cases where the baseline is stored in a file.
-      if (baselineRecords == null) {
-        test(baselineOptionSettingQueries);
-        expected = testRunAndReturn(baselineQueryType, testBuilder.getValidationQuery());
-        BatchIterator exBatchIter = new BatchIterator(expected, loader);
-        expectedSuperVectors = addToCombinedVectorResults(exBatchIter);
-        exBatchIter.close();
-      } else {
-        // data is built in the TestBuilder in a row major format as it is provided by the user
-        // translate it here to vectorized, the representation expected by the ordered comparison
-        expectedSuperVectors = translateRecordListToHeapVectors(baselineRecords);
-      }
-
-      compareMergedVectors(expectedSuperVectors, actualSuperVectors);
-    } catch (Exception e) {
-      throw new Exception(e.getMessage() + "\nFor query: " + query , e);
-    } finally {
-      cleanupBatches(expected, actual);
-    }
-  }
-
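-  /**
-   * Translate row-major records into the column-major form used by the vector comparison,
-   * e.g. [{a:1, b:2}, {a:3, b:4}] becomes {a:[1, 3], b:[2, 4]}.
-   */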
-  public static Map<String, List<Object>> translateRecordListToHeapVectors(List<Map<String, Object>> records) {
-    Map<String, List<Object>> ret = new TreeMap<>();
-    for (String s : records.get(0).keySet()) {
-      ret.put(s, new ArrayList<>());
-    }
-    for (Map<String, Object> m : records) {
-      for (String s : m.keySet()) {
-        ret.get(s).add(m.get(s));
-      }
-    }
-    return ret;
-  }
-
-  public void compareResultsHyperVector() throws Exception {
-    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
-
-    test(testOptionSettingQueries);
-    List<QueryDataBatch> results = testRunAndReturn(queryType, query);
-
-    checkNumBatches(results);
-
-    // To avoid extra work for test writers, types can optionally be inferred from the test query
-    addTypeInfoIfMissing(results.get(0), testBuilder);
-
-    Map<String, HyperVectorValueIterator> actualSuperVectors = addToHyperVectorMap(results, loader);
-
-    test(baselineOptionSettingQueries);
-    List<QueryDataBatch> expected = testRunAndReturn(baselineQueryType, testBuilder.getValidationQuery());
-
-    Map<String, HyperVectorValueIterator> expectedSuperVectors = addToHyperVectorMap(expected, loader);
-
-    compareHyperVectors(expectedSuperVectors, actualSuperVectors);
-    cleanupBatches(results, expected);
-  }
-
-  private void checkNumBatches(final List<QueryDataBatch> results) {
-    if (expectedNumBatches != EXPECTED_BATCH_COUNT_NOT_SET) {
-      final int actualNumBatches = results.size();
-      assertEquals(String.format("Expected %d batches but query returned %d non empty batch(es)%n", expectedNumBatches,
-          actualNumBatches), expectedNumBatches, actualNumBatches);
-    }
-  }
-
-  private void addTypeInfoIfMissing(QueryDataBatch batch, TestBuilder testBuilder) {
-    if (! testBuilder.typeInfoSet()) {
-      Map<SchemaPath, TypeProtos.MajorType> typeMap = getTypeMapFromBatch(batch);
-      testBuilder.baselineTypes(typeMap);
-    }
-
-  }
-
-  private Map<SchemaPath, TypeProtos.MajorType> getTypeMapFromBatch(QueryDataBatch batch) {
-    Map<SchemaPath, TypeProtos.MajorType> typeMap = new HashMap<>();
-    for (int i = 0; i < batch.getHeader().getDef().getFieldCount(); i++) {
-      typeMap.put(SchemaPath.getSimplePath(MaterializedField.create(batch.getHeader().getDef().getField(i)).getName()),
-          batch.getHeader().getDef().getField(i).getMajorType());
-    }
-    return typeMap;
-  }
-
-  @SafeVarargs
-  private final void cleanupBatches(List<QueryDataBatch>... results) {
-    for (List<QueryDataBatch> resultList : results ) {
-      for (QueryDataBatch result : resultList) {
-        result.release();
-      }
-    }
-  }
-
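-  /**
-   * Materialize each batch into row maps keyed by column schema path. Note that this
-   * consumes the input list: each batch is removed from {@code records} and released
-   * once it has been read.
-   */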
-  public static void addToMaterializedResults(List<Map<String, Object>> materializedRecords,
-                                          List<QueryDataBatch> records,
-                                          RecordBatchLoader loader)
-      throws SchemaChangeException, UnsupportedEncodingException {
-    long totalRecords = 0;
-    QueryDataBatch batch;
-    int size = records.size();
-    for (int i = 0; i < size; i++) {
-      batch = records.get(0);
-      loader.load(batch.getHeader().getDef(), batch.getData());
-      // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
-      // SchemaChangeException, so check/clean throws clause above.
-      logger.debug("reading batch with " + loader.getRecordCount() + " rows, total read so far " + totalRecords);
-      totalRecords += loader.getRecordCount();
-      for (int j = 0; j < loader.getRecordCount(); j++) {
-        Map<String, Object> record = new TreeMap<>();
-        for (VectorWrapper<?> w : loader) {
-          Object obj = w.getValueVector().getAccessor().getObject(j);
-          if (obj instanceof Text) {
-            obj = obj.toString();
-          }
-          record.put(SchemaPath.getSimplePath(w.getField().getName()).toExpr(), obj);
-        }
-        materializedRecords.add(record);
-      }
-      records.remove(0);
-      batch.release();
-      loader.clear();
-    }
-  }
-
-  public static boolean compareValuesErrorOnMismatch(Object expected, Object actual, int counter, String column) throws Exception {
-    if (compareValues(expected, actual, counter, column)) {
-      return true;
-    }
-    if (expected == null) {
-      throw new Exception("at position " + counter + " column '" + column + "' mismatched values, expected: null " +
-          "but received " + actual + "(" + actual.getClass().getSimpleName() + ")");
-    }
-    if (actual == null) {
-      throw new Exception("unexpected null at position " + counter + " column '" + column + "' should have been: " + expected);
-    }
-    if (expected instanceof byte[] && actual instanceof byte[]) {
-      throw new Exception("at position " + counter + " column '" + column + "' mismatched values, expected: "
-          + new String((byte[]) expected, "UTF-8") + " but received " + new String((byte[]) actual, "UTF-8"));
-    }
-    if (!expected.equals(actual)) {
-      throw new Exception("at position " + counter + " column '" + column + "' mismatched values, expected: "
-          + expected + "(" + expected.getClass().getSimpleName() + ") but received " + actual + "(" + actual.getClass().getSimpleName() + ")");
-    }
-    return true;
-  }
-
-  public static boolean compareValues(Object expected, Object actual, int counter, String column) throws Exception {
-    if (expected == null) {
-      if (actual == null) {
-        if (VERBOSE_DEBUG) {
-          logger.debug("(1) at position " + counter + " column '" + column + "' matched value: " + expected);
-        }
-        return true;
-      } else {
-        return false;
-      }
-    }
-    if (actual == null) {
-      return false;
-    }
-    if (expected instanceof byte[] && actual instanceof byte[]) {
-      if (!Arrays.equals((byte[]) expected, (byte[]) actual)) {
-        return false;
-      }
-      if (VERBOSE_DEBUG) {
-        logger.debug("at position " + counter + " column '" + column + "' matched value " + new String((byte[]) expected, "UTF-8"));
-      }
-      return true;
-    }
-    if (!expected.equals(actual)) {
-      return false;
-    }
-    if (VERBOSE_DEBUG) {
-      logger.debug("at position " + counter + " column '" + column + "' matched value: " + expected);
-    }
-    return true;
-  }
-
-  /**
-   * Compare two result sets, ignoring ordering.
-   *
-   * @param expectedRecords - list of records from the baseline
-   * @param actualRecords - list of records from the test query; WARNING - this list is consumed (emptied) by this method
-   * @throws Exception if a mismatch, an unexpected column, or a missing record is found
-   */
-  private void compareResults(List<Map<String, Object>> expectedRecords, List<Map<String, Object>> actualRecords) throws Exception {
-
-    assertEquals("Different number of records returned", expectedRecords.size(), actualRecords.size());
-
-    int i = 0;
-    int counter = 0;
-    boolean found;
-    for (Map<String, Object> expectedRecord : expectedRecords) {
-      i = 0;
-      found = false;
-      findMatch:
-      for (Map<String, Object> actualRecord : actualRecords) {
-        for (String s : actualRecord.keySet()) {
-          if (!expectedRecord.containsKey(s)) {
-            throw new Exception("Unexpected column '" + s + "' returned by query.");
-          }
-          if ( ! compareValues(expectedRecord.get(s), actualRecord.get(s), counter, s)) {
-            i++;
-            continue findMatch;
-          }
-        }
-        if (actualRecord.size() < expectedRecord.size()) {
-          throw new Exception(findMissingColumns(expectedRecord.keySet(), actualRecord.keySet()));
-        }
-        found = true;
-        break;
-      }
-      if (!found) {
-        StringBuilder sb = new StringBuilder();
-        for (int expectedRecordDisplayCount = 0;
-             expectedRecordDisplayCount < 10 && expectedRecordDisplayCount < expectedRecords.size();
-             expectedRecordDisplayCount++) {
-          sb.append(printRecord(expectedRecords.get(expectedRecordDisplayCount)));
-        }
-        String expectedRecordExamples = sb.toString();
-        sb.setLength(0);
-        for (int actualRecordDisplayCount = 0;
-             actualRecordDisplayCount < 10 && actualRecordDisplayCount < actualRecords.size();
-             actualRecordDisplayCount++) {
-          sb.append(printRecord(actualRecords.get(actualRecordDisplayCount)));
-        }
-        String actualRecordExamples = sb.toString();
-        throw new Exception(String.format("After matching %d records, did not find expected record in result set:\n %s\n\n" +
-            "Some examples of expected records:\n%s\n\n Some examples of records returned by the test query:\n%s",
-            counter, printRecord(expectedRecord), expectedRecordExamples, actualRecordExamples));
-      } else {
-        actualRecords.remove(i);
-        counter++;
-      }
-    }
-    assertEquals(0, actualRecords.size());
-  }
-
-  private static String findMissingColumns(Set<String> expected, Set<String> actual) {
-    StringBuilder missingCols = new StringBuilder();
-    for (String colName : expected) {
-      if (!actual.contains(colName)) {
-        missingCols.append(colName).append(", ");
-      }
-    }
-    return "Expected column(s) " + missingCols + " not found in result set: " + actual + ".";
-  }
-
-  private String printRecord(Map<String, ?> record) {
-    StringBuilder sb = new StringBuilder();
-    for (String s : record.keySet()) {
-      sb.append(s).append(" : ").append(record.get(s)).append(", ");
-    }
-    return sb.append('\n').toString();
-  }
-
-  private void test(String query) throws Exception {
-    services.test(query);
-  }
-
-  private List<QueryDataBatch> testRunAndReturn(QueryType type, Object query) throws Exception {
-    return services.testRunAndReturn(type, query);
-  }
-}
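
For reference, the comparison helpers above reduce each batch to maps from a
column expression to its value and then match records pairwise, ignoring order.
A minimal sketch of that contract, with hypothetical column names and values
(keys are the backquoted SchemaPath expressions built in addToMaterializedResults):

    // Sketch only: the record-map form checked by compareResults().
    static void exampleContract() {
      Map<String, Object> expected = new TreeMap<>();
      expected.put("`b2`", true);                 // key = SchemaPath expression

      Map<String, Object> actual = new TreeMap<>();
      actual.put("`b2`", true);

      // Ignoring order, each expected map must match exactly one actual map,
      // and no actual records may remain unmatched afterwards.
      assertEquals(expected, actual);
    }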

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/PlanTestBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/PlanTestBase.java b/exec/java-exec/src/test/java/org/apache/drill/PlanTestBase.java
index eb11532..091d567 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/PlanTestBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/PlanTestBase.java
@@ -36,6 +36,8 @@ import org.apache.calcite.sql.SqlExplain.Depth;
 import org.apache.calcite.sql.SqlExplainLevel;
 
 import com.google.common.base.Strings;
+import org.apache.drill.test.BaseTestQuery;
+import org.apache.drill.test.QueryTestUtil;
 
 public class PlanTestBase extends BaseTestQuery {
 

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
index b00d4d6..ca643f5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/PlanningBase.java
@@ -29,7 +29,7 @@ import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.config.LogicalPlanPersistence;
 import org.apache.drill.common.scanner.ClassPathScanner;
 import org.apache.drill.common.scanner.persistence.ScanResult;
-import org.apache.drill.common.util.TestTools;
+import org.apache.drill.test.TestTools;
 import org.apache.drill.exec.ExecTest;
 import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
 import org.apache.drill.exec.memory.BufferAllocator;

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/QueryTestUtil.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/QueryTestUtil.java b/exec/java-exec/src/test/java/org/apache/drill/QueryTestUtil.java
deleted file mode 100644
index 26a0537..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/QueryTestUtil.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill;
-
-import java.util.List;
-import java.util.Properties;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.drill.BaseTestQuery.SilentListener;
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.common.util.TestTools;
-import org.apache.drill.exec.ExecConstants;
-import org.apache.drill.exec.client.DrillClient;
-import org.apache.drill.exec.client.PrintingResultsListener;
-import org.apache.drill.exec.client.QuerySubmitter.Format;
-import org.apache.drill.exec.compile.ClassTransformer;
-import org.apache.drill.exec.exception.OutOfMemoryException;
-import org.apache.drill.exec.proto.UserBitShared.QueryType;
-import org.apache.drill.exec.rpc.RpcException;
-import org.apache.drill.exec.rpc.user.AwaitableUserResultsListener;
-import org.apache.drill.exec.rpc.user.QueryDataBatch;
-import org.apache.drill.exec.rpc.user.UserResultsListener;
-import org.apache.drill.exec.server.Drillbit;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.server.RemoteServiceSet;
-import org.apache.drill.exec.server.options.OptionManager;
-import org.apache.drill.exec.server.options.OptionValue;
-import org.apache.drill.exec.server.options.SystemOptionManager;
-import org.apache.drill.exec.util.VectorUtil;
-
-/**
- * Utilities useful for tests that issue SQL queries.
- */
-public class QueryTestUtil {
-
-  public static final String TEST_QUERY_PRINTING_SILENT = "drill.test.query.printing.silent";
-
-  /**
-   * Private constructor to prevent instantiation; all methods are static.
-   */
-  private QueryTestUtil() {
-  }
-
-  /**
-   * Create a DrillClient that can be used to query a drill cluster.
-   *
-   * @param drillConfig drill configuration
-   * @param remoteServiceSet remote service set
-   * @param maxWidth maximum width per node
-   * @param props connection properties such as "user", "password", and "schema"
-   * @return the newly created client
-   * @throws RpcException if there is a problem setting up the client
-   */
-  public static DrillClient createClient(final DrillConfig drillConfig, final RemoteServiceSet remoteServiceSet,
-      final int maxWidth, final Properties props) throws RpcException, OutOfMemoryException {
-    final DrillClient drillClient = new DrillClient(drillConfig, remoteServiceSet.getCoordinator());
-    drillClient.connect(props);
-
-    final List<QueryDataBatch> results = drillClient.runQuery(
-        QueryType.SQL, String.format("alter session set `%s` = %d",
-            ExecConstants.MAX_WIDTH_PER_NODE_KEY, maxWidth));
-    for (QueryDataBatch queryDataBatch : results) {
-      queryDataBatch.release();
-    }
-
-    return drillClient;
-  }
-
-  /**
-   * Normalize the query relative to the test environment.
-   *
-   * <p>Looks for "${WORKING_PATH}" or "[WORKING_PATH]" in the query string, and replaces it with the
-   * current working path obtained from {@link org.apache.drill.common.util.TestTools#getWorkingPath()}.
-   *
-   * @param query the query string
-   * @return the normalized query string
-   */
-  public static String normalizeQuery(final String query) {
-    if (query.contains("${WORKING_PATH}")) {
-      return query.replaceAll(Pattern.quote("${WORKING_PATH}"), Matcher.quoteReplacement(TestTools.getWorkingPath()));
-    } else if (query.contains("[WORKING_PATH]")) {
-      return query.replaceAll(Pattern.quote("[WORKING_PATH]"), Matcher.quoteReplacement(TestTools.getWorkingPath()));
-    }
-    return query;
-  }
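-  // Illustration (hypothetical values): if the working path is /home/user/drill, then
-  // normalizeQuery("select * from dfs_test.`${WORKING_PATH}/src/test/resources/t.json`")
-  // returns "select * from dfs_test.`/home/user/drill/src/test/resources/t.json`";
-  // the "[WORKING_PATH]" form is rewritten the same way.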
-
-  /**
-   * Execute a SQL query, and print the results.
-   *
-   * @param drillClient drill client to use
-   * @param type type of the query
-   * @param queryString query string
-   * @return number of rows returned
-   * @throws Exception
-   */
-  public static int testRunAndPrint(
-      final DrillClient drillClient, final QueryType type, final String queryString) throws Exception {
-    final String query = normalizeQuery(queryString);
-    DrillConfig config = drillClient.getConfig();
-    AwaitableUserResultsListener resultListener =
-        new AwaitableUserResultsListener(
-            config.getBoolean(TEST_QUERY_PRINTING_SILENT) ?
-                new SilentListener() :
-                new PrintingResultsListener(config, Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH)
-        );
-    drillClient.runQuery(type, query, resultListener);
-    return resultListener.await();
-  }
-
-  /**
-   * Execute one or more queries separated by semicolons, and print the results.
-   *
-   * @param drillClient drill client to use
-   * @param queryString the query string
-   * @throws Exception
-   */
-  public static void test(final DrillClient drillClient, final String queryString) throws Exception {
-    final String query = normalizeQuery(queryString);
-    String[] queries = query.split(";");
-    for (String q : queries) {
-      final String trimmedQuery = q.trim();
-      if (trimmedQuery.isEmpty()) {
-        continue;
-      }
-      testRunAndPrint(drillClient, QueryType.SQL, trimmedQuery);
-    }
-  }
-
-  /**
-   * Execute one or more queries separated by semicolons, and print the results, with the option to
-   * add formatted arguments to the query string.
-   *
-   * @param drillClient drill client to use
-   * @param query the query string; may contain formatting specifications to be used by
-   *   {@link String#format(String, Object...)}.
-   * @param args optional args to use in the formatting call for the query string
-   * @throws Exception
-   */
-  public static void test(final DrillClient drillClient, final String query, Object... args) throws Exception {
-    test(drillClient, String.format(query, args));
-  }
-
-  /**
-   * Execute a single query with a user supplied result listener.
-   *
-   * @param drillClient drill client to use
-   * @param type type of query
-   * @param queryString the query string
-   * @param resultListener the result listener
-   */
-  public static void testWithListener(final DrillClient drillClient, final QueryType type,
-      final String queryString, final UserResultsListener resultListener) {
-    final String query = QueryTestUtil.normalizeQuery(queryString);
-    drillClient.runQuery(type, query, resultListener);
-  }
-
-  /**
-   * Set up the options to test the scalar replacement retry option (see
-   * ClassTransformer.java). Scalar replacement rewrites bytecode to replace
-   * value holders (essentially boxed values) with their member variables as
-   * locals. There is still one pattern that doesn't work, and occasionally new
-   * ones are introduced. This can be used in tests that exercise failing patterns.
-   *
-   * <p>This also flushes the compiled code cache.
-   *
-   * @param drillbit the drillbit
-   * @param srOption the scalar replacement option value to use
-   * @return the original scalar replacement option setting (so it can be restored)
-   */
-  @SuppressWarnings("resource")
-  public static OptionValue setupScalarReplacementOption(
-      final Drillbit drillbit, final ClassTransformer.ScalarReplacementOption srOption) {
-    // set the system option
-    final DrillbitContext drillbitContext = drillbit.getContext();
-    final SystemOptionManager optionManager = drillbitContext.getOptionManager();
-    final OptionValue originalOptionValue = optionManager.getOption(ClassTransformer.SCALAR_REPLACEMENT_OPTION);
-    optionManager.setLocalOption(ClassTransformer.SCALAR_REPLACEMENT_OPTION, srOption.name().toLowerCase());
-
-    // flush the code cache
-    drillbitContext.getCompiler().flushCache();
-
-    return originalOptionValue;
-  }
-
-  /**
-   * Restore the original scalar replacement option returned from
-   * setupScalarReplacementOption().
-   *
-   * <p>This also flushes the compiled code cache.
-   *
-   * @param drillbit the drillbit
-   * @param srOption the string form of the original scalar replacement option value to restore
-   */
-  public static void restoreScalarReplacementOption(final Drillbit drillbit, final String srOption) {
-    @SuppressWarnings("resource")
-    final DrillbitContext drillbitContext = drillbit.getContext();
-    @SuppressWarnings("resource")
-    final OptionManager optionManager = drillbitContext.getOptionManager();
-    optionManager.setLocalOption(ClassTransformer.SCALAR_REPLACEMENT_OPTION, srOption);
-
-    // flush the code cache
-    drillbitContext.getCompiler().flushCache();
-  }
-
-}
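
The two scalar replacement helpers above are meant to be paired in a
set/try/finally/restore pattern so a failing query cannot leak the modified
system option into later tests. A minimal sketch, assuming a running Drillbit
and client named drillbit/drillClient, and assuming OptionValue exposes its
string form as the public string_val field used elsewhere in these tests:

    final OptionValue original = QueryTestUtil.setupScalarReplacementOption(
        drillbit, ClassTransformer.ScalarReplacementOption.ON);
    try {
      // run the query that exercises the failing bytecode pattern
      QueryTestUtil.test(drillClient, "select * from cp.`employee.json` limit 1");
    } finally {
      // always restore, even on failure; this also flushes the code cache again
      QueryTestUtil.restoreScalarReplacementOption(drillbit, original.string_val);
    }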

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/TestAggNullable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestAggNullable.java b/exec/java-exec/src/test/java/org/apache/drill/TestAggNullable.java
index 4085fb4..95e011b 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestAggNullable.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestAggNullable.java
@@ -20,30 +20,25 @@ package org.apache.drill;
 import static org.junit.Assert.assertEquals;
 
 import org.apache.drill.categories.OperatorTest;
-import org.apache.drill.common.util.TestTools;
+import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 @Category(OperatorTest.class)
-public class TestAggNullable extends BaseTestQuery{
+public class TestAggNullable extends BaseTestQuery {
   static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestAggNullable.class);
 
-  static final String WORKING_PATH = TestTools.getWorkingPath();
-  static final String TEST_RES_PATH = WORKING_PATH + "/src/test/resources";
-
   private static void enableAggr(boolean ha, boolean sa) throws Exception {
 
-    test(String.format("alter session set `planner.enable_hashagg` = %s", ha ? "true":"false"));
-    test(String.format("alter session set `planner.enable_streamagg` = %s", sa ? "true":"false"));
+    test("alter session set `planner.enable_hashagg` = %s", ha);
+    test("alter session set `planner.enable_streamagg` = %s", sa);
     test("alter session set `planner.slice_target` = 1");
   }
 
   @Test  // HashAgg on nullable columns
   public void testHashAggNullableColumns() throws Exception {
-    String query1 = String.format("select t2.b2 from dfs_test.`%s/jsoninput/nullable2.json` t2 " +
-                    " group by t2.b2", TEST_RES_PATH);
-    String query2 = String.format("select t2.a2, t2.b2 from dfs_test.`%s/jsoninput/nullable2.json` t2 " +
-        " group by t2.a2, t2.b2", TEST_RES_PATH);
+    String query1 = "select t2.b2 from cp.`jsoninput/nullable2.json` t2 group by t2.b2";
+    String query2 = "select t2.a2, t2.b2 from cp.`jsoninput/nullable2.json` t2 group by t2.a2, t2.b2";
 
     int actualRecordCount;
     int expectedRecordCount = 2;
@@ -61,10 +56,8 @@ public class TestAggNullable extends BaseTestQuery{
 
   @Test  // StreamingAgg on nullable columns
   public void testStreamAggNullableColumns() throws Exception {
-    String query1 = String.format("select t2.b2 from dfs_test.`%s/jsoninput/nullable2.json` t2 " +
-                    " group by t2.b2", TEST_RES_PATH);
-    String query2 = String.format("select t2.a2, t2.b2 from dfs_test.`%s/jsoninput/nullable2.json` t2 " +
-        " group by t2.a2, t2.b2", TEST_RES_PATH);
+    String query1 = "select t2.b2 from cp.`jsoninput/nullable2.json` t2 group by t2.b2";
+    String query2 = "select t2.a2, t2.b2 from cp.`jsoninput/nullable2.json` t2 group by t2.a2, t2.b2";
 
     int actualRecordCount;
     int expectedRecordCount = 2;

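
The rewritten queries above read test data through the cp (classpath) storage
plugin, so nothing needs to be staged in a per-run temp directory, and the new
test(format, args) overload formats the boolean directly. A minimal sketch of
the pattern, assuming BaseTestQuery's testSql helper that returns the row count
as used in the surrounding tests:

    test("alter session set `planner.enable_hashagg` = %s", true);
    int rows = testSql("select t2.b2 from cp.`jsoninput/nullable2.json` t2 group by t2.b2");
    assertEquals(2, rows);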
http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/TestAltSortQueries.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestAltSortQueries.java b/exec/java-exec/src/test/java/org/apache/drill/TestAltSortQueries.java
index cc32dbe..3631ccf 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestAltSortQueries.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestAltSortQueries.java
@@ -19,24 +19,33 @@ package org.apache.drill;
 
 import org.apache.drill.categories.OperatorTest;
 import org.apache.drill.categories.SqlTest;
+import org.apache.drill.test.BaseTestQuery;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.nio.file.Paths;
+
 @Category({SqlTest.class, OperatorTest.class})
-public class TestAltSortQueries extends BaseTestQuery{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestAltSortQueries.class);
+public class TestAltSortQueries extends BaseTestQuery {
+  @BeforeClass
+  public static void setupTestFiles() {
+    dirTestWatcher.copyFileToRoot(Paths.get("sample-data", "region.parquet"));
+    dirTestWatcher.copyFileToRoot(Paths.get("sample-data", "regionsSF"));
+    dirTestWatcher.copyFileToRoot(Paths.get("sample-data", "nation.parquet"));
+  }
 
   @Test
   public void testOrderBy() throws Exception{
     test("select R_REGIONKEY " +
-         "from dfs_test.`[WORKING_PATH]/../../sample-data/region.parquet` " +
+         "from dfs.`sample-data/region.parquet` " +
          "order by R_REGIONKEY");
   }
 
   @Test
   public void testOrderBySingleFile() throws Exception{
     test("select R_REGIONKEY " +
-         "from dfs_test.`[WORKING_PATH]/../../sample-data/regionsSF/` " +
+         "from dfs.`sample-data/regionsSF/` " +
          "order by R_REGIONKEY");
   }
 
@@ -64,12 +73,11 @@ public class TestAltSortQueries extends BaseTestQuery{
         "  nations.N_NAME,\n" +
         "  regions.R_NAME\n" +
         "FROM\n" +
-        "  dfs_test.`[WORKING_PATH]/../../sample-data/nation.parquet` nations\n" +
+        "  dfs.`sample-data/nation.parquet` nations\n" +
         "JOIN\n" +
-        "  dfs_test.`[WORKING_PATH]/../../sample-data/region.parquet` regions\n" +
+        "  dfs.`sample-data/region.parquet` regions\n" +
         "  on nations.N_REGIONKEY = regions.R_REGIONKEY" +
         " order by regions.R_NAME, nations.N_NAME " +
         " limit 5");
   }
-
 }
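
Files that are not packaged on the classpath (like the parquet samples above)
are now copied under the dfs workspace root by the dirTestWatcher fixture in
@BeforeClass and queried through plain dfs paths. A minimal sketch of the same
pattern for a new test class, reusing the sample file staged above:

    @BeforeClass
    public static void setupTestFiles() {
      // stage the checked-in sample under the per-class dfs root
      dirTestWatcher.copyFileToRoot(Paths.get("sample-data", "region.parquet"));
    }

    @Test
    public void orderByRegionKey() throws Exception {
      test("select R_REGIONKEY from dfs.`sample-data/region.parquet` order by R_REGIONKEY");
    }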

http://git-wip-us.apache.org/repos/asf/drill/blob/acc5ed92/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java b/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
index 8b608c6..e88e5a4 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestBugFixes.java
@@ -20,12 +20,14 @@ package org.apache.drill;
 import com.google.common.collect.ImmutableList;
 import org.apache.drill.categories.UnlikelyTest;
 import org.apache.drill.common.exceptions.UserException;
-import org.apache.drill.common.util.TestTools;
 import org.apache.drill.exec.planner.physical.PlannerSettings;
+import org.apache.drill.test.BaseTestQuery;
+import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.nio.file.Paths;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -33,8 +35,11 @@ import java.util.Map;
 @Category(UnlikelyTest.class)
 public class TestBugFixes extends BaseTestQuery {
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestBugFixes.class);
-  private static final String WORKING_PATH = TestTools.getWorkingPath();
-  private static final String TEST_RES_PATH = WORKING_PATH + "/src/test/resources";
+
+  @BeforeClass
+  public static void setupTestFiles() {
+    dirTestWatcher.copyResourceToRoot(Paths.get("bugs", "DRILL-4192"));
+  }
 
   @Test
   public void leak1() throws Exception {
@@ -157,19 +162,16 @@ public class TestBugFixes extends BaseTestQuery {
 
   @Test
   public void testDRILL4192() throws Exception {
-    String query = (String.format("select dir0, dir1 from dfs_test.`%s/bugs/DRILL-4192` order by dir1", TEST_RES_PATH));
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select dir0, dir1 from dfs.`bugs/DRILL-4192` order by dir1")
         .unOrdered()
         .baselineColumns("dir0", "dir1")
         .baselineValues("single_top_partition", "nested_partition_1")
         .baselineValues("single_top_partition", "nested_partition_2")
         .go();
 
-    query = (String.format("select dir0, dir1 from dfs_test.`%s/bugs/DRILL-4192/*/nested_partition_1` order by dir1", TEST_RES_PATH));
-
     testBuilder()
-        .sqlQuery(query)
+        .sqlQuery("select dir0, dir1 from dfs.`bugs/DRILL-4192/*/nested_partition_1` order by dir1")
         .unOrdered()
         .baselineColumns("dir0", "dir1")
         .baselineValues("single_top_partition", "nested_partition_1")
@@ -219,11 +221,10 @@ public class TestBugFixes extends BaseTestQuery {
     List<Map<String, Object>> baseline = baselineBuilder.build();
 
     testBuilder()
-            .sqlQuery(String.format("select cast(id as int) as id from dfs_test.`%s/bugs/DRILL-4884/limit_test_parquet/test0_0_0.parquet` group by id order by 1 limit %s",
-                TEST_RES_PATH, limit))
-            .unOrdered()
-            .baselineRecords(baseline)
-            .go();
+      .sqlQuery("select cast(id as int) as id from cp.`bugs/DRILL-4884/limit_test_parquet/test0_0_0.parquet` group by id order by 1 limit %s", limit)
+      .unOrdered()
+      .baselineRecords(baseline)
+      .go();
   }
 
   @Test