Posted to dev@drill.apache.org by GitBox <gi...@apache.org> on 2018/07/19 06:09:41 UTC

[GitHub] Ben-Zvi closed pull request #1336: DRILL-6496: Added missing logging statement in VectorUtil.showVectorAccessibleContent(VectorAccessible va, int[] columnWidths)

URL: https://github.com/apache/drill/pull/1336

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/BaseJsonTest.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/BaseJsonTest.java
index ee32aa18ef6..550fb73b330 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/BaseJsonTest.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/BaseJsonTest.java
@@ -55,11 +55,11 @@ public static void tearDownAfterClass() throws Exception {
 
   protected void runSQLAndVerifyCount(String sql, int expectedRowCount) throws Exception{
     List<QueryDataBatch> results = runHBaseSQLlWithResults(sql);
-    printResultAndVerifyRowCount(results, expectedRowCount);
+    logResultAndVerifyRowCount(results, expectedRowCount);
   }
 
-  private void printResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount) throws SchemaChangeException {
-    int rowCount = printResult(results);
+  private void logResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount) throws SchemaChangeException {
+    int rowCount = logResult(results);
     if (expectedRowCount != -1) {
       Assert.assertEquals(expectedRowCount, rowCount);
     }
diff --git a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/BaseHBaseTest.java b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/BaseHBaseTest.java
index dd7ce674e49..ab75eda1ee2 100644
--- a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/BaseHBaseTest.java
+++ b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/BaseHBaseTest.java
@@ -79,7 +79,7 @@ protected String getPlanText(String planFile, String tableName) throws IOExcepti
   protected void runHBasePhysicalVerifyCount(String planFile, String tableName, int expectedRowCount) throws Exception{
     String physicalPlan = getPlanText(planFile, tableName);
     List<QueryDataBatch> results = testPhysicalWithResults(physicalPlan);
-    printResultAndVerifyRowCount(results, expectedRowCount);
+    logResultAndVerifyRowCount(results, expectedRowCount);
   }
 
   protected List<QueryDataBatch> runHBaseSQLlWithResults(String sql) throws Exception {
@@ -89,11 +89,11 @@ protected void runHBasePhysicalVerifyCount(String planFile, String tableName, in
 
   protected void runHBaseSQLVerifyCount(String sql, int expectedRowCount) throws Exception{
     List<QueryDataBatch> results = runHBaseSQLlWithResults(sql);
-    printResultAndVerifyRowCount(results, expectedRowCount);
+    logResultAndVerifyRowCount(results, expectedRowCount);
   }
 
-  private void printResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount) throws SchemaChangeException {
-    int rowCount = printResult(results);
+  private void logResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount) throws SchemaChangeException {
+    int rowCount = logResult(results);
     if (expectedRowCount != -1) {
       Assert.assertEquals(expectedRowCount, rowCount);
     }
diff --git a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseCFAsJSONString.java b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseCFAsJSONString.java
index 592dda07396..50cda8fb292 100644
--- a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseCFAsJSONString.java
+++ b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseCFAsJSONString.java
@@ -55,7 +55,7 @@ public static void closeMyClient() throws IOException {
   public void testColumnFamiliesAsJSONString() throws Exception {
     setColumnWidths(new int[] {112, 12});
     List<QueryDataBatch> resultList = runHBaseSQLlWithResults("SELECT f, f2 FROM hbase.`[TABLE_NAME]` tableName LIMIT 1");
-    printResult(resultList);
+    logResult(resultList);
   }
 
 }
diff --git a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseQueries.java b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseQueries.java
index abd76a7e693..27882b59e70 100644
--- a/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseQueries.java
+++ b/contrib/storage-hbase/src/test/java/org/apache/drill/hbase/TestHBaseQueries.java
@@ -96,7 +96,7 @@ public void testCastEmptyStrings() throws Exception {
         List<QueryDataBatch> resultList = runHBaseSQLlWithResults("SELECT row_key,\n"
             + " CAST(t.f.c1 as INT) c1, CAST(t.f.c2 as BIGINT) c2, CAST(t.f.c3 as INT) c3,\n"
             + " CAST(t.f.c4 as INT) c4 FROM hbase.TestTableNullStr t where row_key='a1'");
-        printResult(resultList);
+        logResult(resultList);
     }
     finally {
         test("alter system reset `drill.exec.functions.cast_empty_string_to_null`;");
diff --git a/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/KafkaTestBase.java b/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/KafkaTestBase.java
index e30f3e65010..83da934e99f 100644
--- a/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/KafkaTestBase.java
+++ b/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/KafkaTestBase.java
@@ -66,12 +66,12 @@ public static void initKafkaStoragePlugin(EmbeddedKafkaCluster embeddedKafkaClus
 
   public void runKafkaSQLVerifyCount(String sql, int expectedRowCount) throws Exception {
     List<QueryDataBatch> results = runKafkaSQLWithResults(sql);
-    printResultAndVerifyRowCount(results, expectedRowCount);
+    logResultAndVerifyRowCount(results, expectedRowCount);
   }
 
-  public void printResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount)
+  public void logResultAndVerifyRowCount(List<QueryDataBatch> results, int expectedRowCount)
       throws SchemaChangeException {
-    int rowCount = printResult(results);
+    int rowCount = logResult(results);
     if (expectedRowCount != -1) {
       Assert.assertEquals(expectedRowCount, rowCount);
     }
@@ -89,4 +89,4 @@ public static void tearDownKafkaTestBase() throws Exception {
     TestKafkaSuit.tearDownCluster();
   }
 
-}
\ No newline at end of file
+}
diff --git a/contrib/storage-mongo/src/test/java/org/apache/drill/exec/store/mongo/MongoTestBase.java b/contrib/storage-mongo/src/test/java/org/apache/drill/exec/store/mongo/MongoTestBase.java
index 923c648ca24..4b4412f8d8f 100644
--- a/contrib/storage-mongo/src/test/java/org/apache/drill/exec/store/mongo/MongoTestBase.java
+++ b/contrib/storage-mongo/src/test/java/org/apache/drill/exec/store/mongo/MongoTestBase.java
@@ -64,12 +64,12 @@ public static void initMongoStoragePlugin() throws Exception {
   public void runMongoSQLVerifyCount(String sql, int expectedRowCount)
       throws Exception {
     List<QueryDataBatch> results = runMongoSQLWithResults(sql);
-    printResultAndVerifyRowCount(results, expectedRowCount);
+    logResultAndVerifyRowCount(results, expectedRowCount);
   }
 
-  public void printResultAndVerifyRowCount(List<QueryDataBatch> results,
-      int expectedRowCount) throws SchemaChangeException {
-    int rowCount = printResult(results);
+  public void logResultAndVerifyRowCount(List<QueryDataBatch> results,
+                                         int expectedRowCount) throws SchemaChangeException {
+    int rowCount = logResult(results);
     if (expectedRowCount != -1) {
       Assert.assertEquals(expectedRowCount, rowCount);
     }
@@ -92,4 +92,4 @@ public static void tearDownMongoTestBase() throws Exception {
     storagePlugin = null;
   }
 
-}
\ No newline at end of file
+}
diff --git a/docs/dev/TestLogging.md b/docs/dev/TestLogging.md
index b87adb10b1f..59fec543b98 100644
--- a/docs/dev/TestLogging.md
+++ b/docs/dev/TestLogging.md
@@ -103,3 +103,40 @@ Then, if for some reason you want to see the Logback logging, add the following
 -Dlogback.statusListenerClass=ch.qos.logback.core.status.OnConsoleStatusListener
 ```
 The launch configuration option overrides (appears on the Java command line after) the global setting.
+
+## Test Logging Configurations
+
+### Default Test Log Levels
+
+There is a global `logback-test.xml` configuration file in [common/src/test/resources/logback-test.xml](../../common/src/test/resources/logback-test.xml). This
+logging configuration by default outputs error level logs to stdout.
+
+Debug-level logging to Lilith can be turned on by adding `-Ddrill.lilith.enable=true` to the command used to run tests.
+
+### Changing Test Log Levels
+
+Often it is most convenient to output logs to the console for debugging. This is best done programmatically
+by using the [LogFixture](../../exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java), which
+allows temporarily changing log levels for specific blocks of code. An example of doing this is
+the following.
+
+```
+    try(LogFixture logFixture = new LogFixture.LogFixtureBuilder()
+      .logger(MyClass.class, Level.INFO)
+      .toConsole() // This redirects output to stdout
+      .build()) {
+      // Code block with different log levels.
+    }
+```
+
+More details on how to use the [LogFixture](../../exec/java-exec/src/test/java/org/apache/drill/test/LogFixture.java) can be found
+in the javadocs for the class. Additionally, there are several methods that allow printing of query results to the console for debugging:
+
+ * BaseTestQuery.printResult
+ * QueryTestUtil.testRunAndPrint
+ * QueryBuilder.print
+ * ClusterTest.runAndPrint
+ * ClientFixture.runQueriesAndPrint
+ 
+**IMPORTANT NOTE:** The methods described above along with LogFixtureBuilder.toConsole() should only be used for debugging. Code
+that uses these methods should not be committed, since it produces excess logging on our build servers.
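
For anyone trying out the debugging helpers listed above, here is a minimal throwaway sketch (illustrative only, not part of this patch) that combines the LogFixture with the new QueryBuilder.print(); the logger package and the query are placeholders.

```java
package org.apache.drill.test;

import ch.qos.logback.classic.Level;
import org.junit.ClassRule;
import org.junit.Test;

public class DebugLoggingExampleTest {

  @ClassRule
  public static final BaseDirTestWatcher dirTestWatcher = new BaseDirTestWatcher();

  @Test
  public void debugMyQuery() throws Exception {
    try (ClusterFixture cluster = ClusterFixture.standardCluster(dirTestWatcher);
         ClientFixture client = cluster.clientFixture();
         // Temporarily route Drill logging to stdout; levels are restored when the fixture closes.
         LogFixture logFixture = new LogFixture.LogFixtureBuilder()
             .logger("org.apache.drill.exec.physical.impl.xsort", Level.DEBUG) // placeholder package
             .toConsole()
             .build()) {
      // print() (added by this patch) writes the query results to stdout in TSV format.
      client.queryBuilder().sql("SELECT * FROM cp.`employee.json` LIMIT 5").print();
    }
  }
}
```
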
diff --git a/docs/dev/Testing.md b/docs/dev/Testing.md
index 09343d65683..148c7b10a31 100644
--- a/docs/dev/Testing.md
+++ b/docs/dev/Testing.md
@@ -149,7 +149,6 @@ Drill uses the [Maven Surefire plugin](http://maven.apache.org/components/surefi
               -Ddrill.exec.sys.store.provider.local.write=false
               -Dorg.apache.drill.exec.server.Drillbit.system_options=\
                "org.apache.drill.exec.compile.ClassTransformer.scalar_replacement=on"
-              -Ddrill.test.query.printing.silent=true
               -Ddrill.catastrophic_to_standard_out=true
               -XX:MaxPermSize=512M -XX:MaxDirectMemorySize=3072M
               -Djava.net.preferIPv4Stack=true
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DumpCat.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DumpCat.java
index 6c8592124e5..ff0d0b24756 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/DumpCat.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/DumpCat.java
@@ -261,7 +261,7 @@ private void showSingleBatch (VectorAccessibleSerializable vcSerializable, boole
     }
 
     /* show the contents in the batch */
-    VectorUtil.showVectorAccessibleContent(vectorContainer);
+    VectorUtil.logVectorAccessibleContent(vectorContainer);
   }
 
   /* Get batch meta info : rows, selectedRows, dataSize */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/PrintingResultsListener.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/LoggingResultsListener.java
similarity index 84%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/client/PrintingResultsListener.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/client/LoggingResultsListener.java
index c233837d8cb..454abfa3bd0 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/PrintingResultsListener.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/LoggingResultsListener.java
@@ -40,8 +40,8 @@
 
 import io.netty.buffer.DrillBuf;
 
-public class PrintingResultsListener implements UserResultsListener {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(PrintingResultsListener.class);
+public class LoggingResultsListener implements UserResultsListener {
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(LoggingResultsListener.class);
 
   private final AtomicInteger count = new AtomicInteger();
   private final Stopwatch w = Stopwatch.createUnstarted();
@@ -50,7 +50,7 @@
   private final int columnWidth;
   private final BufferAllocator allocator;
 
-  public PrintingResultsListener(DrillConfig config, Format format, int columnWidth) {
+  public LoggingResultsListener(DrillConfig config, Format format, int columnWidth) {
     this.allocator = RootAllocatorFactory.newRoot(config);
     this.loader = new RecordBatchLoader(allocator);
     this.format = format;
@@ -59,15 +59,13 @@ public PrintingResultsListener(DrillConfig config, Format format, int columnWidt
 
   @Override
   public void submissionFailed(UserException ex) {
-    System.out.println("Exception (no rows returned): " + ex + ".  Returned in " + w.elapsed(TimeUnit.MILLISECONDS)
-        + "ms.");
+    logger.info("Exception (no rows returned). Returned in {} ms.", w.elapsed(TimeUnit.MILLISECONDS), ex);
   }
 
   @Override
   public void queryCompleted(QueryState state) {
     DrillAutoCloseables.closeNoChecked(allocator);
-    System.out.println("Total rows returned : " + count.get() + ".  Returned in " + w.elapsed(TimeUnit.MILLISECONDS)
-        + "ms.");
+    logger.info("Total rows returned: {}. Returned in {} ms.", count.get(), w.elapsed(TimeUnit.MILLISECONDS));
   }
 
   @Override
@@ -90,13 +88,13 @@ public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
         try {
           switch(format) {
             case TABLE:
-              VectorUtil.showVectorAccessibleContent(loader, columnWidth);
+              VectorUtil.logVectorAccessibleContent(loader, columnWidth);
               break;
             case TSV:
-              VectorUtil.showVectorAccessibleContent(loader, "\t");
+              VectorUtil.logVectorAccessibleContent(loader, "\t");
               break;
             case CSV:
-              VectorUtil.showVectorAccessibleContent(loader, ",");
+              VectorUtil.logVectorAccessibleContent(loader, ",");
               break;
             default:
               throw new IllegalStateException(format.toString());
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/client/QuerySubmitter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/client/QuerySubmitter.java
index d9f47b52f93..f9b2d00b060 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/client/QuerySubmitter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/client/QuerySubmitter.java
@@ -189,7 +189,7 @@ public int submitQuery(DrillClient client, String plan, String type, String form
     Stopwatch watch = Stopwatch.createUnstarted();
     for (String query : queries) {
       AwaitableUserResultsListener listener =
-          new AwaitableUserResultsListener(new PrintingResultsListener(client.getConfig(), outputFormat, width));
+          new AwaitableUserResultsListener(new LoggingResultsListener(client.getConfig(), outputFormat, width));
       watch.start();
       client.runQuery(queryType, query, listener);
       int rows = listener.await();
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/CheckedSupplier.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/CheckedSupplier.java
new file mode 100644
index 00000000000..b744ac8c033
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/CheckedSupplier.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.util;
+
+/**
+ * The Java standard library does not provide a lambda function interface for functions that take no arguments
+ * but throw an exception, so we have to define our own here.
+ * @param <T> The return type of the lambda function.
+ * @param <E> The type of exception thrown by the lambda function.
+ */
+@FunctionalInterface
+public interface CheckedSupplier<T, E extends Exception> {
+  T get() throws E;
+}
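
As a side note on the interface just added, a small illustrative usage sketch follows (not part of this patch); the retryOnce helper and readConfig stand-in are hypothetical and exist only to show how the checked exception type flows through the interface.

```java
package org.apache.drill.exec.util;

import java.io.IOException;

public class CheckedSupplierExample {

  // The checked exception type is carried by the interface's second type
  // parameter, so callers do not need to wrap it in a RuntimeException.
  static <T> T retryOnce(CheckedSupplier<T, IOException> supplier) throws IOException {
    try {
      return supplier.get();
    } catch (IOException first) {
      // Illustrative policy: try exactly one more time, then give up.
      return supplier.get();
    }
  }

  public static void main(String[] args) throws IOException {
    String content = retryOnce(() -> readConfig("/tmp/drill-example.conf"));
    System.out.println(content);
  }

  // Stand-in for real I/O so the example stays self-contained.
  private static String readConfig(String path) throws IOException {
    if (path == null) {
      throw new IOException("no path given");
    }
    return "path=" + path;
  }
}
```
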
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/util/VectorUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/util/VectorUtil.java
index 9808a2b2e75..8729a391bf1 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/util/VectorUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/util/VectorUtil.java
@@ -39,7 +39,7 @@
   private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(VectorUtil.class);
   public static final int DEFAULT_COLUMN_WIDTH = 15;
 
-  public static void showVectorAccessibleContent(VectorAccessible va, final String delimiter) {
+  public static void logVectorAccessibleContent(VectorAccessible va, final String delimiter) {
     final StringBuilder sb = new StringBuilder();
     int rows = va.getRecordCount();
     sb.append(rows).append(" row(s):\n");
@@ -133,15 +133,15 @@ public static void appendVectorAccessibleContent(VectorAccessible va, StringBuil
     }
   }
 
-  public static void showVectorAccessibleContent(VectorAccessible va) {
-    showVectorAccessibleContent(va, DEFAULT_COLUMN_WIDTH);
+  public static void logVectorAccessibleContent(VectorAccessible va) {
+    logVectorAccessibleContent(va, DEFAULT_COLUMN_WIDTH);
   }
 
-  public static void showVectorAccessibleContent(VectorAccessible va, int columnWidth) {
-    showVectorAccessibleContent(va, new int[]{ columnWidth });
+  public static void logVectorAccessibleContent(VectorAccessible va, int columnWidth) {
+    logVectorAccessibleContent(va, new int[]{ columnWidth });
   }
 
-  public static void showVectorAccessibleContent(VectorAccessible va, int[] columnWidths) {
+  public static void logVectorAccessibleContent(VectorAccessible va, int[] columnWidths) {
     final StringBuilder sb = new StringBuilder();
     int width = 0;
     int columnIndex = 0;
@@ -194,6 +194,8 @@ public static void showVectorAccessibleContent(VectorAccessible va, int[] column
     for (VectorWrapper<?> vw : va) {
       vw.clear();
     }
+
+    logger.info(sb.toString());
   }
 
   private static String expandMapSchema(MaterializedField mapField) {
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestTpchDistributedConcurrent.java b/exec/java-exec/src/test/java/org/apache/drill/TestTpchDistributedConcurrent.java
index bd68b536eca..7258a97c7a5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestTpchDistributedConcurrent.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestTpchDistributedConcurrent.java
@@ -181,7 +181,7 @@ public void run() {
 
   @Test
   public void testConcurrentQueries() throws Exception {
-    QueryTestUtil.testRunAndPrint(client, UserBitShared.QueryType.SQL, alterSession);
+    QueryTestUtil.testRunAndLog(client, UserBitShared.QueryType.SQL, alterSession);
 
     testThread = Thread.currentThread();
     final QuerySubmitter querySubmitter = new QuerySubmitter();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
index 581c972e529..2bff1dad2fd 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestConvertFunctions.java
@@ -518,7 +518,7 @@ public void testBigIntVarCharReturnTripConvertLogical() throws Exception {
       count += result.getHeader().getRowCount();
       loader.load(result.getHeader().getDef(), result.getData());
       if (loader.getRecordCount() > 0) {
-        VectorUtil.showVectorAccessibleContent(loader);
+        VectorUtil.logVectorAccessibleContent(loader);
       }
       loader.clear();
       result.release();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/lateraljoin/TestE2EUnnestAndLateral.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/lateraljoin/TestE2EUnnestAndLateral.java
index 394e7321f53..88108a602f7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/lateraljoin/TestE2EUnnestAndLateral.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/lateraljoin/TestE2EUnnestAndLateral.java
@@ -61,7 +61,7 @@ public void testLateral_WithLimitInSubQuery() throws Exception {
     String Sql = "SELECT customer.c_name, customer.c_address, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer, LATERAL " +
       "(SELECT t.ord.o_id as o_id, t.ord.o_amount as o_amount FROM UNNEST(customer.orders) t(ord) LIMIT 1) orders";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -69,7 +69,7 @@ public void testLateral_WithFilterInSubQuery() throws Exception {
     String Sql = "SELECT customer.c_name, customer.c_address, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer, LATERAL " +
       "(SELECT t.ord.o_id as o_id, t.ord.o_amount as o_amount FROM UNNEST(customer.orders) t(ord) WHERE t.ord.o_amount > 10) orders";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -77,7 +77,7 @@ public void testLateral_WithFilterAndLimitInSubQuery() throws Exception {
     String Sql = "SELECT customer.c_name, customer.c_address, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer, LATERAL " +
       "(SELECT t.ord.o_id as o_id, t.ord.o_amount as o_amount FROM UNNEST(customer.orders) t(ord) WHERE t.ord.o_amount > 10 LIMIT 1) orders";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -105,7 +105,7 @@ public void testLateral_WithTopNInSubQuery() throws Exception {
   @Test
   public void testLateral_WithSortAndLimitInSubQuery() throws Exception {
 
-    test("alter session set `planner.enable_topn`=false");
+    runAndLog("alter session set `planner.enable_topn`=false");
 
     String Sql = "SELECT customer.c_name, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer, LATERAL " +
@@ -123,7 +123,7 @@ public void testLateral_WithSortAndLimitInSubQuery() throws Exception {
         .baselineValues("customer4", 32.0,  1030.1)
         .go();
     } finally {
-      test("alter session set `planner.enable_topn`=true");
+      runAndLog("alter session set `planner.enable_topn`=true");
     }
   }
 
@@ -149,7 +149,7 @@ public void testOuterApply_WithFilterAndLimitInSubQuery() throws Exception {
     String Sql = "SELECT customer.c_name, customer.c_address, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer OUTER APPLY " +
       "(SELECT t.ord.o_id as o_id , t.ord.o_amount as o_amount FROM UNNEST(customer.orders) t(ord) WHERE t.ord.o_amount > 10 LIMIT 1) orders";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -157,7 +157,7 @@ public void testLeftLateral_WithFilterAndLimitInSubQuery() throws Exception {
     String Sql = "SELECT customer.c_name, customer.c_address, orders.o_id, orders.o_amount " +
       "FROM cp.`lateraljoin/nested-customer.parquet` customer LEFT JOIN LATERAL " +
       "(SELECT t.ord.o_id as o_id, t.ord.o_amount as o_amount FROM UNNEST(customer.orders) t(ord) WHERE t.ord.o_amount > 10 LIMIT 1) orders ON TRUE";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -167,7 +167,7 @@ public void testMultiUnnestAtSameLevel() throws Exception {
       " (SELECT t.ord.o_id AS order_id, t.ord.o_amount AS order_amt, U2.item_name AS itemName, U2.item_num AS " +
         "itemNum FROM UNNEST(customer.orders) t(ord) , LATERAL" +
       " (SELECT t1.ord.i_name AS item_name, t1.ord.i_number AS item_num FROM UNNEST(t.ord) AS t1(ord)) AS U2) AS U1";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   @Test
@@ -261,7 +261,7 @@ public void testSingleUnnestCol() throws Exception {
   public void testNestedUnnest() throws Exception {
     String Sql = "select * from (select customer.orders as orders from cp.`lateraljoin/nested-customer.parquet` customer ) t1," +
         " lateral ( select t.ord.items as items from unnest(t1.orders) t(ord) ) t2, unnest(t2.items) t3(item) ";
-    test(Sql);
+    runAndLog(Sql);
   }
 
   /***********************************************************************************************
@@ -273,7 +273,7 @@ public void testMultipleBatchesLateralQuery() throws Exception {
     String sql = "SELECT customer.c_name, customer.c_address, orders.o_orderkey, orders.o_totalprice " +
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord)) orders";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -281,7 +281,7 @@ public void testMultipleBatchesLateral_WithLimitInSubQuery() throws Exception {
     String sql = "SELECT customer.c_name, customer.c_address, orders.o_orderkey, orders.o_totalprice " +
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord) LIMIT 10) orders";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -303,7 +303,7 @@ public void testMultipleBatchesLateral_WithTopNInSubQuery() throws Exception {
   @Test
   public void testMultipleBatchesLateral_WithSortAndLimitInSubQuery() throws Exception {
 
-    test("alter session set `planner.enable_topn`=false");
+    runAndLog("alter session set `planner.enable_topn`=false");
 
     String sql = "SELECT customer.c_name, orders.o_orderkey, orders.o_totalprice " +
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
@@ -319,7 +319,7 @@ public void testMultipleBatchesLateral_WithSortAndLimitInSubQuery() throws Excep
         .baselineValues("Customer#000007180", (long)54646821, 367189.55)
         .go();
     } finally {
-      test("alter session set `planner.enable_topn`=true");
+      runAndLog("alter session set `planner.enable_topn`=true");
     }
   }
 
@@ -346,7 +346,7 @@ public void testMultipleBatchesLateral_WithLimitFilterInSubQuery() throws Except
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord) WHERE t.ord.o_totalprice > 100000 LIMIT 2) " +
       "orders";
-    test(sql);
+    runAndLog(sql);
   }
 
   /***********************************************************************************************
@@ -362,7 +362,7 @@ public void testSchemaChangeOnNonUnnestColumn() throws Exception {
       String sql = "SELECT customer.c_name, customer.c_address, orders.o_orderkey, orders.o_totalprice " +
         "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
         "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST (customer.c_orders) t(ord)) orders";
-      test(sql);
+      runAndLog(sql);
     } catch (Exception ex) {
       fail();
     } finally {
@@ -393,7 +393,7 @@ public void testSchemaChangeOnNonUnnestColumn_InMultilevelCase() throws Exceptio
         "FROM UNNEST(customer.c_orders) t1(o)) orders, " +
         "LATERAL (SELECT t2.l.l_partkey as l_partkey, t2.l.l_linenumber as l_linenumber, t2.l.l_quantity as l_quantity " +
         "FROM UNNEST(orders.lineitems) t2(l)) olineitems";
-      test(sql);
+      runAndLog(sql);
     } catch (Exception ex) {
       fail();
     } finally {
@@ -409,7 +409,7 @@ public void testSchemaChangeOnUnnestColumn() throws Exception {
       String sql = "SELECT customer.c_name, customer.c_address, orders.o_orderkey, orders.o_totalprice " +
         "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
         "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord)) orders";
-      test(sql);
+      runAndLog(sql);
     } catch (Exception ex) {
       fail();
     } finally {
@@ -429,7 +429,7 @@ public void testSchemaChangeOnUnnestColumn_InMultilevelCase() throws Exception {
         " t1.o.o_shippriority as spriority FROM UNNEST(customer.c_orders) t1(o)) orders, " +
         "LATERAL (SELECT t2.l.l_partkey as l_partkey, t2.l.l_linenumber as l_linenumber, t2.l.l_quantity as l_quantity " +
         "FROM UNNEST(orders.lineitems) t2(l)) olineitems";
-      test(sql);
+      runAndLog(sql);
     } catch (Exception ex) {
       fail();
     } finally {
@@ -446,7 +446,7 @@ public void testSchemaChangeOnMultipleColumns() throws Exception {
         "orders.o_totalprice FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
         "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice, t.ord.o_shippriority o_shippriority FROM UNNEST(customer.c_orders) t(ord)) orders";
 
-      test(sql);
+      runAndLog(sql);
     } catch (Exception ex) {
       fail();
     } finally {
@@ -464,7 +464,7 @@ public void testMultipleBatchesLateral_WithLimitInParent() throws Exception {
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice  as o_totalprice FROM UNNEST(customer.c_orders) t(ord) WHERE t.ord.o_totalprice > 100000 LIMIT 2) " +
       "orders LIMIT 1";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -473,7 +473,7 @@ public void testMultipleBatchesLateral_WithFilterInParent() throws Exception {
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord) WHERE t.ord.o_totalprice > 100000 LIMIT 2) " +
       "orders WHERE orders.o_totalprice > 240000";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -482,7 +482,7 @@ public void testMultipleBatchesLateral_WithGroupByInParent() throws Exception {
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord) WHERE t.ord.o_totalprice > 100000 LIMIT 2) " +
       "orders GROUP BY customer.c_name";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -491,7 +491,7 @@ public void testMultipleBatchesLateral_WithOrderByInParent() throws Exception {
       "FROM dfs.`lateraljoin/multipleFiles` customer, LATERAL " +
       "(SELECT t.ord.o_orderkey as o_orderkey, t.ord.o_totalprice as o_totalprice FROM UNNEST(customer.c_orders) t(ord)) orders " +
       "ORDER BY orders.o_orderkey";
-    test(sql);
+    runAndLog(sql);
   }
 
   @Test
@@ -515,7 +515,7 @@ public void testMultipleBatchesLateral_WithHashAgg() throws Exception {
       .baselineValues(177819)
       .build().run();
     } finally {
-      test("alter session set `" + PlannerSettings.STREAMAGG.getOptionName() + "` = true");
+      runAndLog("alter session set `" + PlannerSettings.STREAMAGG.getOptionName() + "` = true");
     }
   }
 
@@ -541,7 +541,7 @@ public void testLateral_HashAgg_with_nulls() throws Exception {
       .baselineValues("dd",222L)
       .build().run();
     } finally {
-      test("alter session set `" + PlannerSettings.STREAMAGG.getOptionName() + "` = true");
+      runAndLog("alter session set `" + PlannerSettings.STREAMAGG.getOptionName() + "` = true");
     }
   }
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
index b2a899d7d84..d71ac760f96 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/project/TestSimpleProjection.java
@@ -63,7 +63,7 @@ public void project() throws Throwable {
     final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
 
     while (exec.next()) {
-      VectorUtil.showVectorAccessibleContent(exec.getIncoming(), "\t");
+      VectorUtil.logVectorAccessibleContent(exec.getIncoming(), "\t");
       final NullableBigIntVector c1 = exec.getValueVectorById(new SchemaPath("col1", ExpressionPosition.UNKNOWN), NullableBigIntVector.class);
       final NullableBigIntVector c2 = exec.getValueVectorById(new SchemaPath("col2", ExpressionPosition.UNKNOWN), NullableBigIntVector.class);
       final NullableBigIntVector.Accessor a1 = c1.getAccessor();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestSortSpillWithException.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestSortSpillWithException.java
index 5ab29deebe7..a8797cd0b69 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestSortSpillWithException.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/xsort/TestSortSpillWithException.java
@@ -86,7 +86,7 @@ public void testSpillLeakLegacy() throws Exception {
     ControlsInjectionUtil.setControls(cluster.client(), controls);
     // run a simple order by query
     try {
-      test("select employee_id from dfs.`xsort/2batches` order by employee_id");
+      runAndLog("select employee_id from dfs.`xsort/2batches` order by employee_id");
       fail("Query should have failed!");
     } catch (UserRemoteException e) {
       assertEquals(ErrorType.RESOURCE, e.getErrorType());
@@ -109,7 +109,7 @@ public void testSpillLeakManaged() throws Exception {
     ControlsInjectionUtil.setControls(cluster.client(), controls);
     // run a simple order by query
     try {
-      test("SELECT id_i, name_s250 FROM `mock`.`employee_500K` ORDER BY id_i");
+      runAndLog("SELECT id_i, name_s250 FROM `mock`.`employee_500K` ORDER BY id_i");
       fail("Query should have failed!");
     } catch (UserRemoteException e) {
       assertEquals(ErrorType.RESOURCE, e.getErrorType());
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/pop/PopUnitTestBase.java b/exec/java-exec/src/test/java/org/apache/drill/exec/pop/PopUnitTestBase.java
index dbabcac8ebf..aabf9c4edf6 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/pop/PopUnitTestBase.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/pop/PopUnitTestBase.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 import java.util.Properties;
 
-import org.apache.drill.test.QueryTestUtil;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.util.DrillFileUtils;
 import org.apache.drill.exec.ExecConstants;
@@ -54,7 +53,6 @@ public static void setup() {
     props.put(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, "false");
     props.put(ExecConstants.HTTP_ENABLE, "false");
     props.put(Drillbit.SYSTEM_OPTIONS_NAME, "org.apache.drill.exec.compile.ClassTransformer.scalar_replacement=on");
-    props.put(QueryTestUtil.TEST_QUERY_PRINTING_SILENT, "true");
     props.put("drill.catastrophic_to_standard_out", "true");
     CONFIG = DrillConfig.create(props);
   }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java b/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
index d76e209e0d4..5b9ae97c30d 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/server/options/TestConfigLinkage.java
@@ -317,8 +317,8 @@ public void testAlterInternalSystemOption() throws Exception {
          ClientFixture client = cluster.clientFixture()) {
       client.queryBuilder().sql("ALTER SYSTEM SET `%s` = 'bleh'", MOCK_PROPERTY).run();
 
-      client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS.getTableName()).printCsv();
-      client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS_VAL.getTableName()).printCsv();
+      client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS.getTableName()).logCsv();
+      client.queryBuilder().sql("SELECT * FROM sys.%s", SystemTable.INTERNAL_OPTIONS_VAL.getTableName()).logCsv();
 
       String mockProp = client.queryBuilder().
         sql("SELECT string_val FROM sys.%s where name='%s'", SystemTable.INTERNAL_OPTIONS, MOCK_PROPERTY).singletonString();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
index 342ed683445..c1e63a98839 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/text/TextRecordReaderTest.java
@@ -56,7 +56,7 @@ public void testFullExecution() throws Exception {
           count += b.getHeader().getRowCount();
         }
         loader.load(b.getHeader().getDef(), b.getData());
-        VectorUtil.showVectorAccessibleContent(loader);
+        VectorUtil.logVectorAccessibleContent(loader);
         loader.clear();
         b.release();
       }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestResourceLeak.java b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestResourceLeak.java
index 6efcabce8fa..1f5ee9f46f0 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestResourceLeak.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestResourceLeak.java
@@ -94,7 +94,7 @@ public static void openClient() throws Exception {
   public void tpch01() throws Exception {
     final String query = getFile("memory/tpch01_memory_leak.sql");
     try {
-      QueryTestUtil.test(client, "alter session set `planner.slice_target` = 10; " + query);
+      QueryTestUtil.testRunAndLog(client, "alter session set `planner.slice_target` = 10; " + query);
     } catch (UserRemoteException e) {
       if (e.getMessage().contains("Allocator closed with outstanding buffers allocated")) {
         return;
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java b/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
index ef67d58f4c9..db62bf0be19 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/BaseTestQuery.java
@@ -347,7 +347,7 @@ protected static void runSQL(String sql) throws Exception {
   }
 
   public static int testRunAndPrint(final QueryType type, final String query) throws Exception {
-    return QueryTestUtil.testRunAndPrint(client, type, query);
+    return QueryTestUtil.testRunAndLog(client, type, query);
   }
 
   protected static void testWithListener(QueryType type, String query, UserResultsListener resultListener) {
@@ -389,11 +389,11 @@ protected static void testNoResult(int interation, String query, Object... args)
   }
 
   public static void test(String query, Object... args) throws Exception {
-    QueryTestUtil.test(client, String.format(query, args));
+    QueryTestUtil.testRunAndLog(client, String.format(query, args));
   }
 
   public static void test(final String query) throws Exception {
-    QueryTestUtil.test(client, query);
+    QueryTestUtil.testRunAndLog(client, query);
   }
 
   protected static int testPhysical(String query) throws Exception{
@@ -513,7 +513,7 @@ protected void setColumnWidths(int[] columnWidths) {
     this.columnWidths = columnWidths;
   }
 
-  protected int printResult(List<QueryDataBatch> results) throws SchemaChangeException {
+  protected int logResult(List<QueryDataBatch> results) throws SchemaChangeException {
     int rowCount = 0;
     final RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
     for(final QueryDataBatch result : results) {
@@ -521,13 +521,18 @@ protected int printResult(List<QueryDataBatch> results) throws SchemaChangeExcep
       loader.load(result.getHeader().getDef(), result.getData());
       // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
       // SchemaChangeException, so check/clean throw clause above.
-      VectorUtil.showVectorAccessibleContent(loader, columnWidths);
+      VectorUtil.logVectorAccessibleContent(loader, columnWidths);
       loader.clear();
       result.release();
     }
     return rowCount;
   }
 
+  protected int printResult(final List<QueryDataBatch> results) throws SchemaChangeException {
+    int result = PrintingUtils.printAndThrow(() -> logResult(results));
+    return result;
+  }
+
   protected static String getResultString(List<QueryDataBatch> results, String delimiter)
       throws SchemaChangeException {
     final StringBuilder formattedResults = new StringBuilder();
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/BufferingQueryEventListener.java b/exec/java-exec/src/test/java/org/apache/drill/test/BufferingQueryEventListener.java
index 6d687579358..a47e54de268 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/BufferingQueryEventListener.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/BufferingQueryEventListener.java
@@ -39,6 +39,8 @@
 
 public class BufferingQueryEventListener implements UserResultsListener
 {
+  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BufferingQueryEventListener.class);
+
   public static class QueryEvent
   {
     public enum Type { QUERY_ID, BATCH, EOF, ERROR }
@@ -96,8 +98,7 @@ private void silentPut(QueryEvent event) {
     try {
       queue.put(event);
     } catch (InterruptedException e) {
-      // What to do, what to do...
-      e.printStackTrace();
+      logger.error("Exception:", e);
     }
   }
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClientFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClientFixture.java
index a13789fff05..b43e9d7c261 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClientFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClientFixture.java
@@ -180,14 +180,10 @@ public TestBuilder testBuilder() {
   }
 
   /**
-   * Run zero or more queries and optionally print the output in TSV format.
-   * Similar to {@link QueryTestUtil#test}. Output is printed
-   * only if the tests are running as verbose.
-   *
-   * @return the number of rows returned
+   * Run zero or more queries and output the results in TSV format.
    */
-
-  public void runQueries(final String queryString) throws Exception{
+  private void runQueriesAndOutput(final String queryString,
+                                   final boolean print) throws Exception {
     final String query = QueryTestUtil.normalizeQuery(queryString);
     String[] queries = query.split(";");
     for (String q : queries) {
@@ -195,10 +191,29 @@ public void runQueries(final String queryString) throws Exception{
       if (trimmedQuery.isEmpty()) {
         continue;
       }
-      queryBuilder().sql(trimmedQuery).print();
+
+      if (print) {
+        queryBuilder().sql(trimmedQuery).print();
+      } else {
+        queryBuilder().sql(trimmedQuery).log();
+      }
     }
   }
 
+  /**
+   * Run zero or more queries and log the output in TSV format.
+   */
+  public void runQueriesAndLog(final String queryString) throws Exception {
+    runQueriesAndOutput(queryString, false);
+  }
+
+  /**
+   * Run zero or more queries and print the output in TSV format.
+   */
+  public void runQueriesAndPrint(final String queryString) throws Exception {
+    runQueriesAndOutput(queryString, true);
+  }
+
   /**
    * Plan a query without execution.
    * @throws ExecutionException
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
index 77df0097b83..b393db080e2 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterFixture.java
@@ -91,7 +91,6 @@
 
       put(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE, DFS_TMP_SCHEMA);
       put(ExecConstants.HTTP_ENABLE, false);
-      put(QueryTestUtil.TEST_QUERY_PRINTING_SILENT, true);
       put("drill.catastrophic_to_standard_out", true);
 
       // Verbose errors.
@@ -570,7 +569,7 @@ public BufferAllocator allocator() {
 
     @Override
     public void test(String query) throws Exception {
-      client.runQueries(query);
+      client.runQueriesAndLog(query);
     }
 
     @Override
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterTest.java b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterTest.java
index 0a770a0fd98..57ba711b542 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ClusterTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ClusterTest.java
@@ -20,7 +20,6 @@
 import java.io.IOException;
 
 import org.apache.drill.common.AutoCloseables;
-import org.apache.drill.test.rowSet.RowSet;
 import org.junit.AfterClass;
 import org.junit.ClassRule;
 
@@ -110,44 +109,19 @@ public String getFile(String resource) throws IOException {
     return ClusterFixture.getResource(resource);
   }
 
-  public void test(String sqlQuery) throws Exception {
-    client.runQueries(sqlQuery);
+  public void runAndLog(String sqlQuery) throws Exception {
+    client.runQueriesAndLog(sqlQuery);
   }
 
-  public static void test(String query, Object... args) throws Exception {
+  public void runAndPrint(String sqlQuery) throws Exception {
+    client.runQueriesAndPrint(sqlQuery);
+  }
+
+  public static void run(String query, Object... args) throws Exception {
     client.queryBuilder().sql(query, args).run( );
   }
 
   public QueryBuilder queryBuilder( ) {
     return client.queryBuilder();
   }
-
-  /**
-   * Handy development-time tool to run a query and print the results. Use this
-   * when first developing tests. Then, encode the expected results using
-   * the appropriate tool and verify them rather than just printing them to
-   * create the final test.
-   *
-   * @param sql the query to run
-   */
-
-  protected void runAndPrint(String sql) {
-    QueryResultSet results = client.queryBuilder().sql(sql).resultSet();
-    try {
-      for (;;) {
-        RowSet rowSet = results.next();
-        if (rowSet == null) {
-          break;
-        }
-        if (rowSet.rowCount() > 0) {
-          rowSet.print();
-        }
-        rowSet.clear();
-      }
-    } catch (Exception e) {
-      throw new IllegalStateException(e);
-    } finally {
-      results.close();
-    }
-  }
 }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/ExampleTest.java b/exec/java-exec/src/test/java/org/apache/drill/test/ExampleTest.java
index 6e3893e54f3..77ee6e96dff 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/ExampleTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/ExampleTest.java
@@ -90,7 +90,7 @@
   public void firstTest() throws Exception {
     try (ClusterFixture cluster = ClusterFixture.standardCluster(dirTestWatcher);
          ClientFixture client = cluster.clientFixture()) {
-      client.queryBuilder().sql("SELECT * FROM `cp`.`employee.json` LIMIT 10").printCsv();
+      client.queryBuilder().sql("SELECT * FROM `cp`.`employee.json` LIMIT 10").logCsv();
     }
   }
 
@@ -170,7 +170,7 @@ public void thirdTest() throws Exception {
     try (ClusterFixture cluster = ClusterFixture.standardCluster(dirTestWatcher);
          ClientFixture client = cluster.clientFixture()) {
       String sql = "SELECT id_i, name_s10 FROM `mock`.`employees_5`";
-      client.queryBuilder().sql(sql).printCsv();
+      client.queryBuilder().sql(sql).logCsv();
     }
   }
 
@@ -268,7 +268,7 @@ public void sixthTest() throws Exception {
     try (ClusterFixture cluster = ClusterFixture.standardCluster(dirTestWatcher);
          ClientFixture client = cluster.clientFixture()) {
       cluster.defineWorkspace("dfs", "resources", TestTools.TEST_RESOURCES_ABS.toFile().getAbsolutePath(), "tsv");
-      client.queryBuilder().sql("SELECT * from dfs.resources.`testframework/small_test_data.tsv`").printCsv();
+      client.queryBuilder().sql("SELECT * from dfs.resources.`testframework/small_test_data.tsv`").logCsv();
     }
   }
 
@@ -280,7 +280,7 @@ public void seventhTest() throws Exception {
     try (ClusterFixture cluster = ClusterFixture.standardCluster(dirTestWatcher);
          ClientFixture client = cluster.clientFixture()) {
       cluster.defineWorkspace("dfs", "sampledata", TestTools.SAMPLE_DATA.toFile().getAbsolutePath(), "parquet");
-      client.queryBuilder().sql("SELECT * from dfs.sampledata.`nation.parquet`").printCsv();
+      client.queryBuilder().sql("SELECT * from dfs.sampledata.`nation.parquet`").logCsv();
     }
   }
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/PrintingResultsListener.java b/exec/java-exec/src/test/java/org/apache/drill/test/PrintingResultsListener.java
new file mode 100644
index 00000000000..f5cd9954056
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/PrintingResultsListener.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test;
+
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.exec.client.LoggingResultsListener;
+import org.apache.drill.exec.client.QuerySubmitter;
+import org.apache.drill.exec.proto.UserBitShared;
+import org.apache.drill.exec.rpc.ConnectionThrottle;
+import org.apache.drill.exec.rpc.user.QueryDataBatch;
+
+public class PrintingResultsListener extends LoggingResultsListener {
+  public PrintingResultsListener(DrillConfig config, QuerySubmitter.Format format, int columnWidth) {
+    super(config, format, columnWidth);
+  }
+
+  @Override
+  public void submissionFailed(UserException ex) {
+    PrintingUtils.print(() -> {
+      super.submissionFailed(ex);
+      return null;
+    });
+  }
+
+  @Override
+  public void queryCompleted(UserBitShared.QueryResult.QueryState state) {
+    PrintingUtils.print(() -> {
+      super.queryCompleted(state);
+      return null;
+    });
+  }
+
+  @Override
+  public void dataArrived(QueryDataBatch result, ConnectionThrottle throttle) {
+    PrintingUtils.print(() -> {
+      super.dataArrived(result, throttle);
+      return null;
+    });
+  }
+}
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/PrintingUtils.java b/exec/java-exec/src/test/java/org/apache/drill/test/PrintingUtils.java
new file mode 100644
index 00000000000..1709bdf501a
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/PrintingUtils.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test;
+
+import ch.qos.logback.classic.Level;
+import org.apache.drill.exec.client.LoggingResultsListener;
+import org.apache.drill.exec.util.CheckedSupplier;
+import org.apache.drill.exec.util.VectorUtil;
+
+import java.util.function.Supplier;
+
+/**
+ * <p>
+ *   This class contains utility methods to run lambda functions with the necessary {@link org.apache.drill.test.LogFixture}
+ *   boilerplate to print results to stdout for debugging purposes.
+ * </p>
+ *
+ * <p>
+ *   If you need to enable printing for more classes, simply add them to the {@link org.apache.drill.test.LogFixture}
+ *   constructed in {@link #printAndThrow(CheckedSupplier)}.
+ * </p>
+ */
+public final class PrintingUtils {
+
+  /**
+   * Enables printing to stdout for lambda functions that do not throw exceptions.
+   * @param supplier Lambda function to execute.
+   * @param <T> The return type of the lambda function.
+   * @return Data produced by the lambda function.
+   */
+  public static <T> T print(final Supplier<T> supplier) {
+    return printAndThrow(new CheckedSupplier<T, RuntimeException>() {
+      @Override
+      public T get() throws RuntimeException {
+        return supplier.get();
+      }
+    });
+  }
+
+  /**
+   * Enables printing to stdout for lambda functions that throw an exception.
+   * @param supplier Lambda function to execute.
+   * @param <T> Return type of the lambda function.
+   * @param <E> Type of exception thrown.
+   * @return Data produced by the lambda function.
+   * @throws E An exception.
+   */
+  public static <T, E extends Exception> T printAndThrow(CheckedSupplier<T, E> supplier) throws E {
+    try(LogFixture logFixture = new LogFixture.LogFixtureBuilder()
+      .rootLogger(Level.OFF)
+      // For some reason rootLogger(Level.OFF) is not sufficient.
+      .logger("org.apache.drill", Level.OFF) // Disable logging for Drill classes we don't want
+      .logger(VectorUtil.class, Level.INFO)
+      .logger(LoggingResultsListener.class, Level.INFO)
+      .toConsole() // This redirects output to stdout
+      .build()) {
+      return supplier.get();
+    }
+  }
+}
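
For context, here is a minimal sketch (not part of this patch) of how the utilities above are consumed, modeled on the BaseTestQuery.printResult change earlier in this diff; the subclass name is hypothetical.

```java
package org.apache.drill.test;

import java.util.List;

import org.apache.drill.exec.exception.SchemaChangeException;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

public class PrintingUtilsExample extends BaseTestQuery {

  // logResult writes the batches through VectorUtil and logback, and
  // printAndThrow wraps the call in a LogFixture so the output actually
  // reaches stdout; the checked SchemaChangeException propagates unchanged.
  protected int printForDebugging(List<QueryDataBatch> results) throws SchemaChangeException {
    return PrintingUtils.printAndThrow(() -> logResult(results));
  }
}
```
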
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
index ff0e166134c..834e47bce13 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/QueryBuilder.java
@@ -29,10 +29,10 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
+import org.apache.drill.common.exceptions.UserRemoteException;
 import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.client.PrintingResultsListener;
+import org.apache.drill.exec.client.LoggingResultsListener;
 import org.apache.drill.exec.client.QuerySubmitter.Format;
 import org.apache.drill.exec.exception.SchemaChangeException;
 import org.apache.drill.exec.proto.BitControl.PlanFragment;
@@ -496,6 +496,31 @@ public BufferingQueryEventListener withEventListener() {
     return listener;
   }
 
+  public long logCsv() {
+    return log(Format.CSV);
+  }
+
+  public long log(Format format) {
+    return log(format, 20);
+  }
+
+  public long log(Format format, int colWidth) {
+    return runAndWait(new LoggingResultsListener(client.cluster().config(), format, colWidth));
+  }
+
+  /**
+   * <p>
+   *   Runs a query and logs the output in TSV format.
+   *   Similar to {@link QueryTestUtil#testRunAndLog} with one query.
+   * </p>
+   *
+   * @return The number of rows returned.
+   * @throws Exception If anything goes wrong with query execution.
+   */
+  public long log() throws Exception {
+    return log(Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH);
+  }
+
   public long printCsv() {
     return print(Format.CSV);
   }
@@ -508,6 +533,19 @@ public long print(Format format, int colWidth) {
     return runAndWait(new PrintingResultsListener(client.cluster().config(), format, colWidth));
   }
 
+  /**
+   * <p>
+   *   Runs a query and prints the output to stdout in TSV format.
+   *   Similar to {@link QueryTestUtil#testRunAndPrint} with one query.
+   * </p>
+   *
+   * @return The number of rows returned.
+   * @throws Exception If anything goes wrong with query execution.
+   */
+  public long print() throws Exception {
+    return print(Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH);
+  }
+
   /**
    * Run the query asynchronously, returning a future to be used
    * to check for query completion, wait for completion, and obtain
@@ -520,33 +558,14 @@ public QuerySummaryFuture futureSummary() {
     return future;
   }
 
-  /**
-   * Run a query and optionally print the output in TSV format.
-   * Similar to {@link QueryTestUtil#test} with one query. Output is printed
-   * only if the tests are running as verbose.
-   *
-   * @return the number of rows returned
-   * @throws Exception if anything goes wrong with query execution
-   */
-
-  public long print() throws Exception {
-    DrillConfig config = client.cluster().config( );
-
-    boolean verbose = !config.getBoolean(QueryTestUtil.TEST_QUERY_PRINTING_SILENT);
-
-    if (verbose) {
-      return print(Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH);
-    } else {
-      return run().recordCount();
-    }
-  }
-
   public long runAndWait(UserResultsListener listener) {
     AwaitableUserResultsListener resultListener =
         new AwaitableUserResultsListener(listener);
     withListener(resultListener);
     try {
       return resultListener.await();
+    } catch (UserRemoteException e) {
+      throw e;
     } catch (Exception e) {
       throw new IllegalStateException(e);
     }
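
A short sketch of the resulting QueryBuilder API (again assuming a ClientFixture named
"client" and an illustrative query; Format is QuerySubmitter.Format):

    // print() writes rows to stdout via PrintingResultsListener,
    // while log() routes them through LoggingResultsListener instead.
    long printed = client.queryBuilder()
        .sql("SELECT employee_id, full_name FROM cp.`employee.json` LIMIT 3")
        .print();
    long logged = client.queryBuilder()
        .sql("SELECT employee_id, full_name FROM cp.`employee.json` LIMIT 3")
        .log(Format.CSV, 30);
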
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/QueryTestUtil.java b/exec/java-exec/src/test/java/org/apache/drill/test/QueryTestUtil.java
index 96de2de1f49..159656247a1 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/QueryTestUtil.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/QueryTestUtil.java
@@ -22,11 +22,11 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.drill.test.BaseTestQuery.SilentListener;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.client.DrillClient;
+import org.apache.drill.exec.client.LoggingResultsListener;
 import org.apache.drill.exec.client.PrintingResultsListener;
 import org.apache.drill.exec.client.QuerySubmitter.Format;
 import org.apache.drill.exec.compile.ClassTransformer;
 import org.apache.drill.exec.exception.OutOfMemoryException;
@@ -47,9 +46,6 @@
  * Utilities useful for tests that issue SQL queries.
  */
 public class QueryTestUtil {
-
-  public static final String TEST_QUERY_PRINTING_SILENT = "drill.test.query.printing.silent";
-
   /**
    * Constructor. All methods are static.
    */
@@ -100,36 +96,41 @@ public static String normalizeQuery(final String query) {
   }
 
   /**
-   * Execute a SQL query, and print the results.
+   * Execute a SQL query, and output the results.
    *
    * @param drillClient drill client to use
    * @param type type of the query
    * @param queryString query string
+   * @param print True to output results to stdout. False to log results.
+   *
    * @return number of rows returned
-   * @throws Exception
+   * @throws Exception An error while running the query.
    */
-  public static int testRunAndPrint(
-      final DrillClient drillClient, final QueryType type, final String queryString) throws Exception {
+  private static int testRunAndOutput(final DrillClient drillClient,
+                                      final QueryType type,
+                                      final String queryString,
+                                      final boolean print) throws Exception {
     final String query = normalizeQuery(queryString);
     DrillConfig config = drillClient.getConfig();
     AwaitableUserResultsListener resultListener =
-        new AwaitableUserResultsListener(
-            config.getBoolean(TEST_QUERY_PRINTING_SILENT) ?
-                new SilentListener() :
-                new PrintingResultsListener(config, Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH)
-        );
+        new AwaitableUserResultsListener(print ?
+            new PrintingResultsListener(config, Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH) :
+            new LoggingResultsListener(config, Format.TSV, VectorUtil.DEFAULT_COLUMN_WIDTH));
     drillClient.runQuery(type, query, resultListener);
     return resultListener.await();
   }
 
   /**
-   * Execute one or more queries separated by semicolons, and print the results.
+   * Execute one or more queries separated by semicolons, and output the results.
    *
    * @param drillClient drill client to use
    * @param queryString the query string
-   * @throws Exception
+   * @param print True to output results to stdout. False to log results.
+   * @throws Exception An error while running the query.
    */
-  public static void test(final DrillClient drillClient, final String queryString) throws Exception{
+  public static void testRunAndOutput(final DrillClient drillClient,
+                                      final String queryString,
+                                      final boolean print) throws Exception {
     final String query = normalizeQuery(queryString);
     String[] queries = query.split(";");
     for (String q : queries) {
@@ -137,10 +138,78 @@ public static void test(final DrillClient drillClient, final String queryString)
       if (trimmedQuery.isEmpty()) {
         continue;
       }
-      testRunAndPrint(drillClient, QueryType.SQL, trimmedQuery);
+      testRunAndOutput(drillClient, QueryType.SQL, trimmedQuery, print);
     }
   }
 
+  /**
+   * Execute a SQL query, and log the results.
+   *
+   * @param drillClient drill client to use
+   * @param type type of the query
+   * @param queryString query string
+   * @return number of rows returned
+   * @throws Exception An error while running the query.
+   */
+  public static int testRunAndLog(final DrillClient drillClient,
+                                  final QueryType type,
+                                  final String queryString) throws Exception {
+    return testRunAndOutput(drillClient, type, queryString, false);
+  }
+
+  /**
+   * Execute one or more queries separated by semicolons, and log the results.
+   *
+   * @param drillClient drill client to use
+   * @param queryString the query string
+   * @throws Exception An error while running the queries.
+   */
+  public static void testRunAndLog(final DrillClient drillClient,
+                                   final String queryString) throws Exception {
+    testRunAndOutput(drillClient, queryString, false);
+  }
+
+  /**
+   * Execute one or more queries separated by semicolons, and log the results, with the option to
+   * add formatted arguments to the query string.
+   *
+   * @param drillClient drill client to use
+   * @param query the query string; may contain formatting specifications to be used by
+   *   {@link String#format(String, Object...)}.
+   * @param args optional args to use in the formatting call for the query string
+   * @throws Exception An error while running the query.
+   */
+  public static void testRunAndLog(final DrillClient drillClient, final String query, Object... args) throws Exception {
+    testRunAndLog(drillClient, String.format(query, args));
+  }
+
+  /**
+   * Execute a SQL query, and print the results.
+   *
+   * @param drillClient drill client to use
+   * @param type type of the query
+   * @param queryString query string
+   * @return number of rows returned
+   * @throws Exception An error while running the query.
+   */
+  public static int testRunAndPrint(final DrillClient drillClient,
+                                    final QueryType type,
+                                    final String queryString) throws Exception {
+    return testRunAndOutput(drillClient, type, queryString, true);
+  }
+
+  /**
+   * Execute one or more queries separated by semicolons, and print the results.
+   *
+   * @param drillClient drill client to use
+   * @param queryString the query string
+   * @throws Exception An error while running the queries.
+   */
+  public static void testRunAndPrint(final DrillClient drillClient,
+                                     final String queryString) throws Exception {
+    testRunAndOutput(drillClient, queryString, true);
+  }
+
   /**
    * Execute one or more queries separated by semicolons, and print the results, with the option to
    * add formatted arguments to the query string.
@@ -149,10 +218,10 @@ public static void test(final DrillClient drillClient, final String queryString)
    * @param query the query string; may contain formatting specifications to be used by
    *   {@link String#format(String, Object...)}.
    * @param args optional args to use in the formatting call for the query string
-   * @throws Exception
+   * @throws Exception An error while running the query.
    */
-  public static void test(final DrillClient drillClient, final String query, Object... args) throws Exception {
-    test(drillClient, String.format(query, args));
+  public static void testRunAndPrint(final DrillClient drillClient, final String query, Object... args) throws Exception {
+    testRunAndPrint(drillClient, String.format(query, args));
   }
 
   /**
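
And the renamed QueryTestUtil helpers, as they might be called from a BaseTestQuery-style
test (the DrillClient "client" is assumed to come from the existing test setup):

    QueryTestUtil.testRunAndLog(client, "SHOW DATABASES");                  // rows go to the log
    QueryTestUtil.testRunAndPrint(client, QueryType.SQL, "SHOW DATABASES"); // rows go to stdout
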
diff --git a/pom.xml b/pom.xml
index e1954340ec8..cbd27e89ae9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -721,7 +721,6 @@
               -Ddrill.exec.memory.enable_unsafe_bounds_check=true
               -Ddrill.exec.sys.store.provider.local.write=false
               -Dorg.apache.drill.exec.server.Drillbit.system_options="org.apache.drill.exec.compile.ClassTransformer.scalar_replacement=on"
-              -Ddrill.test.query.printing.silent=true
               -Ddrill.catastrophic_to_standard_out=true
               -XX:MaxDirectMemorySize=${directMemoryMb}M
               -Djava.net.preferIPv4Stack=true


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services