Posted to commits@drill.apache.org by am...@apache.org on 2017/11/05 19:45:20 UTC

[1/2] drill git commit: DRILL-5864: Selecting a non-existing field from a MapR-DB JSON table fails with NPE.

Repository: drill
Updated Branches:
  refs/heads/master 4a718a0bd -> 7a2fc87ee


DRILL-5864: Selecting a non-existing field from a MapR-DB JSON table fails with NPE.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/125a9271
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/125a9271
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/125a9271

Branch: refs/heads/master
Commit: 125a9271d7cf0dfb30aac8e62447507ea0a7d6c9
Parents: 4a718a0
Author: Hanumath Rao Maduri <hm...@maprtech.com>
Authored: Wed Oct 11 17:07:22 2017 -0700
Committer: Aman Sinha <as...@maprtech.com>
Committed: Sun Nov 5 08:20:41 2017 -0800

----------------------------------------------------------------------
 contrib/format-maprdb/README.md                 |  6 ++
 .../store/mapr/db/MapRDBFormatPluginConfig.java | 11 +++
 .../mapr/db/json/MaprDBJsonRecordReader.java    |  7 ++
 .../drill/maprdb/tests/json/TestSimpleJson.java | 10 +++
 .../exec/vector/complex/fn/JsonReader.java      | 66 +-------------
 .../exec/vector/complex/fn/JsonReaderUtils.java | 94 ++++++++++++++++++++
 6 files changed, 129 insertions(+), 65 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/contrib/format-maprdb/README.md
----------------------------------------------------------------------
diff --git a/contrib/format-maprdb/README.md b/contrib/format-maprdb/README.md
index ff19285..a94a7cb 100644
--- a/contrib/format-maprdb/README.md
+++ b/contrib/format-maprdb/README.md
@@ -1,2 +1,8 @@
 drill-mapr-plugin
 =================
+By default, all the tests in contrib/format-maprdb are disabled.
+To enable and run them, compile and execute the tests with the
+-Pmapr profile.
+
+Here is an example of the mvn command used to run these tests:
+mvn install -Dtests=cluster -Pmapr
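
For a single test class, the standard Maven Surefire -Dtest property can narrow the run further; this exact invocation is illustrative, not part of the commit:

mvn install -Pmapr -Dtests=cluster -Dtest=TestSimpleJson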

http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBFormatPluginConfig.java
----------------------------------------------------------------------
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBFormatPluginConfig.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBFormatPluginConfig.java
index 50a67b4..ad153fe 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBFormatPluginConfig.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBFormatPluginConfig.java
@@ -32,6 +32,12 @@ public class MapRDBFormatPluginConfig extends TableFormatPluginConfig {
   public boolean ignoreSchemaChange = false;
   public boolean readAllNumbersAsDouble = false;
   public boolean disableCountOptimization = false;
+  /* This flag enables special handling for the case in which none of the
+   * columns in the query exists in the MapR-DB table. It can be deprecated
+   * once it is established that this special handling does not regress the
+   * performance of reading a MapR-DB table.
+   */
+  public boolean nonExistentFieldSupport = true;
 
   @Override
   public int hashCode() {
@@ -40,6 +46,7 @@ public class MapRDBFormatPluginConfig extends TableFormatPluginConfig {
     result = 31 * result + (ignoreSchemaChange ? 1231 : 1237);
     result = 31 * result + (readAllNumbersAsDouble ? 1231 : 1237);
     result = 31 * result + (disableCountOptimization ? 1231 : 1237);
+    result = 31 * result + (nonExistentFieldSupport ? 1231 : 1237);
     return result;
   }
 
@@ -56,6 +63,8 @@ public class MapRDBFormatPluginConfig extends TableFormatPluginConfig {
       return false;
     } else if (disableCountOptimization != other.disableCountOptimization) {
       return false;
+    } else if (nonExistentFieldSupport != other.nonExistentFieldSupport) {
+      return false;
     }
     return true;
   }
@@ -76,6 +85,8 @@ public class MapRDBFormatPluginConfig extends TableFormatPluginConfig {
     return enablePushdown;
   }
 
+  public boolean isNonExistentFieldSupport() { return nonExistentFieldSupport; }
+
   public boolean isIgnoreSchemaChange() {
     return ignoreSchemaChange;
   }
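
As an aside, 1231 and 1237 above are the conventional primes from Boolean.hashCode() (1231 for true, 1237 for false). Below is a minimal sketch of the contract the new field preserves; the no-arg constructor and direct field writes are assumptions based on the public fields shown above, and the class name is illustrative. Two configs differing only in nonExistentFieldSupport must neither compare equal nor share a hash code, or Drill could conflate the two plugin configurations:

import org.apache.drill.exec.store.mapr.db.MapRDBFormatPluginConfig;

public class ConfigContractCheck {
  public static void main(String[] args) {
    MapRDBFormatPluginConfig a = new MapRDBFormatPluginConfig();
    MapRDBFormatPluginConfig b = new MapRDBFormatPluginConfig();
    b.nonExistentFieldSupport = false;                  // differ only in the new flag
    System.out.println(a.equals(b));                    // false: equals() checks the flag
    System.out.println(a.hashCode() == b.hashCode());   // false: hashCode() mixes it in too
  }
}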

http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/MaprDBJsonRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/MaprDBJsonRecordReader.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/MaprDBJsonRecordReader.java
index ca31767..113b3ad 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/MaprDBJsonRecordReader.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/json/MaprDBJsonRecordReader.java
@@ -26,6 +26,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.Stack;
+import java.util.Collections;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.drill.common.exceptions.ExecutionSetupException;
@@ -44,6 +45,7 @@ import org.apache.drill.exec.store.mapr.db.MapRDBSubScanSpec;
 import org.apache.drill.exec.util.Utilities;
 import org.apache.drill.exec.vector.BaseValueVector;
 import org.apache.drill.exec.vector.complex.impl.MapOrListWriterImpl;
+import org.apache.drill.exec.vector.complex.fn.JsonReaderUtils;
 import org.apache.drill.exec.vector.complex.impl.VectorContainerWriter;
 import org.ojai.DocumentReader;
 import org.ojai.DocumentReader.EventType;
@@ -95,6 +97,7 @@ public class MaprDBJsonRecordReader extends AbstractRecordReader {
   private final boolean allTextMode;
   private final boolean ignoreSchemaChange;
   private final boolean disableCountOptimization;
+  private final boolean nonExistentColumnsProjection;
 
   public MaprDBJsonRecordReader(MapRDBSubScanSpec subScanSpec,
       MapRDBFormatPluginConfig formatPluginConfig,
@@ -119,6 +122,7 @@ public class MaprDBJsonRecordReader extends AbstractRecordReader {
     allTextMode = formatPluginConfig.isAllTextMode();
     ignoreSchemaChange = formatPluginConfig.isIgnoreSchemaChange();
     disablePushdown = !formatPluginConfig.isEnablePushdown();
+    nonExistentColumnsProjection = formatPluginConfig.isNonExistentFieldSupport();
   }
 
   @Override
@@ -230,6 +234,9 @@ public class MaprDBJsonRecordReader extends AbstractRecordReader {
       }
     }
 
+    if (nonExistentColumnsProjection && recordCount > 0) {
+      JsonReaderUtils.ensureAtLeastOneField(vectorWriter, getColumns(), allTextMode, Collections.EMPTY_LIST);
+    }
     vectorWriter.setValueCount(recordCount);
     logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
     return recordCount;

http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/TestSimpleJson.java
----------------------------------------------------------------------
diff --git a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/TestSimpleJson.java b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/TestSimpleJson.java
index 998aae6..26f54b8 100644
--- a/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/TestSimpleJson.java
+++ b/contrib/format-maprdb/src/test/java/com/mapr/drill/maprdb/tests/json/TestSimpleJson.java
@@ -58,6 +58,16 @@ public class TestSimpleJson extends BaseJsonTest {
   }
 
   @Test
+  public void testSelectNonExistentColumns() throws Exception {
+    setColumnWidths(new int[] {23});
+    final String sql = "SELECT\n"
+            + "  something\n"
+            + "FROM\n"
+            + "  hbase.business business limit 5";
+    runSQLAndVerifyCount(sql, 5);
+  }
+
+  @Test
   public void testKVGen() throws Exception {
     setColumnWidths(new int[] {21, 10, 6});
     final String sql = "select _id, t.parking[0].`key` K, t.parking[0].`value` V from"

http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReader.java
index cfad551..4ffbb26 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReader.java
@@ -104,71 +104,7 @@ public class JsonReader extends BaseJsonProcessor {
   @SuppressWarnings("resource")
   @Override
   public void ensureAtLeastOneField(ComplexWriter writer) {
-    List<BaseWriter.MapWriter> writerList = Lists.newArrayList();
-    List<PathSegment> fieldPathList = Lists.newArrayList();
-    BitSet emptyStatus = new BitSet(columns.size());
-
-    // first pass: collect which fields are empty
-    for (int i = 0; i < columns.size(); i++) {
-      SchemaPath sp = columns.get(i);
-      PathSegment fieldPath = sp.getRootSegment();
-      BaseWriter.MapWriter fieldWriter = writer.rootAsMap();
-      while (fieldPath.getChild() != null && !fieldPath.getChild().isArray()) {
-        fieldWriter = fieldWriter.map(fieldPath.getNameSegment().getPath());
-        fieldPath = fieldPath.getChild();
-      }
-      writerList.add(fieldWriter);
-      fieldPathList.add(fieldPath);
-      if (fieldWriter.isEmptyMap()) {
-        emptyStatus.set(i, true);
-      }
-      if (i == 0 && !allTextMode) {
-        // when allTextMode is false, there is not much benefit to producing all
-        // the empty
-        // fields; just produce 1 field. The reason is that the type of the
-        // fields is
-        // unknown, so if we produce multiple Integer fields by default, a
-        // subsequent batch
-        // that contains non-integer fields will error out in any case. Whereas,
-        // with
-        // allTextMode true, we are sure that all fields are going to be treated
-        // as varchar,
-        // so it makes sense to produce all the fields, and in fact is necessary
-        // in order to
-        // avoid schema change exceptions by downstream operators.
-        break;
-      }
-
-    }
-
-    // second pass: create default typed vectors corresponding to empty fields
-    // Note: this is not easily do-able in 1 pass because the same fieldWriter
-    // may be
-    // shared by multiple fields whereas we want to keep track of all fields
-    // independently,
-    // so we rely on the emptyStatus.
-    for (int j = 0; j < fieldPathList.size(); j++) {
-      BaseWriter.MapWriter fieldWriter = writerList.get(j);
-      PathSegment fieldPath = fieldPathList.get(j);
-      if (emptyStatus.get(j)) {
-        if (allTextMode) {
-          fieldWriter.varChar(fieldPath.getNameSegment().getPath());
-        } else {
-          fieldWriter.integer(fieldPath.getNameSegment().getPath());
-        }
-      }
-    }
-
-    for (ListWriter field : emptyArrayWriters) {
-      // checks that array has not been initialized
-      if (field.getValueCapacity() == 0) {
-        if (allTextMode) {
-          field.varChar();
-        } else {
-          field.integer();
-        }
-      }
-    }
+    JsonReaderUtils.ensureAtLeastOneField(writer, columns, allTextMode, emptyArrayWriters);
   }
 
   public void setSource(int start, int end, DrillBuf buf) throws IOException {

http://git-wip-us.apache.org/repos/asf/drill/blob/125a9271/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReaderUtils.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReaderUtils.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReaderUtils.java
new file mode 100644
index 0000000..775be02
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/JsonReaderUtils.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.vector.complex.fn;
+
+import com.google.common.collect.Lists;
+import org.apache.drill.common.expression.PathSegment;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.exec.vector.complex.writer.BaseWriter;
+
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.List;
+
+public class JsonReaderUtils {
+
+  public static void ensureAtLeastOneField(BaseWriter.ComplexWriter writer,
+                                    Collection<SchemaPath> columns,
+                                    boolean allTextMode,
+                                    List<BaseWriter.ListWriter> emptyArrayWriters) {
+
+    List<BaseWriter.MapWriter> writerList = Lists.newArrayList();
+    List<PathSegment> fieldPathList = Lists.newArrayList();
+    BitSet emptyStatus = new BitSet(columns.size());
+    int i = 0;
+
+    // first pass: collect which fields are empty
+    for (SchemaPath sp : columns) {
+      PathSegment fieldPath = sp.getRootSegment();
+      BaseWriter.MapWriter fieldWriter = writer.rootAsMap();
+      while (fieldPath.getChild() != null && !fieldPath.getChild().isArray()) {
+        fieldWriter = fieldWriter.map(fieldPath.getNameSegment().getPath());
+        fieldPath = fieldPath.getChild();
+      }
+      writerList.add(fieldWriter);
+      fieldPathList.add(fieldPath);
+      if (fieldWriter.isEmptyMap()) {
+        emptyStatus.set(i, true);
+      }
+      if (i == 0 && !allTextMode) {
+        // when allTextMode is false, there is not much benefit to producing all
+        // the empty fields; just produce 1 field. The reason is that the type of the
+        // fields is unknown, so if we produce multiple Integer fields by default, a
+        // subsequent batch that contains non-integer fields will error out in any case.
+        // Whereas, with allTextMode true, we are sure that all fields are going to be
+        // treated as varchar, so it makes sense to produce all the fields, and in fact
+        // is necessary in order to avoid schema change exceptions by downstream operators.
+        break;
+      }
+      i++;
+    }
+
+    // second pass: create default typed vectors corresponding to empty fields
+    // Note: this is not easily do-able in 1 pass because the same fieldWriter
+    // may be shared by multiple fields whereas we want to keep track of all fields
+    // independently, so we rely on the emptyStatus.
+    for (int j = 0; j < fieldPathList.size(); j++) {
+      BaseWriter.MapWriter fieldWriter = writerList.get(j);
+      PathSegment fieldPath = fieldPathList.get(j);
+      if (emptyStatus.get(j)) {
+        if (allTextMode) {
+          fieldWriter.varChar(fieldPath.getNameSegment().getPath());
+        } else {
+          fieldWriter.integer(fieldPath.getNameSegment().getPath());
+        }
+      }
+    }
+
+    for (BaseWriter.ListWriter field : emptyArrayWriters) {
+      // checks that array has not been initialized
+      if (field.getValueCapacity() == 0) {
+        if (allTextMode) {
+          field.varChar();
+        } else {
+          field.integer();
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
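
A note on the two-pass structure preserved by this refactor: emptiness is recorded up front (in emptyStatus) because distinct columns can share a parent map writer. For example, with columns a.b and a.c, both resolve to the writer for map `a`; once the second pass creates a default child for b, that writer is no longer an empty map, so without the per-column snapshot c would be skipped.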


[2/2] drill git commit: DRILL-5878: TableNotFound exception is being reported for a wrong storage plugin.

Posted by am...@apache.org.
DRILL-5878: TableNotFound exception is being reported for a wrong storage plugin.

Address review comments.


Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7a2fc87e
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7a2fc87e
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7a2fc87e

Branch: refs/heads/master
Commit: 7a2fc87ee20f706d85cb5c90cc441e6b44b71592
Parents: 125a927
Author: Hanumath Rao Maduri <hm...@maprtech.com>
Authored: Sat Sep 16 16:54:00 2017 -0700
Committer: Aman Sinha <as...@maprtech.com>
Committed: Sun Nov 5 08:22:33 2017 -0800

----------------------------------------------------------------------
 .../drill/exec/planner/sql/SchemaUtilites.java  | 35 +++++++-
 .../drill/exec/planner/sql/SqlConverter.java    | 50 ++++++++++--
 .../drill/exec/store/dfs/TestFileSelection.java |  3 -
 .../store/dfs/TestSchemaNotFoundException.java  | 86 ++++++++++++++++++++
 4 files changed, 164 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/drill/blob/7a2fc87e/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
index 51c3cb1..7d42e57 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SchemaUtilites.java
@@ -77,6 +77,29 @@ public class SchemaUtilites {
     return findSchema(defaultSchema, schemaPathAsList);
   }
 
+  /**
+   * Utility function to get the common prefix between two supplied schema paths.
+   *
+   * E.g. if the defaultSchema is dfs and the schemaPath is dfs.tmp.`clicks.json`,
+   *      then this function returns dfs when caseSensitive is false; with
+   *      caseSensitive true and differing case it returns the empty string.
+   *
+   * @param defaultSchema default schema path
+   * @param schemaPath current schema path
+   * @param isCaseSensitive true if a case-sensitive comparison is required.
+   * @return common prefix of the two schema paths
+   */
+  public static String getPrefixSchemaPath(final String defaultSchema,
+                                           final String schemaPath,
+                                           final boolean isCaseSensitive) {
+    if (!isCaseSensitive) {
+      return Strings.commonPrefix(defaultSchema.toLowerCase(), schemaPath.toLowerCase());
+    }
+    else {
+      return Strings.commonPrefix(defaultSchema, schemaPath);
+    }
+  }
+
   /** Utility method to search for schema path starting from the given <i>schema</i> reference */
   private static SchemaPlus searchSchemaTree(SchemaPlus schema, final List<String> schemaPath) {
     for (String schemaName : schemaPath) {
@@ -93,7 +116,7 @@ public class SchemaUtilites {
    * @return true if the given <i>schema</i> is root schema. False otherwise.
    */
   public static boolean isRootSchema(SchemaPlus schema) {
-    return schema.getParentSchema() == null;
+    return schema == null || schema.getParentSchema() == null;
   }
 
   /**
@@ -149,6 +172,16 @@ public class SchemaUtilites {
         .build(logger);
   }
 
+  /** Utility method to throw {@link UserException} with context information */
+  public static void throwSchemaNotFoundException(final SchemaPlus defaultSchema, final List<String> givenSchemaPath) {
+    throw UserException.validationError()
+            .message("Schema [%s] is not valid with respect to either root schema or current default schema.",
+                    givenSchemaPath)
+            .addContext("Current default schema: ",
+                    isRootSchema(defaultSchema) ? "No default schema selected" : getSchemaPath(defaultSchema))
+            .build(logger);
+  }
+
   /**
    * Given reference to default schema in schema tree, search for schema with given <i>schemaPath</i>. Once a schema is
   * found, resolve it into a mutable <i>AbstractDrillSchema</i> instance. A {@link UserException} is thrown when:
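
To make the common-prefix semantics concrete, here is a small self-contained sketch; it restates getPrefixSchemaPath outside the class (the demo class name is illustrative, the Guava call is the same one used above):

import com.google.common.base.Strings;

public class PrefixSchemaDemo {
  static String getPrefixSchemaPath(String defaultSchema, String schemaPath, boolean caseSensitive) {
    return caseSensitive
        ? Strings.commonPrefix(defaultSchema, schemaPath)
        : Strings.commonPrefix(defaultSchema.toLowerCase(), schemaPath.toLowerCase());
  }

  public static void main(String[] args) {
    System.out.println(getPrefixSchemaPath("dfs", "dfs.tmp", false)); // "dfs"
    System.out.println(getPrefixSchemaPath("DFS", "dfs.tmp", true));  // "" (case mismatch)
  }
}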

http://git-wip-us.apache.org/repos/asf/drill/blob/7a2fc87e/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
index 5778041..798e3a4 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/SqlConverter.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 
+import com.google.common.base.Strings;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.jdbc.CalciteSchema;
 import org.apache.calcite.jdbc.CalciteSchemaImpl;
@@ -53,6 +54,8 @@ import org.apache.calcite.sql.validate.SqlValidatorImpl;
 import org.apache.calcite.sql.validate.SqlValidatorScope;
 import org.apache.calcite.sql2rel.RelDecorrelator;
 import org.apache.calcite.sql2rel.SqlToRelConverter;
+import org.apache.calcite.util.Util;
+import org.apache.commons.collections.ListUtils;
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.types.Types;
@@ -114,7 +117,7 @@ public class SqlConverter {
     this.session = context.getSession();
     this.drillConfig = context.getConfig();
     this.catalog = new DrillCalciteCatalogReader(
-        CalciteSchemaImpl.from(rootSchema),
+        this.rootSchema,
         parserConfig.caseSensitive(),
         CalciteSchemaImpl.from(defaultSchema).path(null),
         typeFactory,
@@ -281,7 +284,7 @@ public class SqlConverter {
     @Override
     public RelNode expandView(RelDataType rowType, String queryString, List<String> schemaPath) {
       final DrillCalciteCatalogReader catalogReader = new DrillCalciteCatalogReader(
-          CalciteSchemaImpl.from(rootSchema),
+          rootSchema,
           parserConfig.caseSensitive(),
           schemaPath,
           typeFactory,
@@ -294,7 +297,7 @@ public class SqlConverter {
     @Override
     public RelNode expandView(RelDataType rowType, String queryString, SchemaPlus rootSchema, List<String> schemaPath) {
       final DrillCalciteCatalogReader catalogReader = new DrillCalciteCatalogReader(
-          CalciteSchemaImpl.from(rootSchema), // new root schema
+          rootSchema, // new root schema
           parserConfig.caseSensitive(),
           schemaPath,
           typeFactory,
@@ -431,17 +434,20 @@ public class SqlConverter {
     private final DrillConfig drillConfig;
     private final UserSession session;
     private boolean allowTemporaryTables;
+    private final SchemaPlus rootSchema;
 
-    DrillCalciteCatalogReader(CalciteSchema rootSchema,
+
+    DrillCalciteCatalogReader(SchemaPlus rootSchema,
                               boolean caseSensitive,
                               List<String> defaultSchema,
                               JavaTypeFactory typeFactory,
                               DrillConfig drillConfig,
                               UserSession session) {
-      super(rootSchema, caseSensitive, defaultSchema, typeFactory);
+      super(CalciteSchemaImpl.from(rootSchema), caseSensitive, defaultSchema, typeFactory);
       this.drillConfig = drillConfig;
       this.session = session;
       this.allowTemporaryTables = true;
+      this.rootSchema = rootSchema;
     }
 
     /**
@@ -481,7 +487,39 @@ public class SqlConverter {
             .message("Temporary tables usage is disallowed. Used temporary table name: %s.", names)
             .build(logger);
       }
-      return super.getTable(names);
+
+      RelOptTableImpl table = super.getTable(names);
+
+      // Check the schema and throw a valid SchemaNotFound exception instead of TableNotFound exception.
+      if (table == null) {
+        isValidSchema(names);
+      }
+
+      return table;
+    }
+
+    /**
+     * Checks that the schema encoded in <i>names</i> is valid: either no
+     * schema is indicated (only the table name appears in the names list),
+     * or the schema resolves against the root or the current default schema.
+     *
+     * @param names             list of schema and table names; the table name is always the last element
+     * @throws UserException if the schema is not valid.
+     */
+    private void isValidSchema(final List<String> names) throws UserException {
+      SchemaPlus defaultSchema = session.getDefaultSchema(this.rootSchema);
+      String defaultSchemaCombinedPath = SchemaUtilites.getSchemaPath(defaultSchema);
+      List<String> schemaPath = Util.skipLast(names);
+      String schemaPathCombined = SchemaUtilites.getSchemaPath(schemaPath);
+      String commonPrefix = SchemaUtilites.getPrefixSchemaPath(defaultSchemaCombinedPath,
+              schemaPathCombined,
+              parserConfig.caseSensitive());
+      boolean isPrefixDefaultPath = commonPrefix.length() == defaultSchemaCombinedPath.length();
+      List<String> fullSchemaPath = Strings.isNullOrEmpty(defaultSchemaCombinedPath) ? schemaPath :
+              isPrefixDefaultPath ? schemaPath : ListUtils.union(SchemaUtilites.getSchemaPathAsList(defaultSchema), schemaPath);
+      if (names.size() > 1 && (SchemaUtilites.findSchema(this.rootSchema, fullSchemaPath) == null &&
+              SchemaUtilites.findSchema(this.rootSchema, schemaPath) == null)) {
+        SchemaUtilites.throwSchemaNotFoundException(defaultSchema, schemaPath);
+      }
     }
 
     /**
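
For intuition, here is a worked trace of the validation decision above, restated with plain strings (a simplification: the real code resolves the paths against the Calcite schema tree via SchemaUtilites.findSchema rather than comparing strings):

import com.google.common.base.Strings;

import java.util.Arrays;
import java.util.List;

public class SchemaValidationTrace {
  public static void main(String[] args) {
    // Default schema "dfs"; failing query: SELECT * FROM dfs1.`t`
    List<String> names = Arrays.asList("dfs1", "t");
    List<String> schemaPath = names.subList(0, names.size() - 1);  // Util.skipLast -> [dfs1]
    String defaultSchemaCombinedPath = "dfs";
    String schemaPathCombined = String.join(".", schemaPath);      // "dfs1"

    String commonPrefix = Strings.commonPrefix(defaultSchemaCombinedPath, schemaPathCombined);
    boolean isPrefixDefaultPath = commonPrefix.length() == defaultSchemaCombinedPath.length();  // true

    // The default path is a prefix, so nothing is prepended; the lookup targets
    // [dfs1] directly, resolves under neither root nor default schema, and the
    // user sees a schema-not-found error naming dfs1 instead of a misleading
    // table-not-found error against the wrong storage plugin.
    System.out.println("schema path to validate: " + schemaPath);
  }
}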

http://git-wip-us.apache.org/repos/asf/drill/blob/7a2fc87e/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFileSelection.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFileSelection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFileSelection.java
index 82f45ae..d23cd1f 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFileSelection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestFileSelection.java
@@ -26,9 +26,7 @@ import com.google.common.collect.ImmutableList;
 import org.apache.drill.BaseTestQuery;
 import org.apache.drill.common.util.TestTools;
 import org.apache.hadoop.fs.FileStatus;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 
 public class TestFileSelection extends BaseTestQuery {
   private static final List<FileStatus> EMPTY_STATUSES = ImmutableList.of();
@@ -62,5 +60,4 @@ public class TestFileSelection extends BaseTestQuery {
       throw ex;
     }
   }
-
 }

http://git-wip-us.apache.org/repos/asf/drill/blob/7a2fc87e/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
new file mode 100644
index 0000000..cca2bd0
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/store/dfs/TestSchemaNotFoundException.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.dfs;
+
+import org.apache.drill.BaseTestQuery;
+import org.apache.drill.common.util.TestTools;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+public class TestSchemaNotFoundException extends BaseTestQuery {
+
+    @Test(expected = Exception.class)
+    public void testSchemaNotFoundForWrongStoragePlgn() throws Exception {
+        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String query = String.format("select * from dfs1.`%s`", table);
+        try {
+            testNoResult(query);
+        } catch (Exception ex) {
+            final String pattern = String.format("[[dfs1]] is not valid with respect to either root schema or current default schema").toLowerCase();
+            final boolean isSchemaNotFound = ex.getMessage().toLowerCase().contains(pattern);
+            assertTrue(isSchemaNotFound);
+            throw ex;
+        }
+    }
+
+    @Test(expected = Exception.class)
+    public void testSchemaNotFoundForWrongWorkspace() throws Exception {
+        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String query = String.format("select * from dfs.tmp1.`%s`", table);
+        try {
+            testNoResult(query);
+        } catch (Exception ex) {
+            final String pattern = String.format("[[dfs, tmp1]] is not valid with respect to either root schema or current default schema").toLowerCase();
+            final boolean isSchemaNotFound = ex.getMessage().toLowerCase().contains(pattern);
+            assertTrue(isSchemaNotFound);
+            throw ex;
+        }
+    }
+
+    @Test(expected = Exception.class)
+    public void testSchemaNotFoundForWrongWorkspaceUsingDefaultWorkspace() throws Exception {
+        final String table = String.format("%s/empty", TestTools.getTestResourcesPath());
+        final String query = String.format("select * from tmp1.`%s`", table);
+        try {
+            testNoResult("use dfs");
+            testNoResult(query);
+        } catch (Exception ex) {
+            final String pattern = String.format("[[tmp1]] is not valid with respect to either root schema or current default schema").toLowerCase();
+            final boolean isSchemaNotFound = ex.getMessage().toLowerCase().contains(pattern);
+            assertTrue(isSchemaNotFound);
+            throw ex;
+        }
+    }
+
+    @Test(expected = Exception.class)
+    public void testTableNotFoundException() throws Exception {
+        final String table = String.format("%s/empty1", TestTools.getTestResourcesPath());
+        final String query = String.format("select * from tmp.`%s`", table);
+        try {
+            testNoResult("use dfs");
+            testNoResult(query);
+        } catch (Exception ex) {
+            final String pattern = String.format("[[dfs, tmp1]] is not valid with respect to either root schema or current default schema").toLowerCase();
+            final boolean isSchemaNotFound = ex.getMessage().toLowerCase().contains(pattern);
+            final boolean isTableNotFound = ex.getMessage().toLowerCase().contains(String.format("%s' not found", table).toLowerCase());
+            assertTrue(!isSchemaNotFound && isTableNotFound);
+            throw ex;
+        }
+    }
+}