Posted to dev@drill.apache.org by ja...@apache.org on 2014/05/04 18:55:42 UTC

[20/21] Rename SQLParser to exec/jdbc. Rename jdbc-all to exec/jdbc-all

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/main/java/org/apache/drill/jdbc/GlobalServiceSetReference.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/GlobalServiceSetReference.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/GlobalServiceSetReference.java
new file mode 100644
index 0000000..9326283
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/GlobalServiceSetReference.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import org.apache.drill.exec.server.RemoteServiceSet;
+
+public class GlobalServiceSetReference {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(GlobalServiceSetReference.class);
+
+  public static final ThreadLocal<RemoteServiceSet> SETS = new ThreadLocal<RemoteServiceSet>();
+
+}

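For context on how the thread-local above gets used: a test can install a RemoteServiceSet for the current thread so that code running on that thread reuses it. A minimal sketch, assuming the RemoteServiceSet.getLocalServiceSet() factory from the Drill execution module:

    RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
    GlobalServiceSetReference.SETS.set(serviceSet);
    try {
      // Code running on this thread can now look up the shared service set.
      RemoteServiceSet current = GlobalServiceSetReference.SETS.get();
    } finally {
      GlobalServiceSetReference.SETS.remove(); // avoid leaking across reused threads
    }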
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/main/java/org/apache/drill/jdbc/MetaImpl.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/MetaImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/MetaImpl.java
new file mode 100644
index 0000000..5d8b6a7
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/MetaImpl.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import java.sql.ResultSet;
+import java.util.List;
+
+import net.hydromatic.avatica.AvaticaPrepareResult;
+import net.hydromatic.avatica.AvaticaResultSet;
+import net.hydromatic.avatica.AvaticaStatement;
+import net.hydromatic.avatica.Cursor;
+import net.hydromatic.avatica.Meta;
+
+public class MetaImpl implements Meta {
+  
+  static final Driver DRIVER = new Driver();
+
+  final DrillConnectionImpl connection;
+
+  public MetaImpl(DrillConnectionImpl connection) {
+    this.connection = connection;
+  }
+
+  public String getSqlKeywords() {
+    return "";
+  }
+
+  public String getNumericFunctions() {
+    return "";
+  }
+
+  public String getStringFunctions() {
+    return "";
+  }
+
+  public String getSystemFunctions() {
+    return "";
+  }
+
+  public String getTimeDateFunctions() {
+    return "";
+  }
+
+  public static ResultSet getEmptyResultSet() {
+    // Stub: returns null rather than a truly empty ResultSet, so all of the
+    // metadata methods below currently yield null result sets.
+    return null;
+  }
+
+  public ResultSet getTables(String catalog, final Pat schemaPattern, final Pat tableNamePattern,
+      final List<String> typeList) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getColumns(String catalog, Pat schemaPattern, Pat tableNamePattern, Pat columnNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getSchemas(String catalog, Pat schemaPattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getCatalogs() {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getTableTypes() {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getProcedures(String catalog, Pat schemaPattern, Pat procedureNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getProcedureColumns(String catalog, Pat schemaPattern, Pat procedureNamePattern,
+      Pat columnNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getColumnPrivileges(String catalog, String schema, String table, Pat columnNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getTablePrivileges(String catalog, Pat schemaPattern, Pat tableNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getBestRowIdentifier(String catalog, String schema, String table, int scope, boolean nullable) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getVersionColumns(String catalog, String schema, String table) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getPrimaryKeys(String catalog, String schema, String table) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getImportedKeys(String catalog, String schema, String table) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getExportedKeys(String catalog, String schema, String table) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTable,
+      String foreignCatalog, String foreignSchema, String foreignTable) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getTypeInfo() {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getIndexInfo(String catalog, String schema, String table, boolean unique, boolean approximate) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getUDTs(String catalog, Pat schemaPattern, Pat typeNamePattern, int[] types) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getSuperTypes(String catalog, Pat schemaPattern, Pat typeNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getSuperTables(String catalog, Pat schemaPattern, Pat tableNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getAttributes(String catalog, Pat schemaPattern, Pat typeNamePattern, Pat attributeNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getClientInfoProperties() {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getFunctions(String catalog, Pat schemaPattern, Pat functionNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getFunctionColumns(String catalog, Pat schemaPattern, Pat functionNamePattern, Pat columnNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public ResultSet getPseudoColumns(String catalog, Pat schemaPattern, Pat tableNamePattern, Pat columnNamePattern) {
+    return getEmptyResultSet();
+  }
+
+  public Cursor createCursor(AvaticaResultSet resultSet_) {
+    return ((DrillResultSet) resultSet_).cursor;
+  }
+
+  public AvaticaPrepareResult prepare(AvaticaStatement statement_, String sql) {
+    //DrillStatement statement = (DrillStatement) statement_;
+    return new DrillPrepareResult(sql);
+  }
+
+  interface Named {
+    String getName();
+  }
+
+}
\ No newline at end of file

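Note that every metadata method above funnels through getEmptyResultSet(), which currently returns null. A hedged sketch of the visible effect, assuming Avatica routes DatabaseMetaData calls to this Meta implementation:

    Connection connection = DriverManager.getConnection("jdbc:drill:zk=local");
    // Expected to be null, not an empty result set, until MetaImpl is fleshed out.
    ResultSet tables = connection.getMetaData().getTables(null, null, "%", null);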
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/main/java/org/apache/drill/jdbc/SchemaChangeListener.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/SchemaChangeListener.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/SchemaChangeListener.java
new file mode 100644
index 0000000..38a39da
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/SchemaChangeListener.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import org.apache.drill.exec.record.BatchSchema;
+
+public interface SchemaChangeListener {
+  public void schemaChanged(BatchSchema newSchema);
+}

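A minimal sketch of implementing the callback (hypothetical listener that just logs):

    SchemaChangeListener listener = new SchemaChangeListener() {
      public void schemaChanged(BatchSchema newSchema) {
        // Invoked when the schema of incoming record batches changes mid-query.
        System.out.println("schema changed: " + newSchema);
      }
    };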
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/main/java/org/apache/drill/jdbc/SqlTimeoutException.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/SqlTimeoutException.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/SqlTimeoutException.java
new file mode 100644
index 0000000..c24858e
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/SqlTimeoutException.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import java.sql.SQLException;
+
+/**
+ * Indicates that an operation timed out. This is not an error; you can
+ * retry the operation.
+ */
+public class SqlTimeoutException
+    extends SQLException
+{
+  SqlTimeoutException() {
+    // SQLException(reason, SQLState, vendorCode)
+    // REVIEW mb 19-Jul-05 Is there a standard SQLState?
+    super("timeout", null, 0);
+  }
+}
\ No newline at end of file

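Because the constructor is package-private, client code only ever catches this exception. Since the javadoc marks it as retryable, a caller might loop; a sketch, assuming a `statement` and `sql` in scope and a bounded retry count:

    ResultSet resultSet = null;
    for (int attempt = 0; attempt < 3 && resultSet == null; attempt++) {
      try {
        resultSet = statement.executeQuery(sql);
      } catch (SqlTimeoutException e) {
        // Not an error per the javadoc above; simply try again.
      }
    }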
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/main/java/org/apache/drill/jdbc/package-info.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/package-info.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/package-info.java
new file mode 100644
index 0000000..f465718
--- /dev/null
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * JDBC driver for Drill.
+ */
+package org.apache.drill.jdbc;

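Basic driver usage, as exercised by testLoadDriver/testConnect later in this patch:

    Class.forName("org.apache.drill.jdbc.Driver");
    Connection connection = DriverManager.getConnection("jdbc:drill:zk=local");
    Statement statement = connection.createStatement();
    ResultSet resultSet = statement.executeQuery("select * from donuts");
    while (resultSet.next()) {
      System.out.println(resultSet.getObject(1));
    }
    connection.close();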
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/JdbcTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/JdbcTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/JdbcTest.java
new file mode 100644
index 0000000..3f44ed8
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/JdbcTest.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc;
+
+import org.apache.drill.exec.ExecTest;
+
+public class JdbcTest extends ExecTest {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(JdbcTest.class);
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcAssert.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcAssert.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcAssert.java
new file mode 100644
index 0000000..198d272
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcAssert.java
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import net.hydromatic.linq4j.Ord;
+
+import org.apache.drill.common.config.DrillConfig;
+import org.apache.drill.common.logical.LogicalPlan;
+import org.apache.drill.common.logical.data.LogicalOperator;
+import org.apache.drill.common.util.Hook;
+import org.junit.Assert;
+
+import com.google.common.base.Function;
+
+/**
+ * Fluent interface for writing JDBC and query-planning tests.
+ */
+public class JdbcAssert {
+  public static ModelAndSchema withModel(String model, String schema) {
+    final Properties info = new Properties();
+    info.setProperty("schema", schema);
+    info.setProperty("model", "inline:" + model);
+    return new ModelAndSchema(info);
+  }
+
+  public static ModelAndSchema withFull(String schema) {
+    final Properties info = new Properties();
+    info.setProperty("schema", schema);
+    return new ModelAndSchema(info);
+  }
+
+  public static ModelAndSchema withNoDefaultSchema() {
+    return new ModelAndSchema();
+  }
+
+  static String toString(ResultSet resultSet, int expectedRecordCount) throws SQLException {
+    StringBuilder buf = new StringBuilder();
+    int total = 0, n;
+    while (resultSet.next()) {
+      n = resultSet.getMetaData().getColumnCount();
+      total++;
+      String sep = "";
+      for (int i = 1; i <= n; i++) {
+        buf.append(sep)
+            .append(resultSet.getMetaData().getColumnLabel(i))
+            .append("=")
+            .append(resultSet.getObject(i));
+        sep = "; ";
+      }
+      buf.append("\n");
+    }
+    return buf.toString();
+  }
+
+  static String toString(ResultSet resultSet) throws SQLException {
+    StringBuilder buf = new StringBuilder();
+    final List<Ord<String>> columns = columnLabels(resultSet);
+    while (resultSet.next()) {
+      for (Ord<String> column : columns) {
+        buf.append(column.i == 1 ? "" : "; ").append(column.e).append("=").append(resultSet.getObject(column.i));
+      }
+      buf.append("\n");
+    }
+    return buf.toString();
+  }
+
+
+  static List<String> toStrings(ResultSet resultSet) throws SQLException {
+    final List<String> list = new ArrayList<>();
+    StringBuilder buf = new StringBuilder();
+    final List<Ord<String>> columns = columnLabels(resultSet);
+    while (resultSet.next()) {
+      buf.setLength(0);
+      for (Ord<String> column : columns) {
+        buf.append(column.i == 1 ? "" : "; ").append(column.e).append("=").append(resultSet.getObject(column.i));
+      }
+      list.add(buf.toString());
+    }
+    return list;
+  }
+
+  private static List<Ord<String>> columnLabels(ResultSet resultSet) throws SQLException {
+    int n = resultSet.getMetaData().getColumnCount();
+    List<Ord<String>> columns = new ArrayList<>();
+    for (int i = 1; i <= n; i++) {
+      columns.add(Ord.of(i, resultSet.getMetaData().getColumnLabel(i)));
+    }
+    return columns;
+  }
+
+  public static class ModelAndSchema {
+    private final Properties info;
+    private final ConnectionFactory connectionFactory;
+
+    public ModelAndSchema() {
+      this(null);
+    }
+
+    public ModelAndSchema(Properties info) {
+      this.info = info;
+      this.connectionFactory = new ConnectionFactory() {
+        public Connection createConnection() throws Exception {
+          Class.forName("org.apache.drill.jdbc.Driver");
+          return DriverManager.getConnection("jdbc:drill:zk=local", ModelAndSchema.this.info);
+        }
+      };
+    }
+
+    public TestDataConnection sql(String sql) {
+      return new TestDataConnection(connectionFactory, sql);
+    }
+
+    public <T> T withConnection(Function<Connection, T> function) throws Exception {
+      Connection connection = null;
+      try {
+        connection = connectionFactory.createConnection();
+        return function.apply(connection);
+      } finally {
+        if (connection != null) {
+          connection.close();
+        }
+      }
+    }
+  }
+
+  public static class TestDataConnection {
+    private final ConnectionFactory connectionFactory;
+    private final String sql;
+
+    TestDataConnection(ConnectionFactory connectionFactory, String sql) {
+      this.connectionFactory = connectionFactory;
+      this.sql = sql;
+    }
+
+    /**
+     * Checks that the current SQL statement returns the expected result.
+     */
+    public TestDataConnection returns(String expected) throws Exception {
+      Connection connection = null;
+      Statement statement = null;
+      try {
+        connection = connectionFactory.createConnection();
+        statement = connection.createStatement();
+        ResultSet resultSet = statement.executeQuery(sql);
+        expected = expected.trim();
+        String result = JdbcAssert.toString(resultSet).trim();
+
+        Assert.assertEquals(String.format("Generated string:\n%s\ndoes not match:\n%s", result, expected),
+            expected, result);
+        resultSet.close();
+        return this;
+      } finally {
+        if (statement != null) {
+          statement.close();
+        }
+        if (connection != null) {
+          connection.close();
+        }
+      }
+    }
+
+
+    /**
+     * Checks that the current SQL statement returns the expected result lines. Lines are compared unordered; the test
+     * succeeds if the query returns these lines in any order.
+     */
+    public TestDataConnection returnsUnordered(String... expecteds) throws Exception {
+      Connection connection = null;
+      Statement statement = null;
+      try {
+        connection = connectionFactory.createConnection();
+        statement = connection.createStatement();
+        ResultSet resultSet = statement.executeQuery(sql);
+        Assert.assertEquals(unsortedList(Arrays.asList(expecteds)), unsortedList(JdbcAssert.toStrings(resultSet)));
+        resultSet.close();
+        return this;
+      } finally {
+        if (statement != null) {
+          statement.close();
+        }
+        if (connection != null) {
+          connection.close();
+        }
+      }
+    }
+
+    public TestDataConnection displayResults(int recordCount) throws Exception {
+      // record count check is done in toString method
+
+      Connection connection = null;
+      Statement statement = null;
+      try {
+        connection = connectionFactory.createConnection();
+        statement = connection.createStatement();
+        ResultSet resultSet = statement.executeQuery(sql);
+        System.out.println(JdbcAssert.toString(resultSet, recordCount));
+        resultSet.close();
+        return this;
+      } finally {
+        if (statement != null) {
+          statement.close();
+        }
+        if (connection != null) {
+          connection.close();
+        }
+      }
+
+    }
+
+    /** Wraps each line in a sorted set so that two row sets compare equal regardless of order. */
+    private SortedSet<String> unsortedList(List<String> strings) {
+      final SortedSet<String> set = new TreeSet<>();
+      for (String string : strings) {
+        set.add(string + "\n");
+      }
+      return set;
+    }
+
+    public LogicalPlan logicalPlan() {
+      final String[] plan0 = {null};
+      Connection connection = null;
+      Statement statement = null;
+      final Hook.Closeable x = Hook.LOGICAL_PLAN.add(new Function<String, Void>() {
+        public Void apply(String o) {
+          plan0[0] = o;
+          return null;
+        }
+      });
+      try {
+        connection = connectionFactory.createConnection();
+        statement = connection.prepareStatement(sql);
+        statement.close();
+        final String plan = plan0[0].trim();
+        return LogicalPlan.parse(DrillConfig.create(), plan);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      } finally {
+        if (statement != null) {
+          try {
+            statement.close();
+          } catch (SQLException e) {
+            // ignore
+          }
+        }
+        if (connection != null) {
+          try {
+            connection.close();
+          } catch (SQLException e) {
+            // ignore
+          }
+        }
+        x.close();
+      }
+    }
+
+    public <T extends LogicalOperator> T planContains(final Class<T> operatorClazz) {
+      return (T) Iterables.find(logicalPlan().getSortedOperators(), new Predicate<LogicalOperator>() {
+        @Override
+        public boolean apply(LogicalOperator input) {
+          return input.getClass().equals(operatorClazz);
+        }
+      });
+    }
+  }
+
+  private interface ConnectionFactory {
+    Connection createConnection() throws Exception;
+  }
+}
+
+// End JdbcAssert.java

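The fluent interface defined above reads like this in practice (taken from the tests that follow):

    // Compare query output against an expected string ...
    JdbcAssert.withModel(MODEL, "DONUTS")
        .sql("select _MAP['ppu'] as ppu from donuts")
        .returns("PPU=0.55\n" + "PPU=0.69\n" + "PPU=0.55\n" + "PPU=0.69\n" + "PPU=1.0\n");

    // ... or capture and inspect the logical plan instead of the rows.
    LogicalPlan plan = JdbcAssert.withModel(MODEL, "DONUTS")
        .sql("select _MAP['ppu'] as ppu from donuts")
        .logicalPlan();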
http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcDataTest.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcDataTest.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcDataTest.java
new file mode 100644
index 0000000..0e9f8a0
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/JdbcDataTest.java
@@ -0,0 +1,521 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.Map;
+
+import org.apache.drill.common.logical.LogicalPlan;
+import org.apache.drill.common.logical.PlanProperties;
+import org.apache.drill.common.logical.StoragePluginConfig;
+import org.apache.drill.common.logical.data.Filter;
+import org.apache.drill.common.logical.data.Join;
+import org.apache.drill.common.logical.data.Limit;
+import org.apache.drill.common.logical.data.LogicalOperator;
+import org.apache.drill.common.logical.data.Order;
+import org.apache.drill.common.logical.data.Project;
+import org.apache.drill.common.logical.data.Scan;
+import org.apache.drill.common.logical.data.Store;
+import org.apache.drill.common.logical.data.Union;
+import org.apache.drill.jdbc.JdbcTest;
+import org.apache.drill.jdbc.test.JdbcAssert.TestDataConnection;
+import org.eigenbase.rel.JoinRelType;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import com.google.common.io.Resources;
+
+/** Unit tests for Drill's JDBC driver. */
+
+
+@Ignore // ignore for now.
+public class JdbcDataTest extends JdbcTest {
+  private static String MODEL;
+  private static String EXPECTED;
+
+
+  @BeforeClass
+  public static void setupFixtures() throws IOException {
+    MODEL = Resources.toString(Resources.getResource("test-models.json"), Charsets.UTF_8);
+    EXPECTED = Resources.toString(Resources.getResource("donuts-output-data.txt"), Charsets.UTF_8);
+  }
+
+  /**
+   * Command-line utility to execute a logical plan.
+   *
+   * <p>
+   * The forwarding method ensures that the IDE calls this method with the right classpath.
+   * </p>
+   */
+  public static void main(String[] args) throws Exception {
+    // Currently a no-op; the javadoc above describes the intended forwarding behavior.
+  }
+
+  /** Load driver. */
+  @Test
+  public void testLoadDriver() throws ClassNotFoundException {
+    Class.forName("org.apache.drill.jdbc.Driver");
+  }
+
+  /** Load driver and make a connection. */
+  @Test
+  public void testConnect() throws Exception {
+    Class.forName("org.apache.drill.jdbc.Driver");
+    final Connection connection = DriverManager.getConnection("jdbc:drill:zk=local");
+    connection.close();
+  }
+
+  /** Load driver, make a connection, prepare a statement. */
+  @Test
+  public void testPrepare() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").withConnection(new Function<Connection, Void>() {
+      public Void apply(Connection connection) {
+        try {
+          final Statement statement = connection.prepareStatement("select * from donuts");
+          statement.close();
+          return null;
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+  }
+
+  /** Simple query against JSON. */
+  @Test
+  public void testSelectJson() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select * from donuts").returns(EXPECTED);
+  }
+
+  /** Simple query against EMP table in HR database. */
+  @Test
+  public void testSelectEmployees() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from employees")
+        .returns(
+            "_MAP={deptId=31, lastName=Rafferty}\n" + "_MAP={deptId=33, lastName=Jones}\n"
+                + "_MAP={deptId=33, lastName=Steinberg}\n" + "_MAP={deptId=34, lastName=Robinson}\n"
+                + "_MAP={deptId=34, lastName=Smith}\n" + "_MAP={lastName=John}\n");
+  }
+
+  /** Simple query against EMP table in HR database. */
+  @Test
+  public void testSelectEmpView() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp")
+        .returns(
+            "DEPTID=31; LASTNAME=Rafferty\n" + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n"
+                + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=null; LASTNAME=John\n");
+  }
+
+  /** Simple query against EMP table in HR database. */
+  @Test
+  public void testSelectDept() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from departments")
+        .returns(
+            "_MAP={deptId=31, name=Sales}\n" + "_MAP={deptId=33, name=Engineering}\n"
+                + "_MAP={deptId=34, name=Clerical}\n" + "_MAP={deptId=35, name=Marketing}\n");
+  }
+
+  /** Query with project list. No field references yet. */
+  @Test
+  public void testProjectConstant() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select 1 + 3 as c from donuts")
+        .returns("C=4\n" + "C=4\n" + "C=4\n" + "C=4\n" + "C=4\n");
+  }
+
+  /** Query that projects an element from the map. */
+  @Test
+  public void testProject() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select _MAP['ppu'] as ppu from donuts")
+        .returns("PPU=0.55\n" + "PPU=0.69\n" + "PPU=0.55\n" + "PPU=0.69\n" + "PPU=1.0\n");
+  }
+
+  /** Same logic as {@link #testProject()}, but using a subquery. */
+  @Test
+  public void testProjectOnSubquery() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select d['ppu'] as ppu from (\n" + " select _MAP as d from donuts)")
+        .returns("PPU=0.55\n" + "PPU=0.69\n" + "PPU=0.55\n" + "PPU=0.69\n" + "PPU=1.0\n");
+  }
+
+  /** Checks the logical plan. */
+  @Test
+  public void testProjectPlan() throws Exception {
+    LogicalPlan plan = JdbcAssert
+        .withModel(MODEL, "DONUTS")
+        .sql("select _MAP['ppu'] as ppu from donuts")
+        .logicalPlan();
+
+    PlanProperties planProperties = plan.getProperties();
+    Assert.assertEquals("optiq", planProperties.generator.type);
+    Assert.assertEquals("na", planProperties.generator.info);
+    Assert.assertEquals(1, planProperties.version);
+    Assert.assertEquals(PlanProperties.PlanType.APACHE_DRILL_LOGICAL, planProperties.type);
+    Map<String, StoragePluginConfig> seConfigs = plan.getStorageEngines();
+    StoragePluginConfig config = seConfigs.get("donuts-json");
+//    Assert.assertTrue(config != null && config instanceof ClasspathRSE.ClasspathRSEConfig);
+    config = seConfigs.get("queue");
+//    Assert.assertTrue(config != null && config instanceof QueueRSE.QueueRSEConfig);
+    Scan scan = findOnlyOperator(plan, Scan.class);
+    Assert.assertEquals("donuts-json", scan.getStorageEngine());
+    Project project = findOnlyOperator(plan, Project.class);
+    Assert.assertEquals(1, project.getSelections().length);
+    Assert.assertEquals(Scan.class, project.getInput().getClass());
+    Store store = findOnlyOperator(plan, Store.class);
+    Assert.assertEquals("queue", store.getStorageEngine());
+    Assert.assertEquals("output sink", store.getMemo());
+    Assert.assertEquals(Project.class, store.getInput().getClass());
+  }
+
+  /**
+   * Query with subquery, filter, and projection of one real and one nonexistent field from a map field.
+   */
+  @Test
+  public void testProjectFilterSubquery() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "DONUTS")
+        .sql(
+            "select d['name'] as name, d['xx'] as xx from (\n" + " select _MAP as d from donuts)\n"
+                + "where cast(d['ppu'] as double) > 0.6")
+        .returns("NAME=Raised; XX=null\n" + "NAME=Filled; XX=null\n" + "NAME=Apple Fritter; XX=null\n");
+  }
+
+  private static <T extends LogicalOperator> Iterable<T> findOperator(LogicalPlan plan, final Class<T> operatorClazz) {
+    return (Iterable<T>) Iterables.filter(plan.getSortedOperators(), new Predicate<LogicalOperator>() {
+      @Override
+      public boolean apply(LogicalOperator input) {
+        return input.getClass().equals(operatorClazz);
+      }
+    });
+  }
+
+  private static <T extends LogicalOperator> T findOnlyOperator(LogicalPlan plan, final Class<T> operatorClazz) {
+    return Iterables.getOnlyElement(findOperator(plan, operatorClazz));
+  }
+
+  @Test
+  public void testProjectFilterSubqueryPlan() throws Exception {
+    LogicalPlan plan = JdbcAssert
+        .withModel(MODEL, "DONUTS")
+        .sql(
+            "select d['name'] as name, d['xx'] as xx from (\n" + " select _MAP['donuts'] as d from donuts)\n"
+                + "where cast(d['ppu'] as double) > 0.6")
+        .logicalPlan();
+    PlanProperties planProperties = plan.getProperties();
+    Assert.assertEquals("optiq", planProperties.generator.type);
+    Assert.assertEquals("na", planProperties.generator.info);
+    Assert.assertEquals(1, planProperties.version);
+    Assert.assertEquals(PlanProperties.PlanType.APACHE_DRILL_LOGICAL, planProperties.type);
+    Map<String, StoragePluginConfig> seConfigs = plan.getStorageEngines();
+    StoragePluginConfig config = seConfigs.get("donuts-json");
+//    Assert.assertTrue(config != null && config instanceof ClasspathRSE.ClasspathRSEConfig);
+    config = seConfigs.get("queue");
+//    Assert.assertTrue(config != null && config instanceof QueueRSE.QueueRSEConfig);
+    Scan scan = findOnlyOperator(plan, Scan.class);
+    Assert.assertEquals("donuts-json", scan.getStorageEngine());
+    Filter filter = findOnlyOperator(plan, Filter.class);
+    Assert.assertTrue(filter.getInput() instanceof Scan);
+    Project[] projects = Iterables.toArray(findOperator(plan, Project.class), Project.class);
+    Assert.assertEquals(2, projects.length);
+    Assert.assertEquals(1, projects[0].getSelections().length);
+    Assert.assertEquals(Filter.class, projects[0].getInput().getClass());
+    Assert.assertEquals(2, projects[1].getSelections().length);
+    Assert.assertEquals(Project.class, projects[1].getInput().getClass());
+    Store store = findOnlyOperator(plan, Store.class);
+    Assert.assertEquals("queue", store.getStorageEngine());
+    Assert.assertEquals("output sink", store.getMemo());
+    Assert.assertEquals(Project.class, store.getInput().getClass());
+  }
+
+  /** Query that projects one field. (Disabled; uses sugared syntax.) */
+  @Test @Ignore
+  public void testProjectNestedFieldSugared() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select donuts.ppu from donuts")
+        .returns("C=4\n" + "C=4\n" + "C=4\n" + "C=4\n" + "C=4\n");
+  }
+
+  /** Query with filter. No field references yet. */
+  @Test
+  public void testFilterConstantFalse() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select * from donuts where 3 > 4").returns("");
+  }
+
+  @Test
+  public void testFilterConstant() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("select * from donuts where 3 < 4").returns(EXPECTED);
+  }
+
+
+  @Ignore
+  @Test
+  public void testValues() throws Exception {
+    JdbcAssert.withModel(MODEL, "DONUTS").sql("values (1)").returns("EXPR$0=1\n");
+
+    // Enable when https://issues.apache.org/jira/browse/DRILL-57 fixed
+    // .planContains("store");
+  }
+
+//  @Test
+//  public void testDistinct() throws Exception {
+//    JdbcAssert.withModel(MODEL, "HR").sql("select distinct deptId from emp")
+//        .returnsUnordered("DEPTID=null", "DEPTID=31", "DEPTID=34", "DEPTID=33")
+//        .planContains(CollapsingAggregate.class);
+//  }
+//
+//  @Test
+//  public void testCountNoGroupBy() throws Exception {
+//    // 5 out of 6 employees have a not-null deptId
+//    JdbcAssert.withModel(MODEL, "HR").sql("select count(deptId) as cd, count(*) as c from emp").returns("CD=5; C=6\n")
+//        .planContains(CollapsingAggregate.class);
+//  }
+//
+//  @Test
+//  public void testDistinctCountNoGroupBy() throws Exception {
+//    JdbcAssert.withModel(MODEL, "HR").sql("select count(distinct deptId) as c from emp").returns("C=3\n")
+//        .planContains(CollapsingAggregate.class);
+//  }
+//
+//  @Test
+//  public void testDistinctCountGroupByEmpty() throws Exception {
+//    JdbcAssert.withModel(MODEL, "HR").sql("select count(distinct deptId) as c from emp group by ()").returns("C=3\n")
+//        .planContains(CollapsingAggregate.class);
+//  }
+//
+//  @Test
+//  public void testCountNull() throws Exception {
+//    JdbcAssert.withModel(MODEL, "HR").sql("select count(distinct deptId) as c from emp group by ()").returns("C=3\n")
+//        .planContains(CollapsingAggregate.class);
+//  }
+//
+//  @Test
+//  public void testCount() throws Exception {
+//    JdbcAssert.withModel(MODEL, "HR").sql("select deptId, count(*) as c from emp group by deptId")
+//        .returnsUnordered("DEPTID=31; C=1", "DEPTID=33; C=2", "DEPTID=34; C=2", "DEPTID=null; C=1")
+//        .planContains(CollapsingAggregate.class); // make sure using drill
+//  }
+
+  @Test
+  public void testJoin() throws Exception {
+    Join join = JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp join dept on emp.deptId = dept.deptId")
+        .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales",
+            "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering",
+            "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering",
+            "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical",
+            "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical").planContains(Join.class);
+    Assert.assertEquals(JoinRelType.INNER, join.getJoinType());
+  }
+
+  @Test
+  public void testLeftJoin() throws Exception {
+    Join join = JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp left join dept on emp.deptId = dept.deptId")
+        .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales",
+            "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering",
+            "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering",
+            "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical",
+            "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical",
+            "DEPTID=null; LASTNAME=John; DEPTID0=null; NAME=null").planContains(Join.class);
+    Assert.assertEquals(JoinRelType.LEFT, join.getJoinType());
+  }
+
+  /**
+   * Right join is tricky because Drill's "join" operator only supports "left", so we have to flip inputs.
+   */
+  @Test @Ignore
+  public void testRightJoin() throws Exception {
+    Join join = JdbcAssert.withModel(MODEL, "HR").sql("select * from emp right join dept on emp.deptId = dept.deptId")
+        .returnsUnordered("xx").planContains(Join.class);
+    Assert.assertEquals(JoinRelType.LEFT, join.getJoinType());
+  }
+
+  @Test
+  public void testFullJoin() throws Exception {
+    Join join = JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp full join dept on emp.deptId = dept.deptId")
+        .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; DEPTID0=31; NAME=Sales",
+            "DEPTID=33; LASTNAME=Jones; DEPTID0=33; NAME=Engineering",
+            "DEPTID=33; LASTNAME=Steinberg; DEPTID0=33; NAME=Engineering",
+            "DEPTID=34; LASTNAME=Robinson; DEPTID0=34; NAME=Clerical",
+            "DEPTID=34; LASTNAME=Smith; DEPTID0=34; NAME=Clerical",
+            "DEPTID=null; LASTNAME=John; DEPTID0=null; NAME=null",
+            "DEPTID=null; LASTNAME=null; DEPTID0=35; NAME=Marketing").planContains(Join.class);
+    Assert.assertEquals(JoinRelType.FULL, join.getJoinType());
+  }
+
+  /**
+   * Join on subquery; also tests that if a field of the same name exists in both inputs, both fields make it through
+   * the join.
+   */
+  @Test
+  public void testJoinOnSubquery() throws Exception {
+    Join join = JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql(
+            "select * from (\n" + "select deptId, lastname, 'x' as name from emp) as e\n"
+                + " join dept on e.deptId = dept.deptId")
+        .returnsUnordered("DEPTID=31; LASTNAME=Rafferty; NAME=x; DEPTID0=31; NAME0=Sales",
+            "DEPTID=33; LASTNAME=Jones; NAME=x; DEPTID0=33; NAME0=Engineering",
+            "DEPTID=33; LASTNAME=Steinberg; NAME=x; DEPTID0=33; NAME0=Engineering",
+            "DEPTID=34; LASTNAME=Robinson; NAME=x; DEPTID0=34; NAME0=Clerical",
+            "DEPTID=34; LASTNAME=Smith; NAME=x; DEPTID0=34; NAME0=Clerical").planContains(Join.class);
+    Assert.assertEquals(JoinRelType.INNER, join.getJoinType());
+  }
+
+  /** Tests that one of the FoodMart tables is present. */
+  @Test @Ignore
+  public void testFoodMart() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "FOODMART")
+        .sql("select * from product_class where cast(_map['product_class_id'] as integer) < 3")
+        .returnsUnordered(
+            "_MAP={product_category=Seafood, product_class_id=2, product_department=Seafood, product_family=Food, product_subcategory=Shellfish}",
+            "_MAP={product_category=Specialty, product_class_id=1, product_department=Produce, product_family=Food, product_subcategory=Nuts}");
+  }
+
+  @Test
+  public void testUnionAll() throws Exception {
+    Union union = JdbcAssert.withModel(MODEL, "HR").sql("select deptId from dept\n" + "union all\n" + "select deptId from emp")
+        .returnsUnordered("DEPTID=31", "DEPTID=33", "DEPTID=34", "DEPTID=35", "DEPTID=null")
+        .planContains(Union.class);
+    Assert.assertFalse(union.isDistinct());
+  }
+
+  @Test
+  public void testUnion() throws Exception {
+    Union union = JdbcAssert.withModel(MODEL, "HR").sql("select deptId from dept\n" + "union\n" + "select deptId from emp")
+        .returnsUnordered("DEPTID=31", "DEPTID=33", "DEPTID=34", "DEPTID=35", "DEPTID=null")
+        .planContains(Union.class);
+    Assert.assertTrue(union.isDistinct());
+  }
+
+  @Test
+  public void testOrderByDescNullsFirst() throws Exception {
+    // desc nulls first
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp order by deptId desc nulls first")
+        .returns(
+            "DEPTID=null; LASTNAME=John\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n"
+                + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n")
+        .planContains(Order.class);
+  }
+
+  @Test
+  public void testOrderByDescNullsLast() throws Exception {
+    // desc nulls last
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp order by deptId desc nulls last")
+        .returns(
+            "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n" + "DEPTID=33; LASTNAME=Jones\n"
+                + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n" + "DEPTID=null; LASTNAME=John\n")
+        .planContains(Order.class);
+  }
+
+  @Test @Ignore
+  public void testOrderByDesc() throws Exception {
+    // desc is implicitly "nulls first" (i.e. null sorted as +inf)
+    // Current behavior is to sort nulls last. This is wrong.
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp order by deptId desc")
+        .returns(
+            "DEPTID=null; LASTNAME=John\n" + "DEPTID=34; LASTNAME=Robinson\n" + "DEPTID=34; LASTNAME=Smith\n"
+                + "DEPTID=33; LASTNAME=Jones\n" + "DEPTID=33; LASTNAME=Steinberg\n" + "DEPTID=31; LASTNAME=Rafferty\n")
+        .planContains(Order.class);
+  }
+
+  @Test
+  public void testOrderBy() throws Exception {
+    // no sort order specified is implicitly "asc", and asc is "nulls last"
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select * from emp order by deptId")
+        .returns(
+            "DEPTID=31; LASTNAME=Rafferty\n"
+            + "DEPTID=33; LASTNAME=Jones\n"
+            + "DEPTID=33; LASTNAME=Steinberg\n"
+            + "DEPTID=34; LASTNAME=Robinson\n"
+            + "DEPTID=34; LASTNAME=Smith\n"
+            + "DEPTID=null; LASTNAME=John\n")
+        .planContains(Order.class);
+  }
+
+  @Test
+  public void testLimit() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select LASTNAME from emp limit 2")
+        .returns("LASTNAME=Rafferty\n" +
+            "LASTNAME=Jones")
+        .planContains(Limit.class);
+  }
+
+
+  @Test
+  public void testLimitOrderBy() throws Exception {
+    TestDataConnection tdc = JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select LASTNAME from emp order by LASTNAME limit 2")
+        .returns("LASTNAME=John\n" +
+            "LASTNAME=Jones");
+    tdc.planContains(Limit.class);
+    tdc.planContains(Order.class);
+  }
+
+  @Test
+  public void testOrderByWithOffset() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select LASTNAME from emp order by LASTNAME asc offset 3")
+        .returns("LASTNAME=Robinson\n" +
+            "LASTNAME=Smith\n" +
+            "LASTNAME=Steinberg")
+        .planContains(Limit.class);
+
+  }
+
+  @Test
+  public void testOrderByWithOffsetAndFetch() throws Exception {
+    JdbcAssert
+        .withModel(MODEL, "HR")
+        .sql("select LASTNAME from emp order by LASTNAME asc offset 3 fetch next 2 rows only")
+        .returns("LASTNAME=Robinson\n" +
+            "LASTNAME=Smith")
+        .planContains(Limit.class);
+  }
+}
+
+// End JdbcDataTest.java

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestDateAggregateFunction.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestDateAggregateFunction.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestDateAggregateFunction.java
new file mode 100644
index 0000000..05c3fae
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestDateAggregateFunction.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.nio.file.Paths;
+
+import org.apache.drill.jdbc.Driver;
+import org.junit.Test;
+
+public class TestDateAggregateFunction {
+
+  public static final String WORKING_PATH;
+  static {
+    Driver.load();
+    WORKING_PATH = Paths.get("").toAbsolutePath().toString();
+  }
+
+  @Test
+  public void testDateAggFunction() throws Exception {
+    String query = "SELECT max(cast(HIRE_DATE as date)) as MAX_DATE, min(cast(HIRE_DATE as date)) as MIN_DATE"
+        + " FROM `employee.json`";
+
+    JdbcAssert.withFull("cp")
+        .sql(query)
+        .returns(
+                "MAX_DATE=1998-01-01; " +
+                "MIN_DATE=1993-05-01\n"
+        );
+  }
+
+  @Test
+  public void testIntervalAggFunction() throws Exception {
+    String query = "select max(date_diff(date'2014-5-2', cast(HIRE_DATE as date))) as MAX_DAYS, min(date_diff(date'2014-5-2', cast(HIRE_DATE as date))) MIN_DAYS"
+        + " FROM `employee.json`";
+
+    JdbcAssert.withFull("cp")
+        .sql(query)
+        .returns(
+            "MAX_DAYS=7671 days 0:0:0.0; " +
+                "MIN_DAYS=5965 days 0:0:0.0\n"
+        );
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
new file mode 100644
index 0000000..2e279e7
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcDistQuery.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.drill.common.util.TestTools;
+import org.apache.drill.exec.store.hive.HiveTestDataGenerator;
+import org.apache.drill.jdbc.Driver;
+import org.apache.drill.jdbc.JdbcTest;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+
+import com.google.common.base.Stopwatch;
+
+public class TestJdbcDistQuery extends JdbcTest {
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestJdbcDistQuery.class);
+
+
+  // Set a timeout unless we're debugging.
+  @Rule public TestRule TIMEOUT = TestTools.getTimeoutRule(50000);
+
+  private static final String WORKING_PATH;
+  static {
+    Driver.load();
+    WORKING_PATH = Paths.get("").toAbsolutePath().toString();
+  }
+
+  @BeforeClass
+  public static void generateHive() throws Exception{
+    new HiveTestDataGenerator().generateTestData();
+  }
+
+
+  @Test
+  public void testSimpleQuerySingleFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsSF/`", WORKING_PATH));
+  }
+
+
+  @Test
+  public void testSimpleQueryMultiFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsMF/`", WORKING_PATH));
+  }
+
+  @Test
+  public void testWhereOverSFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsSF/` "
+        + "WHERE R_REGIONKEY = 1", WORKING_PATH));
+  }
+
+  @Test
+  public void testWhereOverMFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "WHERE R_REGIONKEY = 1", WORKING_PATH));
+  }
+
+
+  @Test
+  public void testAggSingleFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsSF/` "
+        + "group by R_REGIONKEY", WORKING_PATH));
+  }
+
+  @Test
+  public void testAggMultiFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "group by R_REGIONKEY", WORKING_PATH));
+  }
+
+  @Test
+  public void testAggOrderByDiffGKeyMultiFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, SUM(cast(R_REGIONKEY AS int)) As S "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "group by R_REGIONKEY ORDER BY S", WORKING_PATH));
+  }
+
+  @Test
+  public void testAggOrderBySameGKeyMultiFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, SUM(cast(R_REGIONKEY AS int)) As S "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "group by R_REGIONKEY "
+        + "ORDER BY R_REGIONKEY", WORKING_PATH));
+  }
+
+  @Test
+  public void testJoinSingleFile() throws Exception{
+    testQuery(String.format("select T1.R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsSF/` as T1 "
+        + "join dfs.`%s/../../sample-data/nationsSF/` as T2 "
+        + "on T1.R_REGIONKEY = T2.N_REGIONKEY", WORKING_PATH, WORKING_PATH));
+  }
+
+  @Test
+  public void testJoinMultiFile() throws Exception{
+    testQuery(String.format("select T1.R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsMF/` as T1 "
+        + "join dfs.`%s/../../sample-data/nationsMF/` as T2 "
+        + "on T1.R_REGIONKEY = T2.N_REGIONKEY", WORKING_PATH, WORKING_PATH));
+  }
+
+  @Test
+  public void testJoinMFileWhere() throws Exception{
+    testQuery(String.format("select T1.R_REGIONKEY, T1.R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsMF/` as T1 "
+        + "join dfs.`%s/../../sample-data/nationsMF/` as T2 "
+        + "on T1.R_REGIONKEY = T2.N_REGIONKEY "
+        + "WHERE T1.R_REGIONKEY  = 3 ", WORKING_PATH, WORKING_PATH));
+  }
+
+  @Test
+  //NPE at ExternalSortBatch.java : 151
+  public void testSortSingleFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsSF/` "
+        + "order by R_REGIONKEY", WORKING_PATH));
+  }
+
+  @Test
+  //NPE at ExternalSortBatch.java : 151
+  public void testSortMultiFile() throws Exception{
+    testQuery(String.format("select R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "order by R_REGIONKEY", WORKING_PATH));
+  }
+
+  @Test
+  public void testSortMFileWhere() throws Exception{
+    testQuery(String.format("select R_REGIONKEY "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "WHERE R_REGIONKEY = 1 "
+        + "order by R_REGIONKEY ", WORKING_PATH ));
+  }
+
+  @Test
+  public void testJoinAggSortWhere() throws Exception{
+    testQuery(String.format("select T1.R_REGIONKEY, COUNT(1) as CNT "
+        + "from dfs.`%s/../../sample-data/regionsMF/` as T1 "
+        + "join dfs.`%s/../../sample-data/nationsMF/` as T2 "
+        + "on T1.R_REGIONKEY = T2.N_REGIONKEY "
+        + "WHERE T1.R_REGIONKEY  = 3 "
+        + "GROUP BY T1.R_REGIONKEY "
+        + "ORDER BY T1.R_REGIONKEY",WORKING_PATH, WORKING_PATH ));
+  }
+
+  @Test
+  public void testSelectLimit() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, R_NAME "
+        + "from dfs.`%s/../../sample-data/regionsMF/` "
+        + "limit 2", WORKING_PATH));
+  }
+
+  private void testQuery(String sql) throws Exception{
+    boolean success = false;
+    try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null)) {
+      // Runs once today; raise the loop bound to repeat the query for rough timing comparisons.
+      for (int x = 0; x < 1; x++) {
+        Stopwatch watch = new Stopwatch().start();
+        Statement s = c.createStatement();
+        ResultSet r = s.executeQuery(sql);
+        boolean first = true;
+        while (r.next()) {
+          ResultSetMetaData md = r.getMetaData();
+          if (first) {
+            for (int i = 1; i <= md.getColumnCount(); i++) {
+              System.out.print(md.getColumnName(i));
+              System.out.print('\t');
+            }
+            System.out.println();
+            first = false;
+          }
+
+          for (int i = 1; i <= md.getColumnCount(); i++) {
+            System.out.print(r.getObject(i));
+            System.out.print('\t');
+          }
+          System.out.println();
+        }
+        s.close();
+
+        System.out.println(String.format("Query completed in %d millis.", watch.elapsed(TimeUnit.MILLISECONDS)));
+      }
+
+      System.out.println("\n\n\n");
+      success = true;
+    } finally {
+      // On failure, pause briefly (presumably so asynchronous log output can flush) before tearing down.
+      if (!success) Thread.sleep(2000);
+    }
+  }
+}
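
The helper above is plain java.sql against the embedded driver. For readers skimming the diff, here is a minimal standalone sketch of the same pattern; the class name and query are illustrative, while Driver.load() and the jdbc:drill:zk=local URL (which brings up an embedded Drillbit instead of locating one through ZooKeeper) are taken from the test code in this patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DrillJdbcExample {
      public static void main(String[] args) throws Exception {
        org.apache.drill.jdbc.Driver.load();  // registers the driver with DriverManager
        // zk=local starts an embedded Drillbit for the lifetime of the connection.
        try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null)) {
          Statement s = c.createStatement();
          ResultSet r = s.executeQuery("SELECT * FROM INFORMATION_SCHEMA.CATALOGS");
          while (r.next()) {
            System.out.println(r.getObject(1) + "\t" + r.getObject(2));
          }
          s.close();
        }
      }
    }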

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
new file mode 100644
index 0000000..f70ddca
--- /dev/null
+++ b/exec/jdbc/src/test/java/org/apache/drill/jdbc/test/TestJdbcQuery.java
@@ -0,0 +1,497 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.jdbc.test;
+
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.Statement;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.drill.common.util.TestTools;
+import org.apache.drill.exec.store.hive.HiveTestDataGenerator;
+import org.apache.drill.jdbc.Driver;
+import org.apache.drill.jdbc.JdbcTest;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+
+public class TestJdbcQuery extends JdbcTest{
+  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestJdbcQuery.class);
+
+
+  // Set a timeout unless we're debugging.
+  @Rule public TestRule TIMEOUT = TestTools.getTimeoutRule(20000);
+
+  private static final String WORKING_PATH;
+  static{
+    Driver.load();
+    WORKING_PATH = Paths.get("").toAbsolutePath().toString();
+  }
+
+  @BeforeClass
+  public static void generateHive() throws Exception{
+    new HiveTestDataGenerator().generateTestData();
+  }
+
+  @Test
+  @Ignore
+  public void testHiveRead() throws Exception{
+    testQuery("select * from hive.kv");
+  }
+
+  @Test
+  public void testHiveReadWithDb() throws Exception{
+    testQuery("select * from hive.`default`.kv");
+  }
+
+  @Test
+  @Ignore
+  public void testJsonQuery() throws Exception{
+    testQuery("select * from cp.`employee.json`");
+  }
+
+
+  @Test
+  public void testInfoSchema() throws Exception{
+    testQuery("select * from INFORMATION_SCHEMA.SCHEMATA");
+    testQuery("select * from INFORMATION_SCHEMA.CATALOGS");
+    testQuery("select * from INFORMATION_SCHEMA.VIEWS");
+//    testQuery("select * from INFORMATION_SCHEMA.TABLES");
+    testQuery("select * from INFORMATION_SCHEMA.COLUMNS");
+  }
+
+  @Test
+  public void testCast() throws Exception{
+    testQuery(String.format("select R_REGIONKEY, cast(R_NAME as varchar(15)) as region, cast(R_COMMENT as varchar(255)) as comment from dfs.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  @Test
+  @Ignore
+  public void testWorkspace() throws Exception{
+    testQuery(String.format("select * from dfs.home.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  @Test
+  @Ignore
+  public void testWildcard() throws Exception{
+    testQuery(String.format("select * from dfs.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  @Test
+  public void testCharLiteral() throws Exception {
+    testQuery("select 'test literal' from INFORMATION_SCHEMA.`TABLES` LIMIT 1");
+  }
+
+  @Test
+  public void testVarCharLiteral() throws Exception {
+    testQuery("select cast('test literal' as VARCHAR) from INFORMATION_SCHEMA.`TABLES` LIMIT 1");
+  }
+
+  @Test
+  @Ignore
+  public void testLogicalExplain() throws Exception{
+    testQuery(String.format("EXPLAIN PLAN WITHOUT IMPLEMENTATION FOR select * from dfs.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  @Test
+  @Ignore
+  public void testPhysicalExplain() throws Exception{
+    testQuery(String.format("EXPLAIN PLAN FOR select * from dfs.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  @Test
+  @Ignore
+  public void checkUnknownColumn() throws Exception{
+    testQuery(String.format("SELECT unknownColumn FROM dfs.`%s/../../sample-data/region.parquet`", WORKING_PATH));
+  }
+
+  private void testQuery(String sql) throws Exception{
+    boolean success = false;
+    try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null)) {
+      // Runs once today; raise the loop bound to repeat the query for rough timing comparisons.
+      for (int x = 0; x < 1; x++) {
+        Stopwatch watch = new Stopwatch().start();
+        Statement s = c.createStatement();
+        ResultSet r = s.executeQuery(sql);
+        boolean first = true;
+        while (r.next()) {
+          ResultSetMetaData md = r.getMetaData();
+          if (first) {
+            for (int i = 1; i <= md.getColumnCount(); i++) {
+              System.out.print(md.getColumnName(i));
+              System.out.print('\t');
+            }
+            System.out.println();
+            first = false;
+          }
+
+          for (int i = 1; i <= md.getColumnCount(); i++) {
+            System.out.print(r.getObject(i));
+            System.out.print('\t');
+          }
+          System.out.println();
+        }
+        s.close();
+
+        System.out.println(String.format("Query completed in %d millis.", watch.elapsed(TimeUnit.MILLISECONDS)));
+      }
+
+      System.out.println("\n\n\n");
+      success = true;
+    } finally {
+      // On failure, pause briefly (presumably so asynchronous log output can flush) before tearing down.
+      if (!success) Thread.sleep(2000);
+    }
+  }
+
+  @Test
+  public void testLikeNotLike() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SELECT TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS " +
+        "WHERE TABLE_NAME NOT LIKE 'C%' AND COLUMN_NAME LIKE 'TABLE_%E'")
+      .returns(
+        "TABLE_NAME=VIEWS; COLUMN_NAME=TABLE_NAME\n" +
+        "TABLE_NAME=TABLES; COLUMN_NAME=TABLE_NAME\n" +
+        "TABLE_NAME=TABLES; COLUMN_NAME=TABLE_TYPE\n"
+      );
+  }
+
+  @Test
+  public void testSimilarNotSimilar() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.`TABLES` "+
+        "WHERE TABLE_NAME SIMILAR TO '%(H|I)E%' AND TABLE_NAME NOT SIMILAR TO 'C%'")
+      .returns(
+        "TABLE_NAME=VIEWS\n" +
+        "TABLE_NAME=SCHEMATA\n"
+      );
+  }
+
+
+  @Test
+  public void testIntegerLiteral() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("select substring('asd' from 1 for 2) from INFORMATION_SCHEMA.`TABLES` limit 1")
+      .returns("EXPR$0=as\n");
+  }
+
+  @Test
+  public void testNullOpForNullableType() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT * FROM cp.`test_null_op.json` WHERE intType IS NULL AND varCharType IS NOT NULL")
+        .returns("intType=null; varCharType=val2");
+  }
+
+  @Test
+  public void testNullOpForNonNullableType() throws Exception{
+    // output of (intType IS NULL) is a non-nullable type
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT * FROM cp.`test_null_op.json` "+
+            "WHERE (intType IS NULL) IS NULL AND (varCharType IS NOT NULL) IS NOT NULL")
+        .returns("");
+  }
+
+  @Test
+  public void testTrueOpForNullableType() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE booleanType IS TRUE")
+        .returns("data=set to true");
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE booleanType IS FALSE")
+        .returns("data=set to false");
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE booleanType IS NOT TRUE")
+        .returns(
+            "data=set to false\n" +
+            "data=not set"
+        );
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE booleanType IS NOT FALSE")
+        .returns(
+            "data=set to true\n" +
+            "data=not set"
+        );
+  }
+
+  @Test
+  public void testTrueOpForNonNullableType() throws Exception{
+    // Output of IS TRUE (and the other IS predicates) is a non-nullable type
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE (booleanType IS TRUE) IS TRUE")
+        .returns("data=set to true");
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE (booleanType IS FALSE) IS FALSE")
+        .returns(
+            "data=set to true\n" +
+            "data=not set"
+        );
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE (booleanType IS NOT TRUE) IS NOT TRUE")
+        .returns("data=set to true");
+
+    JdbcAssert.withNoDefaultSchema()
+        .sql("SELECT data FROM cp.`test_true_false_op.json` WHERE (booleanType IS NOT FALSE) IS NOT FALSE")
+        .returns(
+            "data=set to true\n" +
+            "data=not set"
+        );
+  }
+
+  @Test
+  public void testShowTables() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW TABLES")
+      .returns(
+        "TABLE_SCHEMA=hive.default; TABLE_NAME=kv\n" +
+        "TABLE_SCHEMA=hive.db1; TABLE_NAME=kv_db1\n" +
+        "TABLE_SCHEMA=hive; TABLE_NAME=kv\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=VIEWS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=COLUMNS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=TABLES\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=CATALOGS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=SCHEMATA\n"
+      );
+  }
+
+  @Test
+  public void testShowTablesFromDb() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW TABLES FROM INFORMATION_SCHEMA")
+      .returns(
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=VIEWS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=COLUMNS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=TABLES\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=CATALOGS\n" +
+        "TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=SCHEMATA\n"
+      );
+
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW TABLES IN hive")
+      .returns("TABLE_SCHEMA=hive; TABLE_NAME=kv\n");
+  }
+
+  @Test
+  public void testShowTablesFromDbWhere() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW TABLES FROM INFORMATION_SCHEMA WHERE TABLE_NAME='VIEWS'")
+      .returns("TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=VIEWS\n");
+  }
+
+  @Test
+  public void testShowTablesLike() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW TABLES LIKE '%CH%'")
+      .returns("TABLE_SCHEMA=INFORMATION_SCHEMA; TABLE_NAME=SCHEMATA\n");
+  }
+
+  @Test
+  public void testShowDatabases() throws Exception{
+    String expected =
+        "SCHEMA_NAME=hive.default\n" +
+        "SCHEMA_NAME=hive.db1\n" +
+        "SCHEMA_NAME=hive\n" +
+        "SCHEMA_NAME=dfs.home\n" +
+        "SCHEMA_NAME=dfs.default\n" +
+        "SCHEMA_NAME=dfs\n" +
+        "SCHEMA_NAME=cp.default\n" +
+        "SCHEMA_NAME=cp\n" +
+        "SCHEMA_NAME=INFORMATION_SCHEMA\n";
+
+    JdbcAssert.withNoDefaultSchema().sql("SHOW DATABASES").returns(expected);
+    JdbcAssert.withNoDefaultSchema().sql("SHOW SCHEMAS").returns(expected);
+  }
+
+  @Test
+  public void testShowDatabasesWhere() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW DATABASES WHERE SCHEMA_NAME='dfs'")
+      .returns("SCHEMA_NAME=dfs\n");
+  }
+
+  @Test
+  public void testShowDatabasesLike() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("SHOW DATABASES LIKE '%i%'")
+      .returns(
+        "SCHEMA_NAME=hive.default\n"+
+        "SCHEMA_NAME=hive.db1\n"+
+        "SCHEMA_NAME=hive\n"
+      );
+  }
+
+  @Test
+  public void testDescribeTable() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("DESCRIBE CATALOGS")
+      .returns(
+        "COLUMN_NAME=CATALOG_NAME; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=CATALOG_DESCRIPTION; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=CATALOG_CONNECT; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"
+      );
+  }
+
+  @Test
+  public void testDescribeTableWithSchema() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("DESCRIBE INFORMATION_SCHEMA.`TABLES`")
+      .returns(
+        "COLUMN_NAME=TABLE_CATALOG; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=TABLE_SCHEMA; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=TABLE_NAME; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=TABLE_TYPE; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"
+      );
+  }
+
+  @Test
+  @Ignore // DRILL-399 - default schema doesn't work
+  public void testDescribeTableWithColumnName() throws Exception{
+    JdbcAssert.withFull("INFORMATION_SCHEMA")
+        .sql("DESCRIBE `TABLES` TABLE_CATALOG")
+        .returns("COLUMN_NAME=TABLE_CATALOG; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n");
+  }
+
+  @Test
+  public void testDescribeTableWithSchemaAndColumnName() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("DESCRIBE INFORMATION_SCHEMA.`TABLES` TABLE_CATALOG")
+      .returns("COLUMN_NAME=TABLE_CATALOG; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n");
+  }
+
+  @Test
+  @Ignore // DRILL-399 - default schema doesn't work
+  public void testDescribeTableWithColQualifier() throws Exception{
+    JdbcAssert.withFull("INFORMATION_SCHEMA")
+      .sql("DESCRIBE COLUMNS 'TABLE%'")
+      .returns(
+        "COLUMN_NAME=TABLE_CATALOG; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=TABLE_SCHEMA; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=TABLE_NAME; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"
+      );
+  }
+
+  @Test
+  public void testDescribeTableWithSchemaAndColQualifier() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("DESCRIBE INFORMATION_SCHEMA.SCHEMATA 'SCHEMA%'")
+      .returns(
+        "COLUMN_NAME=SCHEMA_NAME; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"+
+        "COLUMN_NAME=SCHEMA_OWNER; DATA_TYPE=VARCHAR; IS_NULLABLE=NO\n"
+      );
+  }
+
+  @Test
+  public void testDefaultSchemaDfs() throws Exception{
+    JdbcAssert.withFull("dfs")
+      .sql(String.format("SELECT R_REGIONKEY FROM `%s/../../sample-data/region.parquet` LIMIT 2", WORKING_PATH))
+      .returns(
+        "R_REGIONKEY=0\n" +
+        "R_REGIONKEY=1\n"
+      );
+  }
+
+  @Test
+  public void testDefaultSchemaClasspath() throws Exception{
+    JdbcAssert.withFull("cp")
+      .sql("SELECT full_name FROM `employee.json` LIMIT 2")
+      .returns(
+        "full_name=Sheri Nowmer\n" +
+        "full_name=Derrick Whelply\n"
+      );
+  }
+
+  @Test
+  public void testDefaultSchemaHive() throws Exception{
+    JdbcAssert.withFull("hive")
+      .sql("SELECT * FROM kv LIMIT 2")
+      .returns(
+        "key=1; value= key_1\n" +
+        "key=2; value= key_2\n"
+      );
+  }
+
+  @Test
+  public void testDefaultTwoLevelSchemaHive() throws Exception{
+    JdbcAssert.withFull("hive.db1")
+      .sql("SELECT * FROM `kv_db1` LIMIT 2")
+      .returns(
+        "key=1; value= key_1\n" +
+        "key=2; value= key_2\n"
+      );
+  }
+
+  @Test
+  public void testQueryFromNonDefaultSchema() throws Exception{
+    JdbcAssert.withFull("hive")
+      .sql("SELECT full_name FROM cp.`employee.json` LIMIT 2")
+      .returns(
+        "full_name=Sheri Nowmer\n" +
+        "full_name=Derrick Whelply\n"
+      );
+  }
+
+  @Test
+  public void testUseSchema() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+      .sql("USE hive.`default`")
+      .returns("ok=true; summary=Default schema changed to 'hive.default'");
+  }
+
+  @Test
+  public void testUseSchemaNegative() throws Exception{
+    JdbcAssert.withNoDefaultSchema()
+        .sql("USE invalid.schema")
+        .returns("ok=false; summary=Failed to change default schema to 'invalid.schema'");
+  }
+
+  @Test
+  public void testUseSchemaAndQuery() throws Exception{
+    JdbcAssert.withNoDefaultSchema().withConnection(new Function<Connection, Void>() {
+      public Void apply(Connection connection) {
+        try {
+          Statement statement = connection.createStatement();
+          ResultSet resultSet = statement.executeQuery("USE hive.db1");
+          String result = JdbcAssert.toString(resultSet).trim();
+          String expected = "ok=true; summary=Default schema changed to 'hive.db1'";
+          Assert.assertEquals(expected, result);
+
+          resultSet = statement.executeQuery("SELECT * FROM kv_db1 LIMIT 2");
+          result = JdbcAssert.toString(resultSet).trim();
+          expected = "key=1; value= key_1\nkey=2; value= key_2";
+          Assert.assertEquals(expected, result);
+          statement.close();
+          return null;
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      }
+    });
+  }
+}
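
A note on the fluent checks used throughout TestJdbcQuery: JdbcAssert itself is defined elsewhere in this patch, so only the call shape (withNoDefaultSchema()/sql()/returns()) is taken from the tests above. The sketch below is a reconstruction under that assumption, with an invented class name (FluentQueryCheck) and invented internals, showing how such a helper can render each row as a "COL1=v1; COL2=v2" line and compare the whole output against the expected string:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.Statement;

    import org.junit.Assert;

    public final class FluentQueryCheck {
      private String sql;

      public static FluentQueryCheck withNoDefaultSchema() {
        org.apache.drill.jdbc.Driver.load();
        return new FluentQueryCheck();
      }

      public FluentQueryCheck sql(String sql) {
        this.sql = sql;
        return this;
      }

      public void returns(String expected) throws Exception {
        StringBuilder sb = new StringBuilder();
        try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null)) {
          Statement s = c.createStatement();
          ResultSet r = s.executeQuery(sql);
          ResultSetMetaData md = r.getMetaData();
          while (r.next()) {
            // One "NAME=value" pair per column, joined with "; ", one row per line.
            for (int i = 1; i <= md.getColumnCount(); i++) {
              if (i > 1) sb.append("; ");
              sb.append(md.getColumnName(i)).append('=').append(r.getObject(i));
            }
            sb.append('\n');
          }
          s.close();
        }
        // Trim so expectations written with or without a trailing newline both pass.
        Assert.assertEquals(expected.trim(), sb.toString().trim());
      }
    }

Usage then mirrors the tests, e.g. FluentQueryCheck.withNoDefaultSchema().sql("SHOW DATABASES WHERE SCHEMA_NAME='dfs'").returns("SCHEMA_NAME=dfs\n").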

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/donuts-output-data.txt
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/donuts-output-data.txt b/exec/jdbc/src/test/resources/donuts-output-data.txt
new file mode 100644
index 0000000..6934600
--- /dev/null
+++ b/exec/jdbc/src/test/resources/donuts-output-data.txt
@@ -0,0 +1,5 @@
+_MAP={batters={batter=[{id=1001, type=Regular}, {id=1002, type=Chocolate}, {id=1003, type=Blueberry}, {id=1004, type=Devil's Food}]}, id=0001, name=Cake, ppu=0.55, sales=35, topping=[{id=5001, type=None}, {id=5002, type=Glazed}, {id=5005, type=Sugar}, {id=5007, type=Powdered Sugar}, {id=5006, type=Chocolate with Sprinkles}, {id=5003, type=Chocolate}, {id=5004, type=Maple}], type=donut}
+_MAP={batters={batter=[{id=1001, type=Regular}]}, id=0002, name=Raised, ppu=0.69, sales=145, topping=[{id=5001, type=None}, {id=5002, type=Glazed}, {id=5005, type=Sugar}, {id=5003, type=Chocolate}, {id=5004, type=Maple}], type=donut}
+_MAP={batters={batter=[{id=1001, type=Regular}, {id=1002, type=Chocolate}]}, id=0003, name=Old Fashioned, ppu=0.55, sales=300, topping=[{id=5001, type=None}, {id=5002, type=Glazed}, {id=5003, type=Chocolate}, {id=5004, type=Maple}], type=donut}
+_MAP={batters={batter=[{id=1001, type=Regular}, {id=1002, type=Chocolate}, {id=1003, type=Blueberry}, {id=1004, type=Devil's Food}]}, filling=[{id=6001, type=None}, {id=6002, type=Raspberry}, {id=6003, type=Lemon}, {id=6004, type=Chocolate}, {id=6005, type=Kreme}], id=0004, name=Filled, ppu=0.69, sales=14, topping=[{id=5001, type=None}, {id=5002, type=Glazed}, {id=5005, type=Sugar}, {id=5007, type=Powdered Sugar}, {id=5006, type=Chocolate with Sprinkles}, {id=5003, type=Chocolate}, {id=5004, type=Maple}], type=donut}
+_MAP={batters={batter=[{id=1001, type=Regular}]}, id=0005, name=Apple Fritter, ppu=1.0, sales=700, topping=[{id=5002, type=Glazed}], type=donut}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/full-model.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/full-model.json b/exec/jdbc/src/test/resources/full-model.json
new file mode 100644
index 0000000..9f3d482
--- /dev/null
+++ b/exec/jdbc/src/test/resources/full-model.json
@@ -0,0 +1,18 @@
+{
+  version: '1.0',
+   schemas: [
+     {
+       name: 'DONUTS',
+       tables: [
+         {
+           name: 'DONUTS',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json'
+           }
+         }
+       ]
+     }
+   ]
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/logback.xml
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/logback.xml b/exec/jdbc/src/test/resources/logback.xml
new file mode 100644
index 0000000..13808a6
--- /dev/null
+++ b/exec/jdbc/src/test/resources/logback.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration>
+
+  <appender name="SOCKET" class="de.huxhorn.lilith.logback.appender.ClassicMultiplexSocketAppender">
+    <Compressing>true</Compressing> 
+    <ReconnectionDelay>10000</ReconnectionDelay>
+    <IncludeCallerData>true</IncludeCallerData>
+    <RemoteHosts>localhost</RemoteHosts>
+  </appender>
+
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+    <!-- encoders are assigned the type
+         ch.qos.logback.classic.encoder.PatternLayoutEncoder by default -->
+    <encoder>
+      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+    </encoder>
+    <level value="warn" />
+  </appender>
+  
+  <logger name="org.apache.drill" additivity="false">
+    <level value="debug" />
+    <appender-ref ref="SOCKET" />
+<!--     <appender-ref ref="STDOUT" /> -->
+  </logger>
+
+  <root>
+    <level value="debug" />
+    <appender-ref ref="SOCKET" />
+<!--     <appender-ref ref="STDOUT" /> -->
+  </root>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/storage-plugins.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/storage-plugins.json b/exec/jdbc/src/test/resources/storage-plugins.json
new file mode 100644
index 0000000..60efa50
--- /dev/null
+++ b/exec/jdbc/src/test/resources/storage-plugins.json
@@ -0,0 +1,46 @@
+{
+  "storage":{
+    dfs: {
+      type: "file",
+      connection: "file:///",
+      workspaces: {
+        home: "/"
+      },
+      formats: {
+        "psv" : {
+          type: "text",
+          extensions: [ "tbl" ],
+          delimiter: "|"
+        },
+        "csv" : {
+          type: "text",
+          extensions: [ "csv" ],
+          delimiter: ","
+        },
+        "tsv" : {
+          type: "text",
+          extensions: [ "tsv" ],
+          delimiter: "\t"
+        },
+        "parquet" : {
+          type: "parquet"
+        }
+      }
+    },
+    cp: {
+      type: "file",
+      connection: "classpath:///"
+    },
+    hive : {
+        type:"hive",
+        config :
+          {
+            "hive.metastore.uris" : "",
+            "javax.jdo.option.ConnectionURL" : "jdbc:derby:;databaseName=/tmp/drill_hive_db;create=true",
+            "hive.metastore.warehouse.dir" : "/tmp/drill_hive_wh",
+            "fs.default.name" : "file:///",
+            "hive.metastore.sasl.enabled" : "false"
+          }
+      }
+  }
+}
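
The formats block above is what ties file suffixes to readers: .tbl goes to the pipe-delimited "psv" text reader, .csv to comma-delimited, .tsv to tab-delimited, and .parquet to the Parquet reader, which is how the region.parquet queries in the tests resolve. A small sketch of the same routing through JDBC (the /tmp/sample/region.tbl path is hypothetical):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class DfsFormatExample {
      public static void main(String[] args) throws Exception {
        org.apache.drill.jdbc.Driver.load();
        try (Connection c = DriverManager.getConnection("jdbc:drill:zk=local", null)) {
          Statement s = c.createStatement();
          // The ".tbl" extension matches the "psv" entry in storage-plugins.json,
          // so this hypothetical file is parsed as pipe-delimited text; a ".csv"
          // path would route to the comma-delimited reader instead.
          ResultSet r = s.executeQuery("SELECT * FROM dfs.`/tmp/sample/region.tbl`");
          while (r.next()) {
            System.out.println(r.getObject(1));
          }
          s.close();
        }
      }
    }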

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/test-models.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/test-models.json b/exec/jdbc/src/test/resources/test-models.json
new file mode 100644
index 0000000..23895c9
--- /dev/null
+++ b/exec/jdbc/src/test/resources/test-models.json
@@ -0,0 +1,77 @@
+{
+  version: '1.0',
+   schemas: [
+     {
+       name: 'DONUTS',
+       tables: [
+         {
+           name: 'DONUTS',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json',
+             useReferenceInterpreter: 'true'
+           }
+         }
+       ]
+     },
+     {
+       name: 'HR',
+       tables: [
+         {
+           name: 'EMPLOYEES',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json',
+             useReferenceInterpreter: 'true'
+           }
+         },
+         {
+           name: 'DEPARTMENTS',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json',
+             useReferenceInterpreter: 'true'
+           }
+         },
+         {
+           name: 'EMP',
+           type: 'view',
+           sql: 'select _MAP[\'deptId\'] as deptid, cast(_MAP[\'lastName\'] as varchar) as lastName from employees'
+         },
+         {
+           name: 'DEPT',
+           type: 'view',
+           sql: 'select _MAP[\'deptId\'] as deptid, _MAP[\'name\'] as name from departments'
+         }
+       ]
+     },
+     {
+       name: 'FOODMART',
+       tables: [
+         {
+           name: 'PRODUCT_CLASS',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json',
+             useReferenceInterpreter: 'true'
+           }
+         },
+         {
+           name: 'TIME_BY_DAY',
+           type: 'custom',
+           factory: 'org.apache.drill.jdbc.DrillTable$Factory',
+           operand: {
+             path: '/donuts.json',
+             useReferenceInterpreter: 'true'
+           }
+         }
+       ]
+     }
+   ]
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/test_null_op.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/test_null_op.json b/exec/jdbc/src/test/resources/test_null_op.json
new file mode 100644
index 0000000..01acead
--- /dev/null
+++ b/exec/jdbc/src/test/resources/test_null_op.json
@@ -0,0 +1,10 @@
+{
+    "intType": 1,
+    "varCharType": "val1"
+}
+{
+    "varCharType": "val2"
+}
+{
+    "intType": 2
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/jdbc/src/test/resources/test_true_false_op.json
----------------------------------------------------------------------
diff --git a/exec/jdbc/src/test/resources/test_true_false_op.json b/exec/jdbc/src/test/resources/test_true_false_op.json
new file mode 100644
index 0000000..9e29ddc
--- /dev/null
+++ b/exec/jdbc/src/test/resources/test_true_false_op.json
@@ -0,0 +1,11 @@
+{
+    "booleanType": false,
+    "data" : "set to false"
+}
+{
+    "booleanType": true,
+    "data" : "set to true"
+}
+{
+    "data" : "not set"
+}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/exec/pom.xml
----------------------------------------------------------------------
diff --git a/exec/pom.xml b/exec/pom.xml
index 399f788..908d3bc 100644
--- a/exec/pom.xml
+++ b/exec/pom.xml
@@ -33,5 +33,7 @@
   <modules>
     <module>bufferl</module>
     <module>java-exec</module>
+    <module>jdbc</module>
+    <!-- <module>jdbc-all</module> -->
   </modules>
 </project>

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/2ae4a5f0/jdbc-all/example-conf/drill-module.conf
----------------------------------------------------------------------
diff --git a/jdbc-all/example-conf/drill-module.conf b/jdbc-all/example-conf/drill-module.conf
deleted file mode 100644
index 082b8e6..0000000
--- a/jdbc-all/example-conf/drill-module.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-drill: {
-  logical: {
-    operator.packages: ["org.apache.drill.common.logical.data"],
-    expression.packages: ["org.apache.drill.common.expression"],
-    function.packages: ["org.apache.drill.common.expression"],
-    storage.packages: []
-  },
-  physical: {
-    operator.packages: ["org.apache.drill.common.physical.pop"]
-  }
-}
-
-drill.exec.storage.packages += org.apache.drill.exec.store.mock