Posted to commits@drill.apache.org by ja...@apache.org on 2014/06/11 05:52:24 UTC

[39/61] [abbrv] git commit: DRILL-760: Move information schema to use POJO record reader

DRILL-760: Move information schema to use POJO record reader


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/0dde773b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/0dde773b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/0dde773b

Branch: refs/heads/master
Commit: 0dde773b7f7a56fda4490f98175d71271b10bf41
Parents: 9b22d2c
Author: vkorukanti <ve...@gmail.com>
Authored: Thu Jun 5 07:54:24 2014 -0700
Committer: vkorukanti <ve...@gmail.com>
Committed: Mon Jun 9 13:34:13 2014 -0700

----------------------------------------------------------------------
 .../exec/planner/logical/DrillViewTable.java    |   4 +
 .../exec/store/ischema/EmptyVectorSet.java      | 240 -------------
 .../drill/exec/store/ischema/FixedTable.java    |  93 -----
 .../store/ischema/InfoSchemaBatchCreator.java   |   2 +-
 .../exec/store/ischema/InfoSchemaGroupScan.java |   1 -
 .../store/ischema/InfoSchemaStoragePlugin.java  |   3 -
 .../exec/store/ischema/InfoSchemaTable.java     | 221 ++++++------
 .../drill/exec/store/ischema/OptiqProvider.java | 246 --------------
 .../drill/exec/store/ischema/PipeProvider.java  |  80 -----
 .../exec/store/ischema/RecordGenerator.java     | 171 ++++++++++
 .../drill/exec/store/ischema/Records.java       | 117 +++++++
 .../drill/exec/store/ischema/RowProvider.java   |  27 --
 .../exec/store/ischema/RowRecordReader.java     | 133 --------
 .../drill/exec/store/ischema/SelectedTable.java |  32 +-
 .../drill/exec/store/ischema/VectorSet.java     |  38 ---
 .../drill/exec/store/ischema/OrphanSchema.java  |  94 ------
 .../exec/store/ischema/TestOrphanSchema.java    | 167 ---------
 .../exec/store/ischema/TestTableProvider.java   | 171 ----------
 .../apache/drill/jdbc/test/TestJdbcQuery.java   | 318 +----------------
 .../apache/drill/jdbc/test/TestMetadataDDL.java | 337 +++++++++++++++++++
 20 files changed, 766 insertions(+), 1729 deletions(-)
----------------------------------------------------------------------
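
The net effect: the hand-rolled vector plumbing (EmptyVectorSet, FixedTable, RowRecordReader,
PipeProvider, RowProvider, OptiqProvider) is deleted, and each INFORMATION_SCHEMA table instead
collects plain Java objects that the existing PojoRecordReader turns into a record batch. A
minimal sketch of the new pattern, with a hypothetical one-column record type (ExampleRecord is
illustrative, not part of the commit):

    import com.google.common.collect.ImmutableList;
    import org.apache.drill.exec.store.RecordReader;
    import org.apache.drill.exec.store.pojo.PojoRecordReader;

    /** Each row is a POJO; its public fields become the table's columns. */
    public class ExampleRecord {
      public final String NAME;

      public ExampleRecord(String name) {
        this.NAME = name;
      }

      /** Collect rows, then hand them to PojoRecordReader (Class + Iterator, as used in this commit). */
      public static RecordReader exampleReader() {
        return new PojoRecordReader<>(ExampleRecord.class,
            ImmutableList.of(new ExampleRecord("a"), new ExampleRecord("b")).iterator());
      }
    }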


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
index 453aa15..8ce6af3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/logical/DrillViewTable.java
@@ -66,4 +66,8 @@ public class DrillViewTable implements TranslatableTable{
   public TableType getJdbcTableType() {
     return TableType.VIEW;
   }
+
+  public String getViewSql() {
+    return view.getSql();
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
deleted file mode 100644
index 89c9c1e..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/EmptyVectorSet.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.ischema;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.drill.common.expression.ExpressionPosition;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.common.types.TypeProtos.MinorType;
-import org.apache.drill.common.types.Types;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.vector.AllocationHelper;
-import org.apache.drill.exec.vector.IntVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.VarCharVector;
-
-/**
- * Manages the value vectors used to implement columns in a record batch.
- * The vectors themselves are created by subclasses, so this class
- * handles the generic handling of the vectors.
- */
-public abstract class EmptyVectorSet implements VectorSet {
-
-  protected List<ValueVector> vectors;
-
-  /**
-   * Prepare to construct a new set of vectors.
-   * The actual vectors will be created by subclasses
-   * by the time our "next" procedure is invoked.
-   */
-  public EmptyVectorSet() {
-    vectors = new ArrayList<ValueVector>();
-  }
-
-  /**
-   * Prepare to read the next batch of rows.
-   * @param maxRows
-   */
-  @Override
-  public void beginBatch(int maxRows) {
-
-    // Allocate memory for each column (value vector)
-    for (ValueVector v: vectors) {
-      AllocationHelper.allocate(v, maxRows, 100); // TODO: later, use configured size
-    }
-  }
-
-
-  /**
-   * Write a row to the value vectors.
-   * This is a routine to "assign generic objects to generic ValueVectors"
-   * which can be overridden to optimize for fixed types of vectors and
-   * fixed types of values.
-   * @param index - the position within the value vectors.
-   * @param row - the objects to write into the vectors
-   * @return true if there was room to write all the values.
-   */
-  @Override
-  public boolean writeRowToVectors(int index, Object[] row) {
-    for (int i=0; i<row.length; i++) {
-      if (!setSafe(vectors.get(i), index, row[i])) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-
-
-  /**
-   * Signal the end of the current batch.
-   * @param actualRowCount
-   */
-  @Override
-  public void endBatch(int actualRowCount) {
-
-    // Finalize each of the value vectors.
-    for (ValueVector v: vectors) {
-      v.getMutator().setValueCount(actualRowCount);
-    }
-  }
-
-  /**
-   * When everything is done, free up the resources.
-   */
-  @Override
-  public void cleanup() {
-    for (ValueVector v: vectors) {
-      v.close();
-    }
-  }
-
-
-  /**
-   * Make the value vectors visible to whomever needs them.
-   */
-  public List<ValueVector> getValueVectors() {
-    return vectors;
-  }
-
-
-  /**
-   * Estimate how many rows will fit in a given amount of memory.
-   * Perfect estimates are nice, but things work out OK if
-   * the estimates are a bit off.
-   */
-  @Override
-  public int getEstimatedRowCount(int bufSize) {
-    return Math.max(1, bufSize/getEstimatedRowSize());
-  }
-
-
-
-  /**
-   * Estimate the size of an average row. Used for allocating memory.
-   * Override when more information is known about the data.
-   * @return bytes per row.
-   */
-  protected int getEstimatedRowSize() {
-
-    // Add up the sizes of the vectors
-    int size = 0;
-    for (ValueVector v: vectors) {
-      size += TypeHelper.getSize(v.getField().getType());
-    }
-    return size;
-  }
-
-
-  /**
-   * Helper function to create value vectors for a set of columns.
-   * @param names - the names of the fields
-   * @param types - the major types of the fields
-   * @param allocator - a buffer allocator
-   */
-  protected void createVectors(String[] names, MajorType[] types, BufferAllocator allocator) {
-    vectors = new ArrayList<ValueVector>(names.length);
-    for (int i=0; i<names.length; i++) {
-      vectors.add(createVector(names[i], types[i], allocator));
-    }
-  }
-
-
-  /**
-   * Create a value vector for a single column.
-   * @param name - the name of the field
-   * @param type - the type of the field
-   * @param allocator - a buffer allocator
-   * @return the new value vector.
-   */
-  private static ValueVector createVector(String name, MajorType type, BufferAllocator allocator) {
-    return TypeHelper.getNewVector(field(name, type), allocator);
-  }
-
-
-  /**
-   * Helper function to create a MaterializedField, used to create a ValueVector.
-   * @param name - the name of the field
-   * @param majorType - the type of the field
-   * @return the MaterializedField
-   */
-  private static MaterializedField field(String name, MajorType majorType) {
-    return MaterializedField.create(SchemaPath.getSimplePath(name), majorType);
-  }
-
-
-  //////////////////////////////////////////////////////////////////
-  //
-  // The following section contains wrappers around ValueVectors.
-  // The wrappers make it easier to create vectors and set values.
-  //
-  // A different approach is to enhance TypeHelper to provide
-  // a uniform way to "setSafe" the common Java types into the type vectors.
-  // (It does that already for some types, but Strings are a particular nuisance.)
-  //
-  // For now, only types used in information schema are implemented.
-  //
-  ///////////////////////////////////////////////////////////////////
-  static final Charset UTF8 = Charset.forName("UTF-8");
-
-
-  // Here are the types used in information schema.
-  public static final MajorType VARCHAR = Types.required(MinorType.VARCHAR);
-  public static final MajorType INT = Types.required(MinorType.INT);
-  //public static final MajorType NULLABLINT = Types.optional(MinorType.INT);
-
-
-  /**
-   * A generic routine to set a Java value into a value vector. It assumes the types are compatible.
-   * When a subclass knows the types of its columns, it should use the strongly typed routines instead.
-   * <P>
-   * Note the value corresponds to what would be received by a varargs procedure.
-   * Also note we are switching on minor type. We really should switch on major type, but it is not an enum or ordinal.
-   * @return true if the value was successfully set.
-   */
-  protected static boolean setSafe(ValueVector vector, int index, Object value) {
-    switch (vector.getField().getType().getMinorType()) {
-    case INT:       return setSafe((IntVector)vector, index, (int)value);
-    case VARCHAR:   return setSafe((VarCharVector)vector, index, (String)value);
-    default:        return false;
-    }
-  }
-
-
-  /**
-   * Strongly typed routines for setting a Java value into a value vector.
-   * @return true if the value was successfully set.
-   */
-  protected static boolean setSafe(VarCharVector v, int index, String string) {
-    return v.getMutator().setSafe(index, string.getBytes(UTF8));
-  }
-
-  protected static boolean setSafe(IntVector v, int index, int value) {
-    return v.getMutator().setSafe(index, value);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
deleted file mode 100644
index a2f289d..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/FixedTable.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.sql.type.SqlTypeFactoryImpl;
-import org.eigenbase.sql.type.SqlTypeName;
-
-/**
- * A FixedTable represents a table where the fields are always the same name and type.
- * Since the names and types are unchanging, it is easy to create value vectors during startup
- * and to use strong types when storing values in the vectors.
- */
-public class FixedTable extends EmptyVectorSet  {
-  String tableName;
-  String[] fieldNames;
-  MajorType[] fieldTypes;
-
-  /* (non-Javadoc)
-   * @see org.apache.drill.exec.store.ischema.VectorSet#createVectors(org.apache.drill.exec.memory.BufferAllocator)
-   */
-  @Override
-  public void createVectors(BufferAllocator allocator) {
-    createVectors(fieldNames, fieldTypes, allocator);
-  }
-  
-  /**
-   * Construct a generic table with an unchanging schema. 
-   * We leave it to subclasses to define the fields and types.
-   * @param tableName - name of the table
-   * @param fieldNames - names of the fields
-   * @param fieldTypes - major types of the fields
-   */
-  FixedTable(String tableName, String[] fieldNames, MajorType[] fieldTypes) {
-    this.tableName = tableName;
-    this.fieldNames = fieldNames;
-    this.fieldTypes = fieldTypes;
-  }
-  
-  public String getName() {
-    return tableName;
-  }
-  
-  
-  
-  /**
-   * Helper function to get the Optiq Schema type from a Drill Type.
-   * Again, we only do it for the information schema types, so it needs to be generalized.
-   * (This probably already exists elsewhere.)
-   */
-  
-  static public RelDataType getRelDataType(RelDataTypeFactory typeFactory, MajorType type) {
-    switch (type.getMinorType()) {
-    case INT:    return typeFactory.createSqlType(SqlTypeName.INTEGER);
-    case VARCHAR: return typeFactory.createSqlType(SqlTypeName.VARCHAR);
-    default: return null; // TODO - throw exception?
-    }
-  }
-    
-  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
-    
-    // Convert the array of Drill types to an array of Optiq types
-    RelDataType[] relTypes = new RelDataType[fieldTypes.length];
-    for (int i=0; i<fieldTypes.length; i++) {
-      relTypes[i] = getRelDataType(typeFactory, fieldTypes[i]);
-    }
-    
-    // Create a struct type to represent the 
-    return typeFactory.createStructType(relTypes, fieldNames);
-  }
-
-  
-  
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
index a7e8146..cc9ee78 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaBatchCreator.java
@@ -32,7 +32,7 @@ public class InfoSchemaBatchCreator implements BatchCreator<InfoSchemaSubScan>{
 
   @Override
   public RecordBatch getBatch(FragmentContext context, InfoSchemaSubScan config, List<RecordBatch> children) throws ExecutionSetupException {
-    RecordReader rr = new RowRecordReader(context, config.getTable(), context.getRootSchema());
+    RecordReader rr = config.getTable().getRecordReader(context.getRootSchema());
     return new ScanBatch(config, context, Collections.singleton(rr).iterator());
   }
 }
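
The SelectedTable change is summarized in the diffstat but not shown here; given the
RecordGenerator API introduced below, its getRecordReader presumably ties the pieces together
along these lines (a sketch under that assumption, not the committed code):

    public RecordReader getRecordReader(SchemaPlus rootSchema) {
      RecordGenerator generator = table.getRecordGenerator(); // e.g. new RecordGenerator.Tables()
      generator.scanSchema(rootSchema);    // visit schemas/tables/fields, collecting POJO records
      return generator.getRecordReader();  // wrap the collected records in a PojoRecordReader
    }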

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
index 7337cea..01e5a60 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaGroupScan.java
@@ -31,7 +31,6 @@ import org.apache.drill.exec.physical.base.PhysicalOperator;
 import org.apache.drill.exec.physical.base.Size;
 import org.apache.drill.exec.physical.base.SubScan;
 import org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint;
-import org.apache.drill.exec.store.dfs.easy.EasyGroupScan;
 
 import com.fasterxml.jackson.annotation.JsonCreator;
 import com.fasterxml.jackson.annotation.JsonProperty;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
index db91f08..0997152 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaStoragePlugin.java
@@ -30,13 +30,10 @@ import net.hydromatic.optiq.Table;
 import org.apache.drill.common.JSONOptions;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.logical.StoragePluginConfig;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.rpc.user.DrillUser;
 import org.apache.drill.exec.rpc.user.UserSession;
 import org.apache.drill.exec.server.DrillbitContext;
 import org.apache.drill.exec.store.AbstractSchema;
 import org.apache.drill.exec.store.AbstractStoragePlugin;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
index 6ab41b0..a0caf1e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/InfoSchemaTable.java
@@ -17,136 +17,159 @@
  */
 package org.apache.drill.exec.store.ischema;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists; // Guava's Lists, not the jcommander-internal copy
 import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.exec.vector.IntVector;
-import org.apache.drill.exec.vector.VarCharVector;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.common.types.Types;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeFactory;
+import org.eigenbase.sql.type.SqlTypeName;
+
+import java.util.List;
+
+/** Base class of tables in INFORMATION_SCHEMA. Defines the table (fields and types) */
+public abstract class InfoSchemaTable {
+
+  public static class Field {
+    public String name;
+    public MajorType type;
+
+    public static Field create(String name, MajorType type) {
+      Field field = new Field();
+      field.name = name;
+      field.type = type;
+      return field;
+    }
+  }
 
-/**
- * InfoSchemaTable defines the various Information Schema tables.
- * <p>
- * All the information schema tables are grouped together for convenience.
- * For each specific table, the corresponding class:
- * <p>Declares the table name.
- * <p>Declares the field names and types.
- * <p>Optionally defines a typed method to write a row of data to the vectors.
- *    If not defined here, FixedTable will kick in and do the job using
- *    a slower, generic method.
- */
-public class InfoSchemaTable{
+  public static final MajorType VARCHAR = Types.required(MinorType.VARCHAR);
+  public static final MajorType INT = Types.required(MinorType.INT);
 
-  /**
-   * Layout for the SCHEMATA table.
-   */
-  public static class Schemata extends FixedTable {
-    static final String tableName = "SCHEMATA";
-    static final String[] fieldNames = {"CATALOG_NAME", "SCHEMA_NAME", "SCHEMA_OWNER", "TYPE"};
-    static final MajorType[] fieldTypes = {VARCHAR,         VARCHAR,       VARCHAR, VARCHAR};
+  private final String tableName;
+  private final List<Field> fields;
 
-    public Schemata() {
-      super(tableName, fieldNames, fieldTypes);
+  public InfoSchemaTable(String tableName, List<Field> fields) {
+    this.tableName = tableName;
+    this.fields = fields;
+  }
+
+  public static RelDataType getRelDataType(RelDataTypeFactory typeFactory, MajorType type) {
+    switch (type.getMinorType()) {
+    case INT: return typeFactory.createSqlType(SqlTypeName.INTEGER);
+    case VARCHAR: return typeFactory.createSqlType(SqlTypeName.VARCHAR);
+    default: throw new UnsupportedOperationException("Only INT and VARCHAR types are supported in INFORMATION_SCHEMA");
     }
+  }
+
+  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
 
-    // Optional ...
-    public boolean writeRowToVectors(int index, Object[] row) {
-      return
-          setSafe((VarCharVector)vectors.get(0), index, (String)row[0])  &&
-          setSafe((VarCharVector)vectors.get(1), index,  (String)row[1]) &&
-          setSafe((VarCharVector)vectors.get(2), index,  (String)row[2]) &&
-          setSafe((VarCharVector)vectors.get(3), index,  (String)row[3]);
+    // Convert the Drill field list into parallel lists of Optiq types and field names
+    List<RelDataType> relTypes = Lists.newArrayList();
+    List<String> fieldNames = Lists.newArrayList();
+    for (Field field : fields) {
+      relTypes.add(getRelDataType(typeFactory, field.type));
+      fieldNames.add(field.name);
     }
+
+    return typeFactory.createStructType(relTypes, fieldNames);
   }
 
-  /**
-   * Layout for the TABLES table.
-   */
-  public static class Tables extends FixedTable {
-    static final String tableName = "TABLES";
-    static final String[] fieldNames = {"TABLE_CATALOG", "TABLE_SCHEMA", "TABLE_NAME", "TABLE_TYPE"};
-    static final MajorType[] fieldTypes = {VARCHAR,          VARCHAR,        VARCHAR,      VARCHAR};
+  public abstract RecordGenerator getRecordGenerator();
 
-    public Tables() {
-      super(tableName, fieldNames, fieldTypes);
+  /** Layout for the CATALOGS table. */
+  public static class Catalogs extends InfoSchemaTable {
+    private static final List<Field> fields = ImmutableList.of(
+        Field.create("CATALOG_NAME", VARCHAR),
+        Field.create("CATALOG_DESCRIPTION", VARCHAR),
+        Field.create("CATALOG_CONNECT", VARCHAR));
+
+    Catalogs() {
+      super("CATALOGS", fields);
     }
 
-    // Optional ...
-    public boolean writeRowToVectors(int index, Object[] row) {
-      return
-          setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
-          setSafe((VarCharVector)vectors.get(1), index, (String)row[1]) &&
-          setSafe((VarCharVector)vectors.get(2), index, (String)row[2]) &&
-          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]);
+    @Override
+    public RecordGenerator getRecordGenerator() {
+      return new RecordGenerator.Catalogs();
     }
   }
 
+  /** Layout for the SCHEMATA table. */
+  public static class Schemata extends InfoSchemaTable {
+    private static final List<Field> fields = ImmutableList.of(
+        Field.create("CATALOG_NAME", VARCHAR),
+        Field.create("SCHEMA_NAME", VARCHAR),
+        Field.create("SCHEMA_OWNER", VARCHAR),
+        Field.create("TYPE", VARCHAR));
 
-  /**
-   * Layout for the COLUMNS table.
-   */
-  public static class Columns extends FixedTable {
-    static final String tableName = "COLUMNS";
-    static final String[] fieldNames = {"TABLE_CATALOG",     "TABLE_SCHEMA",     "TABLE_NAME",    "COLUMN_NAME",
-      "ORDINAL_POSITION",   "IS_NULLABLE",      "DATA_TYPE",     "CHARACTER_MAXIMUM_LENGTH",
-      "NUMERIC_PRECISION_RADIX", "NUMERIC_SCALE", "NUMERIC_PRECISION"};
-    static final MajorType[] fieldTypes= { VARCHAR,         VARCHAR,       VARCHAR,      VARCHAR,
-      INT,             VARCHAR,        VARCHAR,     INT,
-      INT,             INT,            INT};
-    public Columns() {
-      super(tableName, fieldNames, fieldTypes);
+    public Schemata() {
+      super("SCHEMATA", fields);
     }
 
-
-    // Optional ...
-    public boolean writeRowToVectors(int index, Object[] row) {
-      return
-          setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
-          setSafe((VarCharVector)vectors.get(1), index, (String)row[1]) &&
-          setSafe((VarCharVector)vectors.get(2), index, (String)row[2]) &&
-          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]) &&
-          setSafe((IntVector)vectors.get(4), index, (int)row[4])        &&
-          setSafe((VarCharVector)vectors.get(5), index, (String)row[5])     &&
-          setSafe((VarCharVector)vectors.get(6), index, (String)row[6]) &&
-          setSafe((IntVector)vectors.get(7), index, (int)row[7]) &&
-          setSafe((IntVector)vectors.get(8), index, (int)row[8])        &&
-          setSafe((IntVector)vectors.get(9), index, (int)row[9])        &&
-          setSafe((IntVector)vectors.get(10), index, (int)row[10]);
+    @Override
+    public RecordGenerator getRecordGenerator() {
+      return new RecordGenerator.Schemata();
     }
   }
 
+  /** Layout for the TABLES table. */
+  public static class Tables extends InfoSchemaTable {
+    private static final List<Field> fields = ImmutableList.of(
+        Field.create("TABLE_CATALOG", VARCHAR),
+        Field.create("TABLE_SCHEMA", VARCHAR),
+        Field.create("TABLE_NAME", VARCHAR),
+        Field.create("TABLE_TYPE", VARCHAR));
 
-  /**
-   * Layout for the VIEWS table.
-   */
-  static public class Views extends FixedTable {
-    static final String tableName = "VIEWS";
-    static final String[] fieldNames = {"TABLE_CATALOG", "TABLE_SHEMA", "TABLE_NAME", "VIEW_DEFINITION"};
-    static final MajorType[] fieldTypes = {VARCHAR,         VARCHAR,       VARCHAR,      VARCHAR};
-
-    Views() {
-      super(tableName, fieldNames, fieldTypes);
+    public Tables() {
+      super("TABLES", fields);
     }
 
-    // Optional ...
-    public boolean writeRowToVectors(int index, Object[] row) {
-      return setSafe((VarCharVector)vectors.get(0), index, (String)row[0]) &&
-          setSafe((VarCharVector)vectors.get(1), index, (String)row[1])    &&
-          setSafe((VarCharVector)vectors.get(2), index, (String)row[2])    &&
-          setSafe((VarCharVector)vectors.get(3), index, (String)row[3]);
+    @Override
+    public RecordGenerator getRecordGenerator() {
+      return new RecordGenerator.Tables();
     }
   }
 
+  /** Layout for the VIEWS table. */
+  public static class Views extends InfoSchemaTable {
+    private static final List<Field> fields = ImmutableList.of(
+        Field.create("TABLE_CATALOG", VARCHAR),
+        Field.create("TABLE_SCHEMA", VARCHAR),
+        Field.create("TABLE_NAME", VARCHAR),
+        Field.create("VIEW_DEFINITION", VARCHAR));
 
-  /**
-   * Layout for the CATALOGS table.
-   */
-  static public class Catalogs extends FixedTable {
-    static final String tableName = "CATALOGS";
-    static final String[] fieldNames = {"CATALOG_NAME", "CATALOG_DESCRIPTION", "CATALOG_CONNECT"};
-    static final MajorType[] fieldTypes = {VARCHAR,      VARCHAR,               VARCHAR};
+    public Views() {
+      super("VIEWS", fields);
+    }
 
-    Catalogs() {
-      super(tableName, fieldNames, fieldTypes);
+    @Override
+    public RecordGenerator getRecordGenerator() {
+      return new RecordGenerator.Views();
     }
   }
 
+  /** Layout for the COLUMNS table. */
+  public static class Columns extends InfoSchemaTable {
+    private static final List<Field> fields = ImmutableList.of(
+        Field.create("TABLE_CATALOG", VARCHAR),
+        Field.create("TABLE_SCHEMA", VARCHAR),
+        Field.create("TABLE_NAME", VARCHAR),
+        Field.create("COLUMN_NAME", VARCHAR),
+        Field.create("ORDINAL_POSITION", INT),
+        Field.create("IS_NULLABLE", VARCHAR),
+        Field.create("DATA_TYPE", VARCHAR),
+        Field.create("CHARACTER_MAXIMUM_LENGTH", INT),
+        Field.create("NUMERIC_PRECISION_RADIX", INT),
+        Field.create("NUMERIC_SCALE", INT),
+        Field.create("NUMERIC_PRECISION", INT));
+
+    public Columns() {
+      super("COLUMNS", fields);
+    }
 
+    @Override
+    public RecordGenerator getRecordGenerator() {
+      return new RecordGenerator.Columns();
+    }
+  }
 }
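
Adding a table under this scheme means declaring its fields and pairing it with a generator. An
illustrative, hypothetical example following the pattern above (FILES is not part of this commit):

    /** Layout for a hypothetical FILES table. */
    public static class Files extends InfoSchemaTable {
      private static final List<Field> fields = ImmutableList.of(
          Field.create("FILE_NAME", VARCHAR),
          Field.create("FILE_SIZE", INT));

      public Files() {
        super("FILES", fields);
      }

      @Override
      public RecordGenerator getRecordGenerator() {
        return new RecordGenerator.Files(); // a matching generator class would also be needed
      }
    }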

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
deleted file mode 100644
index 1aee579..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/OptiqProvider.java
+++ /dev/null
@@ -1,246 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import net.hydromatic.optiq.Schema;
-import net.hydromatic.optiq.SchemaPlus;
-import net.hydromatic.optiq.Table;
-import net.hydromatic.optiq.jdbc.JavaTypeFactoryImpl;
-
-import org.apache.drill.exec.store.AbstractSchema;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeField;
-import org.eigenbase.sql.type.SqlTypeName;
-
-/**
- * OptiqProvider provides data for the various tables in the information schema.
- * Each table has its own nested class, keeping them grouped together.
- * Note "writeRow(...)" must match the values expected by the corresponding table.
- * <p>
- * To keep code concise, each class inherits from both an OptiqScanner
- * and a PipeProvider. Double inheritance is a problem in Java, so this
- * code needs to be cleaned up. For the moment, OptiqScanner artificially
- * inherits from PipeProvider.
- */
-public class OptiqProvider  {
-
-  /**
-   * Provide data for TABLES table.
-   */
-  static public class Tables extends Abstract {
-    Tables(SchemaPlus root) {
-      super(root);
-    }
-
-    @Override
-    public boolean visitTableName(String schema, String tableName) {
-      return writeRow("DRILL", schema, tableName, Schema.TableType.TABLE.toString());
-    }
-  }
-
-
-  /**
-   * Provide data for SCHEMATA table.
-   */
-  static public class Schemata extends Abstract {
-    @Override
-    public boolean visitSchema(String schemaName, SchemaPlus schema) {
-      if (shouldVisitSchema(schema) && schemaName != null && schemaName != "") {
-        AbstractSchema as = schema.unwrap(AbstractSchema.class);
-        writeRow("DRILL", schemaName, "<owner>", as.getTypeName());
-      }
-      return false;
-    }
-
-    Schemata(SchemaPlus root) {
-      super(root);
-    }
-  }
-
-
-
-  /**
-   * Provide data for COLUMNS data.
-   */
-  static public class Columns extends Abstract {
-
-    public Columns(SchemaPlus root) {
-      super(root);
-    }
-
-    @Override
-    public boolean visitField(String schemaName, String tableName, RelDataTypeField field) {
-      String columnName = field.getName();
-      RelDataType type = field.getType();
-      SqlTypeName sqlType = type.getSqlTypeName();
-
-      int position = field.getIndex();
-      String nullable;
-      if (type.isNullable()) nullable = "YES";
-      else                   nullable = "NO";
-      String sqlTypeName = sqlType.getName();
-      int radix = (sqlType == SqlTypeName.DECIMAL)?10:-1;        // TODO: where do we get radix?
-      int charMaxLen = -1;  // TODO: where do we get char length?
-      int scale = (sqlType.allowsPrec())?type.getScale(): -1;
-      int precision = (sqlType.allowsScale())?type.getPrecision(): -1;
-
-      writeRow("DRILL", schemaName, tableName, columnName, position, nullable, sqlTypeName, charMaxLen, radix, scale, precision);
-
-      return false;
-    }
-  }
-
-
-
-  /**
-   * Provide data for VIEWS table
-   */
-  public static class Views extends Abstract {
-    public Views(SchemaPlus root) {
-      super(root);
-    }
-    @Override
-    public boolean visitTable(String schemaName, String tableName, Table table) {
-      if (table.getJdbcTableType() == Schema.TableType.VIEW) {
-        writeRow("DRILL", schemaName, tableName, "TODO: GetViewDefinition");
-      }
-      return false;
-    }
-  }
-
-  public static class Catalogs extends Abstract {
-    public Catalogs(SchemaPlus root) {
-      super(root);
-    }
-    @Override
-    public void generateRows() {
-      writeRow("DRILL", "The internal metadata used by Drill", "");
-    }
-  }
-
-
-  /**
-   * An abstract class which helps generate data. It does the actual scanning of an Optiq schema,
-   * but relies on a subclass to provide a "visit" routine to write out data.
-   */
-  public static class Abstract extends OptiqScanner {
-    SchemaPlus root;
-
-    protected Abstract(SchemaPlus root) {
-      this.root = root;
-    }
-
-
-    /**
-     * Start writing out rows.
-     */
-    @Override
-    public void generateRows() {
-
-      // Scan the root schema for subschema, tables, columns.
-      scanSchema(root);
-    }
-  }
-
-
-
-  /**
-   * An OptiqScanner scans the Optiq schema, generating rows for each
-   * schema, table or column. It is intended to be subclassed, where the
-   * subclass does what it needs when visiting a Optiq schema structure.
-   */
-  // We would really prefer multiple inheritance from both OptiqScanner and PipeProvider,
-  //   but making one a subclass of the other works for now.
-  //   TODO: Refactor to avoid subclassing of what should be an unrelated class.
-  abstract static class OptiqScanner extends PipeProvider {
-
-
-    /**
-     *  Each visitor implements at least one of the the following methods.
-     *    If the schema visitor returns true, then visit the tables.
-     *    If the table visitor returns true, then visit the fields (columns).
-     */
-    public boolean visitSchema(String schemaName, SchemaPlus schema) {
-      return shouldVisitSchema(schema);
-    }
-    public boolean visitTableName(String schemaName, String tableName){return true;}
-    public boolean visitTable(String schemaName, String tableName, Table table){return true;}
-    public boolean visitField(String schemaName, String tableName, RelDataTypeField field){return true;}
-
-    protected boolean shouldVisitSchema(SchemaPlus schema) {
-      try {
-        AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
-        return drillSchema.showInInformationSchema();
-      } catch(ClassCastException e) {
-        // ignore and return true as this is not a drill schema
-      }
-      return true;
-    }
-
-    /**
-     * Start scanning an Optiq Schema.
-     * @param root - where to start
-     */
-    protected void scanSchema(SchemaPlus root) {
-      scanSchema(root.getName(), root);
-    }
-
-    /**
-     * Recursively scan the schema, invoking the visitor as appropriate.
-     * @param schemaPath - the path to the current schema, so far,
-     * @param schema - the current schema.
-     * @param visitor - the methods to invoke at each entity in the schema.
-     */
-    private void scanSchema(String schemaPath, SchemaPlus schema) {
-
-      // If we have an empty schema path, then don't insert a leading dot.
-      String separator;
-      if (schemaPath == "") separator = "";
-      else                  separator = ".";
-
-      // Recursively scan the subschema.
-      for (String name: schema.getSubSchemaNames()) {
-        scanSchema(schemaPath + separator + name, schema.getSubSchema(name));
-      }
-
-      // Visit this schema and if requested ...
-      if (visitSchema(schemaPath, schema)) {
-
-        // ... do for each of the schema's tables.
-        for (String tableName: schema.getTableNames()) {
-          if(visitTableName(schemaPath, tableName)){
-            Table table = schema.getTable(tableName);
-
-            // Visit the table, and if requested ...
-            if (visitTable(schemaPath,  tableName, table)) {
-
-              // ... do for each of the table's fields.
-              RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl()); // TODO: Is this correct?
-              for (RelDataTypeField field: tableRow.getFieldList()) {
-
-                // Visit the field.
-                visitField(schemaPath,  tableName, field);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
deleted file mode 100644
index 30d7d76..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/PipeProvider.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import java.util.ArrayList;
-import java.util.ListIterator;
-
-/**
- * PipeProvider sets up the framework so some subclass can "write" rows
- * to a an internal pipe, which another class (RowRecordReader) can "read" to
- * build up a record batch.
- * <p>
- * This class helps work around the situation where the rows cannot be conveniently
- * be generated one at a time by an iterator. Logically, the "writer" writes rows to the pipe,
- * while a "reader" reads rows from the pipe. The vocabulary implies two separate threads,
- * but the current implementation is actually just a wrapper around a List.
- */
-public abstract class PipeProvider implements RowProvider {
-  ArrayList<Object[]> pipe = null;
-  ListIterator<Object[]> iter;
-  
-  /**
-   * Method to generate and write rows to the pipe.
-   */
-  abstract void generateRows();
-  
-  /**
-   * true if there are rows waiting to be "read".
-   */
-  public boolean hasNext() {
-    if (pipe == null) {
-      pipe = new ArrayList<Object[]>();
-      generateRows();
-      iter = pipe.listIterator();
-    }
-    return iter.hasNext();
-  }
-  
-  /**
-   * Read the next row from the pipe. 
-   * Should only be called after "hasNext" indicates there are more rows.
-   */
-  public Object[] next() {
-    return iter.next();
-  }
-  
-  /**
-   * Sometimes, a row cannot be immediately processed. Put the last row back and re-read it next time.
-   */
-  public void previous() {
-    iter.previous();
-  }
-  
-  /**
-   * Write a row to the pipe.
-   * @param values - a varargs list of values, in the same order as the RecordReader's value vectors.
-   * @return true if the row was successfully written to the pipe.
-   */
-  protected boolean writeRow(Object...values) {
-    pipe.add(values);
-    return true;
-  }
-  
-}
-  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RecordGenerator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RecordGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RecordGenerator.java
new file mode 100644
index 0000000..a8792fe
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RecordGenerator.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.ischema;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import net.hydromatic.optiq.Schema.TableType;
+import net.hydromatic.optiq.SchemaPlus;
+import net.hydromatic.optiq.Table;
+import net.hydromatic.optiq.jdbc.JavaTypeFactoryImpl;
+import org.apache.drill.exec.planner.logical.DrillViewTable;
+import org.apache.drill.exec.store.AbstractSchema;
+import org.apache.drill.exec.store.RecordReader;
+import org.apache.drill.exec.store.pojo.PojoRecordReader;
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeField;
+
+import java.util.List;
+
+/** Generates records for POJO RecordReader by scanning the given schema */
+public abstract class RecordGenerator {
+
+  public boolean visitSchema(String schemaName, SchemaPlus schema) {
+    return shouldVisitSchema(schema);
+  }
+
+  public boolean visitTable(String schemaName, String tableName, Table table) {
+    return true;
+  }
+
+  public boolean visitField(String schemaName, String tableName, RelDataTypeField field) {
+    return true;
+  }
+
+  protected boolean shouldVisitSchema(SchemaPlus schema) {
+    try {
+      AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
+      return drillSchema.showInInformationSchema();
+    } catch(ClassCastException e) {
+      // ignore and return true as this is not a drill schema
+    }
+    return true;
+  }
+
+  public abstract RecordReader getRecordReader();
+
+  public void scanSchema(SchemaPlus root) {
+    scanSchema(root.getName(), root);
+  }
+
+  /**
+   * Recursively scan the schema, invoking the visitor as appropriate.
+   * @param schemaPath - the path to the current schema, so far,
+   * @param schema - the current schema.
+   */
+  private void scanSchema(String schemaPath, SchemaPlus schema) {
+
+    // Recursively scan the subschema.
+    for (String name: schema.getSubSchemaNames()) {
+      scanSchema(schemaPath +
+          (schemaPath.isEmpty() ? "" : ".") + // If we have an empty schema path, then don't insert a leading dot.
+          name, schema.getSubSchema(name));
+    }
+
+    // Visit this schema and if requested ...
+    if (visitSchema(schemaPath, schema)) {
+
+      // ... do for each of the schema's tables.
+      for (String tableName: schema.getTableNames()) {
+        Table table = schema.getTable(tableName);
+        // Visit the table, and if requested ...
+        if (visitTable(schemaPath, tableName, table)) {
+
+          // ... do for each of the table's fields.
+          RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl());
+          for (RelDataTypeField field: tableRow.getFieldList()) {
+            visitField(schemaPath, tableName, field);
+          }
+        }
+      }
+    }
+  }
+
+  public static class Catalogs extends RecordGenerator {
+    @Override
+    public RecordReader getRecordReader() {
+      Records.Catalog catalogRecord = new Records.Catalog("DRILL", "The internal metadata used by Drill", "");
+      return new PojoRecordReader<>(Records.Catalog.class, ImmutableList.of(catalogRecord).iterator());
+    }
+  }
+
+  public static class Schemata extends RecordGenerator {
+    List<Records.Schema> records = Lists.newArrayList();
+
+    @Override
+    public RecordReader getRecordReader() {
+      return new PojoRecordReader<>(Records.Schema.class, records.iterator());
+    }
+
+    @Override
+    public boolean visitSchema(String schemaName, SchemaPlus schema) {
+      if (shouldVisitSchema(schema) && schemaName != null && !schemaName.isEmpty()) {
+        AbstractSchema as = schema.unwrap(AbstractSchema.class);
+        records.add(new Records.Schema("DRILL", schemaName, "<owner>", as.getTypeName()));
+      }
+      return false;
+    }
+  }
+
+  public static class Tables extends RecordGenerator {
+    List<Records.Table> records = Lists.newArrayList();
+
+    @Override
+    public RecordReader getRecordReader() {
+      return new PojoRecordReader<>(Records.Table.class, records.iterator());
+    }
+
+    @Override
+    public boolean visitTable(String schemaName, String tableName, Table table) {
+      records.add(new Records.Table("DRILL", schemaName, tableName, table.getJdbcTableType().toString()));
+      return false;
+    }
+  }
+
+  public static class Views extends RecordGenerator {
+    List<Records.View> records = Lists.newArrayList();
+
+    @Override
+    public RecordReader getRecordReader() {
+      return new PojoRecordReader<>(Records.View.class, records.iterator());
+    }
+
+    @Override
+    public boolean visitTable(String schemaName, String tableName, Table table) {
+      if (table.getJdbcTableType() == TableType.VIEW) {
+        records.add(new Records.View("DRILL", schemaName, tableName, ((DrillViewTable)table).getViewSql()));
+      }
+      return false;
+    }
+  }
+
+  public static class Columns extends RecordGenerator {
+    List<Records.Column> records = Lists.newArrayList();
+
+    @Override
+    public RecordReader getRecordReader() {
+      return new PojoRecordReader<>(Records.Column.class, records.iterator());
+    }
+
+    @Override
+    public boolean visitField(String schemaName, String tableName, RelDataTypeField field) {
+      records.add(new Records.Column("DRILL", schemaName, tableName, field));
+      return false;
+    }
+  }
+}
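
A generator is driven in two steps, scan then read; the scan walks the Optiq schema tree and the
visit callbacks accumulate POJO records. A usage sketch (rootSchema stands for whatever SchemaPlus
the caller holds, e.g. context.getRootSchema() as in InfoSchemaBatchCreator above):

    RecordGenerator generator = new RecordGenerator.Tables();
    generator.scanSchema(rootSchema);                   // fills generator.records via visitTable(...)
    RecordReader reader = generator.getRecordReader();  // a PojoRecordReader over those records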

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
new file mode 100644
index 0000000..d999346
--- /dev/null
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/Records.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.drill.exec.store.ischema;
+
+import org.eigenbase.reltype.RelDataType;
+import org.eigenbase.reltype.RelDataTypeField;
+import org.eigenbase.sql.type.SqlTypeName;
+
+public class Records {
+
+  /** Pojo object for a record in INFORMATION_SCHEMA.TABLES */
+  public static class Table {
+    public final String TABLE_CATALOG;
+    public final String TABLE_SCHEMA;
+    public final String TABLE_NAME;
+    public final String TABLE_TYPE;
+
+    public Table(String catalog, String schema, String name, String type) {
+      this.TABLE_CATALOG = catalog;
+      this.TABLE_SCHEMA = schema;
+      this.TABLE_NAME = name;
+      this.TABLE_TYPE = type;
+    }
+  }
+
+  /** Pojo object for a record in INFORMATION_SCHEMA.COLUMNS */
+  public static class Column {
+    public final String TABLE_CATALOG;
+    public final String TABLE_SCHEMA;
+    public final String TABLE_NAME;
+    public final String COLUMN_NAME;
+    public final int ORDINAL_POSITION;
+    public final String IS_NULLABLE;
+    public final String DATA_TYPE;
+    public final int CHARACTER_MAXIMUM_LENGTH;
+    public final int NUMERIC_PRECISION_RADIX;
+    public final int NUMERIC_SCALE;
+    public final int NUMERIC_PRECISION;
+
+    public Column(String catalog, String schemaName, String tableName, RelDataTypeField field) {
+      this.TABLE_CATALOG = catalog;
+      this.TABLE_SCHEMA = schemaName;
+      this.TABLE_NAME = tableName;
+
+      this.COLUMN_NAME = field.getName();
+      RelDataType type = field.getType();
+      SqlTypeName sqlType = type.getSqlTypeName();
+
+      this.ORDINAL_POSITION = field.getIndex();
+      this.IS_NULLABLE = type.isNullable() ? "YES" : "NO";
+      this.DATA_TYPE = sqlType.getName();
+      this.NUMERIC_PRECISION_RADIX = (sqlType == SqlTypeName.DECIMAL) ? 10 : -1; // TODO: where do we get radix?
+      this.CHARACTER_MAXIMUM_LENGTH = -1;  // TODO: where do we get char length?
+      this.NUMERIC_PRECISION = sqlType.allowsPrec() ? type.getPrecision() : -1;
+      this.NUMERIC_SCALE = sqlType.allowsScale() ? type.getScale() : -1;
+    }
+  }
+
+  /** Pojo object for a record in INFORMATION_SCHEMA.VIEWS */
+  public static class View {
+    public final String TABLE_CATALOG;
+    public final String TABLE_SCHEMA;
+    public final String TABLE_NAME;
+    public final String VIEW_DEFINITION;
+
+    public View(String catalog, String schema, String name, String definition) {
+      this.TABLE_CATALOG = catalog;
+      this.TABLE_SCHEMA = schema;
+      this.TABLE_NAME = name;
+      this.VIEW_DEFINITION = definition;
+    }
+  }
+
+  /** Pojo object for a record in INFORMATION_SCHEMA.CATALOGS */
+  public static class Catalog {
+    public final String CATALOG_NAME;
+    public final String CATALOG_DESCRIPTION;
+    public final String CATALOG_CONNECT;
+
+    public Catalog(String name, String description, String connect) {
+      this.CATALOG_NAME = name;
+      this.CATALOG_DESCRIPTION = description;
+      this.CATALOG_CONNECT = connect;
+    }
+  }
+
+  /** Pojo object for a record in INFORMATION_SCHEMA.SCHEMATA */
+  public static class Schema {
+    public final String CATALOG_NAME;
+    public final String SCHEMA_NAME;
+    public final String SCHEMA_OWNER;
+    public final String TYPE;
+
+    public Schema(String catalog, String name, String owner, String type) {
+      this.CATALOG_NAME = catalog;
+      this.SCHEMA_NAME = name;
+      this.SCHEMA_OWNER = owner;
+      this.TYPE = type;
+    }
+  }
+}
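
PojoRecordReader derives the output schema from the POJO's public fields, which is why these
fields carry the upper-case INFORMATION_SCHEMA column names. For example (values illustrative):

    Records.Table row = new Records.Table("DRILL", "hive.default", "orders", "TABLE");
    // Reads back as columns TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE.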

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
deleted file mode 100644
index c26c836..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowProvider.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-/**
- * An interface for providing rows of data.
- */
-public interface RowProvider {
-  public Object[] next();  // Fetch the next row of values
-  public boolean hasNext();  // true if there are rows remaining to fetch.
-  public void previous();  // Put back the last row read, so it can be reread. (Only one row can be put back)
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
deleted file mode 100644
index c578b5c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/RowRecordReader.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.ischema;
-
-import net.hydromatic.optiq.SchemaPlus;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.memory.OutOfMemoryException;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.complex.reader.FieldReader;
-
-
-/**
- * RowRecordReader is a RecordReader which creates RecordBatches by
- * reading rows one at a time. The fixed-format rows come from a "RowProvider".
- */
-public class RowRecordReader implements RecordReader {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RowRecordReader.class);
-
-  public static final long ALLOCATOR_INITIAL_RESERVATION = 1*1024*1024;
-  public static final long ALLOCATOR_MAX_RESERVATION = 20L*1000*1000*1000;
-
-  protected final VectorSet batch;
-  protected final RowProvider provider;
-  protected final FragmentContext context;
-  protected final BufferAllocator allocator;
-  protected OutputMutator output;
-  
-  private int bufSize = 256*1024;
-  private int maxRowCount;
-  /**
-   * Construct a RecordReader which uses rows from a RowProvider and puts them into a set of value vectors.
-   * @param context
-   * @param vectors
-   */
-  public RowRecordReader(FragmentContext context, VectorSet batch, RowProvider provider) throws OutOfMemoryException {
-    this.context = context;
-    this.allocator = context.getNewChildAllocator(ALLOCATOR_INITIAL_RESERVATION, ALLOCATOR_MAX_RESERVATION);
-    this.provider = provider;
-    this.batch = batch;
-  }
- 
-  public RowRecordReader(FragmentContext context, SelectedTable table, SchemaPlus rootSchema) throws OutOfMemoryException {
-    this.context = context;
-    this.allocator = context.getNewChildAllocator(ALLOCATOR_INITIAL_RESERVATION, ALLOCATOR_MAX_RESERVATION);
-    this.provider = table.getProvider(rootSchema);
-    this.batch = table.getFixedTable();
-  }
-
-  /** 
-   * Prepare to create record batches. 
-   */
-  @Override
-  public void setup(OutputMutator output) throws ExecutionSetupException {
-    this.output = output; 
-    batch.createVectors(allocator);
-    
-    // Inform Drill of the output columns. They were set up when the vector handler was created.
-    //  Note we are currently working with fixed tables.
-    output.addFields(batch.getValueVectors());
-
-    // Estimate the number of records we can hold in a RecordBatch
-    maxRowCount = batch.getEstimatedRowCount(bufSize);
-  }
-
-
-
-  /** 
-   * Return the next record batch.  An empty batch means end of data.
-   */
-  @Override
-  public int next() {
-    
-    // Repeat until out of data or vectors are full
-    int actualCount;
-    for (actualCount = 0; actualCount < maxRowCount && provider.hasNext(); actualCount++) {
-   
-      // Put the next row into the vectors. If vectors full, try again later.
-      Object[] row = provider.next();
-      if (!batch.writeRowToVectors(actualCount, row)) {
-        provider.previous();
-        break;
-      }
-    }
-    
-    // Note that the batch is complete.
-    batch.endBatch(actualCount);
-    
-    // Problem if we had a single row which didn't fit.
-    if (actualCount == 0 && provider.hasNext()) {
-      throw new DrillRuntimeException("Row size larger than batch size");
-    }
-    
-    // Return the number of rows.  0 means end of data.
-    return actualCount;
-  }
-     
-
-      
-  /**
-   *  Release all resources 
-   */
-  public void cleanup() {
-    batch.cleanup();
-  }
-
-
-  
-}
-
-
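
Stripped of the vector machinery, next() above is a fill-until-full loop with overflow put-back, sized by getEstimatedRowCount: with the default bufSize of 256*1024 bytes and, say, an estimated 64 bytes per row, maxRowCount comes out at 4096. A plain-Java rendering of that control flow follows; the fits() check stands in for VectorSet.writeRowToVectors, and all names are illustrative.

    /** Sketch of the batching loop from the retired RowRecordReader.next(). */
    public class BatchLoopSketch {

      /** Stand-in for the vector-capacity test; always succeeds here. */
      static boolean fits(Object[] row) {
        return true;
      }

      /** Fill 'batch', pushing back the first row that does not fit; 0 means end of data. */
      static int fillBatch(RowProvider provider, Object[][] batch) {
        int count = 0;
        while (count < batch.length && provider.hasNext()) {
          Object[] row = provider.next();
          if (!fits(row)) {
            provider.previous();   // retry this row in the next batch
            break;
          }
          batch[count++] = row;
        }
        if (count == 0 && provider.hasNext()) {
          // a single row larger than the whole batch can never make progress
          throw new RuntimeException("Row size larger than batch size");
        }
        return count;
      }
    }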

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
index 1dd7c8a..131e795 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/SelectedTable.java
@@ -19,43 +19,35 @@ package org.apache.drill.exec.store.ischema;
 
 import net.hydromatic.optiq.SchemaPlus;
 
+import org.apache.drill.exec.store.RecordReader;
 import org.apache.drill.exec.store.ischema.InfoSchemaTable.Catalogs;
 import org.apache.drill.exec.store.ischema.InfoSchemaTable.Columns;
 import org.apache.drill.exec.store.ischema.InfoSchemaTable.Schemata;
 import org.apache.drill.exec.store.ischema.InfoSchemaTable.Tables;
 import org.apache.drill.exec.store.ischema.InfoSchemaTable.Views;
-import org.apache.drill.exec.store.ischema.OptiqProvider.OptiqScanner;
 import org.eigenbase.reltype.RelDataType;
 import org.eigenbase.reltype.RelDataTypeFactory;
 
 public enum SelectedTable{
-  CATALOGS(new Catalogs(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Catalogs(root);}} ), //
-  SCHEMATA(new Schemata(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Schemata(root);}} ), //
-  VIEWS(new Views(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Views(root);}} ), //
-  COLUMNS(new Columns(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Columns(root);}} ), //
-  TABLES(new Tables(), new ScannerFactory(){public OptiqScanner get(SchemaPlus root) {return new OptiqProvider.Tables(root);}} ); //
+  CATALOGS(new Catalogs()),
+  SCHEMATA(new Schemata()),
+  VIEWS(new Views()),
+  COLUMNS(new Columns()),
+  TABLES(new Tables());
   
-  private final FixedTable tableDef;
-  private final ScannerFactory providerFactory;
+  private final InfoSchemaTable tableDef;
   
-  private SelectedTable(FixedTable tableDef, ScannerFactory providerFactory) {
+  private SelectedTable(InfoSchemaTable tableDef) {
     this.tableDef = tableDef;
-    this.providerFactory = providerFactory;
   }
   
-  public OptiqScanner getProvider(SchemaPlus root){
-    return providerFactory.get(root);
-  }
-  
-  private interface ScannerFactory{
-    public OptiqScanner get(SchemaPlus root);
+  public RecordReader getRecordReader(SchemaPlus rootSchema) {
+    RecordGenerator recordGenerator = tableDef.getRecordGenerator();
+    recordGenerator.scanSchema(rootSchema);
+    return recordGenerator.getRecordReader();
   }
   
   public RelDataType getRowType(RelDataTypeFactory typeFactory) {
     return tableDef.getRowType(typeFactory);
   }
-  
-  public FixedTable getFixedTable(){
-    return tableDef;
-  }
 }
\ No newline at end of file
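
After this change each enum constant carries only its table definition, and a reader is produced in a single call: the RecordGenerator scans the schema tree and hands back a RecordReader over the generated records. A hypothetical call site (the SelectedTableUsage class and the bare root schema are assumptions; a real caller would have Drill's storage plugin schemas registered on the root):

    import net.hydromatic.optiq.SchemaPlus;
    import net.hydromatic.optiq.tools.Frameworks;

    import org.apache.drill.exec.store.RecordReader;
    import org.apache.drill.exec.store.ischema.SelectedTable;

    public class SelectedTableUsage {
      public static void main(String[] args) {
        // A bare root schema, for shape only; real callers register Drill's
        // storage plugin schemas on it first.
        SchemaPlus root = Frameworks.createRootSchema(false);

        // One call per INFORMATION_SCHEMA table: scan the schema tree and
        // get back a ready RecordReader over the generated records.
        RecordReader catalogs = SelectedTable.CATALOGS.getRecordReader(root);
        RecordReader tables = SelectedTable.TABLES.getRecordReader(root);
      }
    }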

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
deleted file mode 100644
index cb39337..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/ischema/VectorSet.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import java.util.List;
-
-import org.apache.drill.exec.memory.BufferAllocator;
-import org.apache.drill.exec.vector.ValueVector;
-
-/**
- * A collection of value vectors representing the columns in a table.
- */
-public interface VectorSet {
-    public void cleanup();
-   
-    public void beginBatch(int maxRows);
-    public boolean writeRowToVectors(int index, Object[] values);
-    public void endBatch(int actualRows);
- 
-    public int getEstimatedRowCount(int bufSize);
-    public void createVectors(BufferAllocator allocator);
-    public List<ValueVector>  getValueVectors();
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
deleted file mode 100644
index 3ccb96b..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/OrphanSchema.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-import net.hydromatic.optiq.SchemaPlus;
-import net.hydromatic.optiq.tools.Frameworks;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.exec.cache.local.LocalCache;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.rpc.user.UserSession;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import org.junit.Test;
-
-import com.codahale.metrics.MetricRegistry;
-
-public class OrphanSchema {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(OrphanSchema.class);
-
-  /**
-   * Create an orphan schema to be used for testing.
-   * @return root node of the created schema.
-   */
-  public static SchemaPlus create() throws Exception {
-
-    final DrillConfig c = DrillConfig.create();
-
-    // Mock up a context which will allow us to create a schema.
-    final DrillbitContext bitContext = mock(DrillbitContext.class);
-    when(bitContext.getMetrics()).thenReturn(new MetricRegistry());
-    when(bitContext.getAllocator()).thenReturn(new TopLevelAllocator());
-    when(bitContext.getConfig()).thenReturn(c);
-    when(bitContext.getCache()).thenReturn(new LocalCache());
-
-    bitContext.getCache().run();
-
-    // Using the mock context, get the orphan schema.
-    StoragePluginRegistry r = new StoragePluginRegistry(bitContext);
-    r.init();
-    SchemaPlus plus = Frameworks.createRootSchema(false);
-    r.getSchemaFactory().registerSchemas(new UserSession(null, null, null), plus);
-    return plus;
-  }
-
-
-  /**
-   * This test replicates the one in org.apache.drill.exec.server,
-   * but it is refactored to provide a standalone "create()" method.
-   */
-
-  @Test
-  public void test() throws Exception {
-    printSchema(create(), 0);
-  }
-
-  private static void t(final int t){
-    for(int i =0; i < t; i++) System.out.print('\t');
-  }
-  private static void printSchema(SchemaPlus s, int indent){
-    t(indent);
-    System.out.print("Schema: ");
-    System.out.println(s.getName().equals("") ? "root" : s.getName());
-    for(String table : s.getTableNames()){
-      t(indent + 1);
-      System.out.print("Table: ");
-      System.out.println(table);
-    }
-
-    for(String schema : s.getSubSchemaNames()){
-      SchemaPlus p = s.getSubSchema(schema);
-      printSchema(p, indent + 1);
-    }
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
deleted file mode 100644
index 8eadb56..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestOrphanSchema.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import net.hydromatic.optiq.SchemaPlus;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.expr.TypeHelper;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.OutOfMemoryException;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.store.ischema.FixedTable;
-import org.apache.drill.exec.store.ischema.InfoSchemaTable;
-import org.apache.drill.exec.store.ischema.OptiqProvider;
-import org.apache.drill.exec.store.ischema.RowProvider;
-import org.apache.drill.exec.store.ischema.RowRecordReader;
-import org.apache.drill.exec.vector.ValueVector;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * Using an orphan schema, create and display the various information schema tables.
- * An "orphan schema" is a stand alone schema which is not (yet?) connected to Optiq.
- */
-@Ignore // I think we should remove these tests. They are too difficult to maintain.
-public class TestOrphanSchema extends ExecTest {
-  static SchemaPlus root;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    root = OrphanSchema.create();
-  }
-
-  @Test
-  public void testTables() throws OutOfMemoryException {
-    displayTable(new InfoSchemaTable.Tables(), new OptiqProvider.Tables(root));
-  }
-
-  @Test
-  public void testSchemata() throws OutOfMemoryException {
-    displayTable(new InfoSchemaTable.Schemata(), new OptiqProvider.Schemata(root));
-  }
-
-
-  @Test
-  public void testViews() throws OutOfMemoryException {
-    displayTable(new InfoSchemaTable.Views(), new OptiqProvider.Views(root));
-  }
-
-  @Test
-  public void testCatalogs() throws OutOfMemoryException {
-    displayTable(new InfoSchemaTable.Catalogs(), new OptiqProvider.Catalogs(root));
-  }
-
-  @Test
-  public void testColumns() throws OutOfMemoryException {
-    displayTable(new InfoSchemaTable.Columns(), new OptiqProvider.Columns(root));
-  }
-
-
-  private void displayTable(FixedTable table, RowProvider provider) throws OutOfMemoryException {
-
-    // Set up a mock context
-    FragmentContext context = mock(FragmentContext.class);
-    when(context.getAllocator()).thenReturn(new TopLevelAllocator());
-
-    // Create a RecordReader which reads from the test table.
-    RecordReader reader = new RowRecordReader(context, table, provider);
-
-    // Create a dummy output mutator for the RecordReader.
-    TestOutput output = new TestOutput();
-    try {reader.setup(output);}
-    catch (ExecutionSetupException e) {Assert.fail("reader threw an exception");}
-
-    // print out headers
-    System.out.printf("\n%20s\n", table.getName());
-    System.out.printf("%10s", "RowNumber");
-    for (ValueVector v: table.getValueVectors()) {
-      System.out.printf(" | %16s", v.getField().toExpr());
-    }
-    System.out.println();
-
-    // Do for each record batch
-    int rowNumber = 0;
-    for (;;) {
-      int count = reader.next();
-      if (count == 0) break;
-
-      // Do for each row in the batch
-      for (int row=0; row<count; row++, rowNumber++) {
-
-        // Display the row
-        System.out.printf("%10d", rowNumber);
-        for (ValueVector v: table.getValueVectors()) {
-          System.out.printf(" | %16s", v.getAccessor().getObject(row));
-        }
-        System.out.println();
-
-      }
-    }
-  }
-
-
-  /**
-   * A dummy OutputMutator so we can examine the contents of the current batch
-   */
-  static class TestOutput implements OutputMutator {
-    List<ValueVector> vectors = new ArrayList<ValueVector>();
-
-    public void addField(ValueVector vector) throws SchemaChangeException {
-      vectors.add(vector);
-    }
-
-    public void addFields(List<ValueVector> v) {
-      return;
-    }
-
-    public Object get(int column, int row) {
-      return vectors.get(column).getAccessor().getObject(row);
-    }
-
-    @Override
-    public <T extends ValueVector> T addField(MaterializedField field, Class<T> clazz) throws SchemaChangeException {
-      return null;
-    }
-
-    @Override
-    public void allocate(int recordCount) {
-      return;
-    }
-
-    @Override
-    public boolean isNewSchema() {
-      return false;
-    }
-  }
-
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/0dde773b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
deleted file mode 100644
index 217b792..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/ischema/TestTableProvider.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.ischema;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.types.TypeProtos.MajorType;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.exception.SchemaChangeException;
-import org.apache.drill.exec.memory.OutOfMemoryException;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.impl.OutputMutator;
-import org.apache.drill.exec.record.MaterializedField;
-import org.apache.drill.exec.store.RecordReader;
-import org.apache.drill.exec.vector.ValueVector;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
-
-/**
- * Using a test table with two columns, create data and verify the values are in the record batch.
- */
-@Ignore
-public class TestTableProvider extends ExecTest {
-
-  @Test
-  public void zeroRead() throws OutOfMemoryException {
-    readTestTable(0);
-  }
-
-  @Test
-  public void oneRead() throws OutOfMemoryException {
-    readTestTable(1);
-  }
-
-  @Test
-  public void smallRead() throws OutOfMemoryException {
-    readTestTable(10);
-  }
-
-  @Test
-  @Ignore // due to out of heap space
-  public void largeRead() throws OutOfMemoryException {
-    readTestTable(1024*1024);
-  }
-
-
-  /**
-   * Read record batches from the test table and verify the contents.
-   * @param nrRows - the total number of rows expected.
-   */
-  private void readTestTable(int nrRows) throws OutOfMemoryException {
-
-    // Mock up a context with a BufferAllocator
-    FragmentContext context = mock(FragmentContext.class);
-    when(context.getAllocator()).thenReturn(new TopLevelAllocator());
-
-    // Create a RecordReader which reads from the test table.
-    RecordReader reader = new RowRecordReader(context, new TestTable(), new TestProvider(nrRows));
-
-    // Create a dummy output mutator for the RecordReader.
-    TestOutput output = new TestOutput();
-    try {reader.setup(output);}
-    catch (ExecutionSetupException e) {Assert.fail("reader threw an exception");}
-
-    // Do for each record batch
-    int rowNumber = 0;
-    for (;;) {
-      int count = reader.next();
-      if (count == 0) break;
-
-      // Do for each row in the batch
-      for (int row=0; row<count; row++, rowNumber++) {
-
-        // Verify the row has an integer and string containing the row number
-        int intValue = (int)output.get(1, row);
-        String strValue = (String)output.get(0, row);
-        Assert.assertEquals(rowNumber, intValue);
-        Assert.assertEquals(rowNumber, Integer.parseInt(strValue));
-      }
-    }
-
-  // Verify we read the correct number of rows.
-  Assert.assertEquals(nrRows, rowNumber);
-  }
-
-
-  /**
-   * Class to define the table we want to create. Two columns - string, integer
-   */
-  static class TestTable extends FixedTable {
-    static final String tableName = "MOCK_TABLE";
-    static final String fieldNames[] = {"STRING_COLUMM", "INTEGER_COLUMN"};
-    static final MajorType fieldTypes[] = {VARCHAR, INT};
-    TestTable() {
-      super(tableName, fieldNames, fieldTypes);
-    }
-  }
-
-
-  /**
-   * Class to generate data for the table
-   */
-  static class TestProvider extends PipeProvider {
-    int maxRows;
-    TestProvider(int maxRows) {
-      this.maxRows = maxRows;
-    }
-    void generateRows() {
-      for (int rowNumber=0; rowNumber<maxRows; rowNumber++) {
-        writeRow(Integer.toString(rowNumber), rowNumber);
-      }
-    }
-  }
-
-
-
-
-  /**
-   * A dummy OutputMutator so we can examine the contents of the current batch
-   */
-  static class TestOutput implements OutputMutator {
-    List<ValueVector> vectors = new ArrayList<ValueVector>();
-
-    public Object get(int column, int row) {
-      return vectors.get(column).getAccessor().getObject(row);
-    }
-
-    @Override
-    public <T extends ValueVector> T addField(MaterializedField field, Class<T> clazz) throws SchemaChangeException {
-      return null;
-    }
-
-    @Override
-    public void addFields(List<ValueVector> vv) {
-      return;
-    }
-
-    @Override
-    public void allocate(int recordCount) {
-      
-    }
-    @Override
-    public boolean isNewSchema() {
-      return false;
-    }
-  }
-
-
-}