Posted to commits@drill.apache.org by ja...@apache.org on 2014/06/20 22:25:02 UTC

[26/32] DRILL-1024: Move hive storage code out of 'exec/java-exec' into 'contrib/storage-hive' module.

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
deleted file mode 100644
index 50c81e9..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTable.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.annotation.JsonTypeName;
-import com.google.common.collect.Lists;
-
-@JsonTypeName("table")
-public class HiveTable {
-
-  @JsonIgnore
-  private Table table;
-
-  @JsonProperty
-  public String tableName;
-  @JsonProperty
-  public String dbName;
-  @JsonProperty
-  public String owner;
-  @JsonProperty
-  public int createTime;
-  @JsonProperty
-  public int lastAccessTime;
-  @JsonProperty
-  public int retention;
-  @JsonProperty
-  public StorageDescriptorWrapper sd;
-  @JsonProperty
-  public List<FieldSchemaWrapper> partitionKeys;
-  @JsonProperty
-  public Map<String,String> parameters;
-  @JsonProperty
-  public String viewOriginalText;
-  @JsonProperty
-  public String viewExpandedText;
-  @JsonProperty
-  public String tableType;
-
-  @JsonCreator
-  public HiveTable(@JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("owner") String owner, @JsonProperty("createTime") int createTime,
-                   @JsonProperty("lastAccessTime") int lastAccessTime, @JsonProperty("retention") int retention, @JsonProperty("sd") StorageDescriptorWrapper sd,
-                   @JsonProperty("partitionKeys") List<FieldSchemaWrapper> partitionKeys, @JsonProperty("parameters") Map<String, String> parameters,
-                   @JsonProperty("viewOriginalText") String viewOriginalText, @JsonProperty("viewExpandedText") String viewExpandedText, @JsonProperty("tableType") String tableType
-                   ) {
-    this.tableName = tableName;
-    this.dbName = dbName;
-    this.owner = owner;
-    this.createTime = createTime;
-    this.lastAccessTime = lastAccessTime;
-    this.retention = retention;
-    this.sd = sd;
-    this.partitionKeys = partitionKeys;
-    this.parameters = parameters;
-    this.viewOriginalText = viewOriginalText;
-    this.viewExpandedText = viewExpandedText;
-    this.tableType = tableType;
-
-    List<FieldSchema> partitionKeysUnwrapped = Lists.newArrayList();
-    for (FieldSchemaWrapper w : partitionKeys) partitionKeysUnwrapped.add(w.getFieldSchema());
-    StorageDescriptor sdUnwrapped = sd.getSd();
-    this.table = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, sdUnwrapped, partitionKeysUnwrapped,
-            parameters, viewOriginalText, viewExpandedText, tableType);
-  }
-
-  public HiveTable(Table table) {
-    if (table == null) return;
-    this.table = table;
-    this.tableName = table.getTableName();
-    this.dbName = table.getDbName();
-    this.owner = table.getOwner();
-    this.createTime = table.getCreateTime();
-    this.lastAccessTime = table.getLastAccessTime();
-    this.retention = table.getRetention();
-    this.sd = new StorageDescriptorWrapper(table.getSd());
-    this.partitionKeys = Lists.newArrayList();
-    for (FieldSchema f : table.getPartitionKeys()) this.partitionKeys.add(new FieldSchemaWrapper(f));
-    this.parameters = table.getParameters();
-    this.viewOriginalText = table.getViewOriginalText();
-    this.viewExpandedText = table.getViewExpandedText();
-    this.tableType = table.getTableType();
-  }
-
-  @JsonIgnore
-  public Table getTable() {
-    return table;
-  }
-
-  public static class HivePartition {
-
-    @JsonIgnore
-    private Partition partition;
-
-    @JsonProperty
-    public List<String> values;
-    @JsonProperty
-    public String tableName;
-    @JsonProperty
-    public String dbName;
-    @JsonProperty
-    public int createTime;
-    @JsonProperty
-    public int lastAccessTime;
-    @JsonProperty
-    public StorageDescriptorWrapper sd;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public HivePartition(@JsonProperty("values") List<String> values, @JsonProperty("tableName") String tableName, @JsonProperty("dbName") String dbName, @JsonProperty("createTime") int createTime,
-                     @JsonProperty("lastAccessTime") int lastAccessTime,  @JsonProperty("sd") StorageDescriptorWrapper sd,
-                     @JsonProperty("parameters") Map<String, String> parameters
-    ) {
-      this.values = values;
-      this.tableName = tableName;
-      this.dbName = dbName;
-      this.createTime = createTime;
-      this.lastAccessTime = lastAccessTime;
-      this.sd = sd;
-      this.parameters = parameters;
-
-      StorageDescriptor sdUnwrapped = sd.getSd();
-      this.partition = new org.apache.hadoop.hive.metastore.api.Partition(values, tableName, dbName, createTime, lastAccessTime, sdUnwrapped, parameters);
-    }
-
-    public HivePartition(Partition partition) {
-      if (partition == null) return;
-      this.partition = partition;
-      this.values = partition.getValues();
-      this.tableName = partition.getTableName();
-      this.dbName = partition.getDbName();
-      this.createTime = partition.getCreateTime();
-      this.lastAccessTime = partition.getLastAccessTime();
-      this.sd = new StorageDescriptorWrapper(partition.getSd());
-      this.parameters = partition.getParameters();
-    }
-
-    @JsonIgnore
-    public Partition getPartition() {
-      return partition;
-    }
-  }
-
-  public static class StorageDescriptorWrapper {
-    @JsonIgnore
-    private StorageDescriptor sd;
-    @JsonProperty
-    public List<FieldSchemaWrapper> cols;
-    @JsonProperty
-    public String location;
-    @JsonProperty
-    public String inputFormat;
-    @JsonProperty
-    public String outputFormat;
-    @JsonProperty
-    public boolean compressed;
-    @JsonProperty
-    public int numBuckets;
-    @JsonProperty
-    public SerDeInfoWrapper serDeInfo;
-//    @JsonProperty
-//    public List<String> bucketCols;
-    @JsonProperty
-    public List<OrderWrapper> sortCols;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public StorageDescriptorWrapper(@JsonProperty("cols") List<FieldSchemaWrapper> cols, @JsonProperty("location") String location, @JsonProperty("inputFormat") String inputFormat,
-                                    @JsonProperty("outputFormat") String outputFormat, @JsonProperty("compressed") boolean compressed, @JsonProperty("numBuckets") int numBuckets,
-                                    @JsonProperty("serDeInfo") SerDeInfoWrapper serDeInfo,  @JsonProperty("sortCols") List<OrderWrapper> sortCols,
-                                    @JsonProperty("parameters") Map<String,String> parameters) {
-      this.cols = cols;
-      this.location = location;
-      this.inputFormat = inputFormat;
-      this.outputFormat = outputFormat;
-      this.compressed = compressed;
-      this.numBuckets = numBuckets;
-      this.serDeInfo = serDeInfo;
-//      this.bucketCols = bucketCols;
-      this.sortCols = sortCols;
-      this.parameters = parameters;
-      List<FieldSchema> colsUnwrapped = Lists.newArrayList();
-      for (FieldSchemaWrapper w: cols) colsUnwrapped.add(w.getFieldSchema());
-      SerDeInfo serDeInfoUnwrapped = serDeInfo.getSerDeInfo();
-      List<Order> sortColsUnwrapped = Lists.newArrayList();
-      for (OrderWrapper w : sortCols) sortColsUnwrapped.add(w.getOrder());
-//      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
-//              bucketCols, sortColsUnwrapped, parameters);
-      this.sd = new StorageDescriptor(colsUnwrapped, location, inputFormat, outputFormat, compressed, numBuckets, serDeInfoUnwrapped,
-              null, sortColsUnwrapped, parameters);
-    }
-
-    public StorageDescriptorWrapper(StorageDescriptor sd) {
-      this.sd = sd;
-      this.cols = Lists.newArrayList();
-      for (FieldSchema f : sd.getCols()) this.cols.add(new FieldSchemaWrapper(f));
-      this.location = sd.getLocation();
-      this.inputFormat = sd.getInputFormat();
-      this.outputFormat = sd.getOutputFormat();
-      this.compressed = sd.isCompressed();
-      this.numBuckets = sd.getNumBuckets();
-      this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
-//      this.bucketCols = sd.getBucketCols();
-      this.sortCols = Lists.newArrayList();
-      for (Order o : sd.getSortCols()) this.sortCols.add(new OrderWrapper(o));
-      this.parameters = sd.getParameters();
-    }
-
-    @JsonIgnore
-    public StorageDescriptor getSd() {
-      return sd;
-    }
-
-  }
-
-  public static class SerDeInfoWrapper {
-    @JsonIgnore
-    private SerDeInfo serDeInfo;
-    @JsonProperty
-    public String name;
-    @JsonProperty
-    public String serializationLib;
-    @JsonProperty
-    public Map<String,String> parameters;
-
-    @JsonCreator
-    public SerDeInfoWrapper(@JsonProperty("name") String name, @JsonProperty("serializationLib") String serializationLib, @JsonProperty("parameters") Map<String, String> parameters) {
-      this.name = name;
-      this.serializationLib = serializationLib;
-      this.parameters = parameters;
-      this.serDeInfo = new SerDeInfo(name, serializationLib, parameters);
-    }
-
-    public SerDeInfoWrapper(SerDeInfo serDeInfo) {
-      this.serDeInfo = serDeInfo;
-      this.name = serDeInfo.getName();
-      this.serializationLib = serDeInfo.getSerializationLib();
-      this.parameters = serDeInfo.getParameters();
-    }
-
-    @JsonIgnore
-    public SerDeInfo getSerDeInfo() {
-      return serDeInfo;
-    }
-  }
-
-  public static class FieldSchemaWrapper {
-    @JsonIgnore
-    private FieldSchema fieldSchema;
-    @JsonProperty
-    public String name;
-    @JsonProperty
-    public String type;
-    @JsonProperty
-    public String comment;
-
-    @JsonCreator
-    public FieldSchemaWrapper(@JsonProperty("name") String name, @JsonProperty("type") String type, @JsonProperty("comment") String comment) {
-      this.name = name;
-      this.type = type;
-      this.comment = comment;
-      this.fieldSchema = new FieldSchema(name, type, comment);
-    }
-
-    public FieldSchemaWrapper(FieldSchema fieldSchema) {
-      this.fieldSchema = fieldSchema;
-      this.name = fieldSchema.getName();
-      this.type = fieldSchema.getType();
-      this.comment = fieldSchema.getComment();
-    }
-
-    @JsonIgnore
-    public FieldSchema getFieldSchema() {
-      return fieldSchema;
-    }
-  }
-
-  public static class OrderWrapper {
-    @JsonIgnore
-    private Order ord;
-    @JsonProperty
-    public String col;
-    @JsonProperty
-    public int order;
-
-    @JsonCreator
-    public OrderWrapper(@JsonProperty("col") String col, @JsonProperty("order") int order) {
-      this.col = col;
-      this.order = order;
-    }
-
-    public OrderWrapper(Order ord) {
-      this.ord = ord;
-      this.col = ord.getCol();
-      this.order = ord.getOrder();
-    }
-
-    @JsonIgnore
-    public Order getOrder() {
-      return ord;
-    }
-  }
-}
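
The class above exists so that Thrift-generated metastore objects (Table, Partition, StorageDescriptor, SerDeInfo, FieldSchema, Order) can travel inside Drill's JSON-serialized plans: each wrapper exposes the Thrift fields as @JsonProperty members, rebuilds the Thrift object in its @JsonCreator constructor, and keeps the rebuilt object out of the JSON via @JsonIgnore. A minimal, self-contained sketch of that round-trip pattern; Column and ColumnWrapper are illustrative names, not Drill or Hive classes:

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class WrapperRoundTrip {

      // Stand-in for a Thrift-generated class that Jackson cannot handle directly.
      static final class Column {
        final String name;
        final String type;
        Column(String name, String type) { this.name = name; this.type = type; }
      }

      // Wrapper: JSON-visible fields plus a hidden object rebuilt on deserialization.
      static final class ColumnWrapper {
        @JsonIgnore
        private Column column;
        @JsonProperty
        public String name;
        @JsonProperty
        public String type;

        @JsonCreator
        public ColumnWrapper(@JsonProperty("name") String name, @JsonProperty("type") String type) {
          this.name = name;
          this.type = type;
          this.column = new Column(name, type);   // rebuilt from the JSON fields
        }

        @JsonIgnore
        public Column getColumn() { return column; }
      }

      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = mapper.writeValueAsString(new ColumnWrapper("key", "int"));
        ColumnWrapper back = mapper.readValue(json, ColumnWrapper.class);
        System.out.println(json + " -> " + back.getColumn().name);
      }
    }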

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
deleted file mode 100644
index 116603c..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.allocator.VectorAllocator;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.InputSplit;
-
-import com.google.common.collect.Lists;
-
-/**
- * Note: Native hive text record reader is not complete in implementation. For now use
- * {@link org.apache.drill.exec.store.hive.HiveRecordReader}.
- */
-public class HiveTextRecordReader extends HiveRecordReader {
-
-  public final byte delimiter;
-  public final List<Integer> columnIds;
-  private final int numCols;
-
-  public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
-    super(table, partition, inputSplit, projectedColumns, context);
-    String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
-    if (d != null) {
-      delimiter = d.getBytes()[0];
-    } else {
-      delimiter = (byte) 1;
-    }
-    assert delimiter > 0;
-    List<Integer> ids = Lists.newArrayList();
-    for (int i = 0; i < tableColumns.size(); i++) {
-      if (selectedColumnNames.contains(tableColumns.get(i))) {
-        ids.add(i);
-      }
-    }
-    columnIds = ids;
-    numCols = tableColumns.size();
-  }
-
-  public boolean setValue(PrimitiveObjectInspector.PrimitiveCategory pCat, ValueVector vv, int index, byte[] bytes, int start) {
-    switch(pCat) {
-      case BINARY:
-        throw new UnsupportedOperationException();
-      case BOOLEAN:
-        throw new UnsupportedOperationException();
-      case BYTE:
-        throw new UnsupportedOperationException();
-      case DECIMAL:
-        throw new UnsupportedOperationException();
-      case DOUBLE:
-        throw new UnsupportedOperationException();
-      case FLOAT:
-        throw new UnsupportedOperationException();
-      case INT: {
-        int value = 0;
-        byte b;
-        for (int i = start; (b = bytes[i]) != delimiter; i++) {
-          value = (value * 10) + b - 48;
-        }
-        return ((NullableIntVector) vv).getMutator().setSafe(index, value);
-      }
-      case LONG: {
-        long value = 0;
-        byte b;
-        for (int i = start; (b = bytes[i]) != delimiter; i++) {
-          value = (value * 10) + b - 48;
-        }
-        return ((NullableBigIntVector) vv).getMutator().setSafe(index, value);
-      }
-      case SHORT:
-        throw new UnsupportedOperationException();
-      case STRING: {
-        int end = start;
-        for (int i = start; i < bytes.length; i++) {
-          if (bytes[i] == delimiter) {
-            end = i;
-            break;
-          }
-          end = bytes.length;
-        }
-        return ((NullableVarCharVector) vv).getMutator().setSafe(index, bytes, start, end - start);
-      }
-      case TIMESTAMP:
-        throw new UnsupportedOperationException();
-
-      default:
-        throw new UnsupportedOperationException("Could not determine type");
-    }
-  }
-
-
-  @Override
-  public int next() {
-    for (ValueVector vv : vectors) {
-      VectorAllocator.getAllocator(vv, 50).alloc(TARGET_RECORD_COUNT);
-    }
-    try {
-      int recordCount = 0;
-      if (redoRecord != null) {
-        int length = ((Text) value).getLength();
-        byte[] bytes = ((Text) value).getBytes();
-        int[] delimPositions = new int[numCols];
-        delimPositions[0] = -1;
-        int p = 0;
-        for (int i = 0; i < length; i++) {
-          if (bytes[i] == delimiter) {
-            delimPositions[p++] = i;
-          }
-        }
-        for (int id : columnIds) {
-          boolean success = false; // setValue(primitiveCategories.get(id), vectors.get(id), recordCount, bytes, delimPositions[id]);
-          if (!success) {
-            throw new DrillRuntimeException(String.format("Failed to write value for column %s", selectedColumnNames.get(id)));
-          }
-
-        }
-        redoRecord = null;
-      }
-      while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
-        int length = ((Text) value).getLength();
-        byte[] bytes = ((Text) value).getBytes();
-        int[] delimPositions = new int[numCols + 1];
-        delimPositions[0] = -1;
-        int p = 1;
-        for (int i = 0; i < length; i++) {
-          if (bytes[i] == delimiter) {
-            delimPositions[p++] = i;
-          }
-        }
-        for (int i = 0; i < columnIds.size(); i++) {
-          int id = columnIds.get(i);
-          boolean success = false; // setValue(primitiveCategories.get(i), vectors.get(i), recordCount, bytes, delimPositions[id] + 1);
-          if (!success) {
-            redoRecord = value;
-            if (partition != null) populatePartitionVectors(recordCount);
-            return recordCount;
-          }
-        }
-        recordCount++;
-      }
-      if (partition != null) populatePartitionVectors(recordCount);
-      return recordCount;
-    } catch (IOException e) {
-      throw new DrillRuntimeException(e);
-    }
-  }
-}
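
The INT and LONG cases of setValue() above parse a numeric field by scanning ASCII digit bytes until the column delimiter and accumulating value = value * 10 + (b - '0'). A standalone sketch of that scan; like the removed code it assumes non-negative values and does no sign or overflow handling:

    public class DelimitedIntParse {

      /** Parses a non-negative decimal field starting at 'start', stopping at 'delimiter'. */
      static long parseField(byte[] bytes, int start, byte delimiter) {
        long value = 0;
        for (int i = start; i < bytes.length && bytes[i] != delimiter; i++) {
          value = value * 10 + (bytes[i] - '0');   // digit accumulation, as in the original
        }
        return value;
      }

      public static void main(String[] args) {
        byte[] row = "123\u000145\u0001abc".getBytes();   // Hive's default field delimiter is ^A (0x01)
        System.out.println(parseField(row, 0, (byte) 1)); // 123
        System.out.println(parseField(row, 4, (byte) 1)); // 45
      }
    }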

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
deleted file mode 100644
index 949fa06..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveTable.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.eigenbase.reltype.RelDataType;
-import org.eigenbase.reltype.RelDataTypeFactory;
-import org.eigenbase.sql.SqlCollation;
-import org.eigenbase.sql.type.SqlTypeName;
-
-public class DrillHiveTable extends DrillTable{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillHiveTable.class);
-  
-  protected final Table hiveTable;
-  
-  public DrillHiveTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
-    super(storageEngineName, plugin, readEntry);
-    this.hiveTable = new Table(readEntry.getTable());
-  }
-
-  @Override
-  public RelDataType getRowType(RelDataTypeFactory typeFactory) {
-    List<RelDataType> typeList = Lists.newArrayList();
-    List<String> fieldNameList = Lists.newArrayList();
-
-    List<FieldSchema> hiveFields = hiveTable.getCols();
-    for(FieldSchema hiveField : hiveFields) {
-      fieldNameList.add(hiveField.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(hiveField.getType())));
-    }
-
-    for (FieldSchema field : hiveTable.getPartitionKeys()) {
-      fieldNameList.add(field.getName());
-      typeList.add(getNullableRelDataTypeFromHiveType(
-          typeFactory, TypeInfoUtils.getTypeInfoFromTypeString(field.getType())));
-    }
-
-    return typeFactory.createStructType(typeList, fieldNameList);
-  }
-
-  private RelDataType getNullableRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    RelDataType relDataType = getRelDataTypeFromHiveType(typeFactory, typeInfo);
-    return typeFactory.createTypeWithNullability(relDataType, true);
-  }
-
-  private RelDataType getRelDataTypeFromHivePrimitiveType(RelDataTypeFactory typeFactory, PrimitiveTypeInfo pTypeInfo) {
-    switch(pTypeInfo.getPrimitiveCategory()) {
-      case BOOLEAN:
-        return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
-
-      case BYTE:
-        return typeFactory.createSqlType(SqlTypeName.TINYINT);
-
-      case SHORT:
-        return typeFactory.createSqlType(SqlTypeName.SMALLINT);
-
-      case INT:
-        return typeFactory.createSqlType(SqlTypeName.INTEGER);
-
-      case LONG:
-        return typeFactory.createSqlType(SqlTypeName.BIGINT);
-
-      case FLOAT:
-        return typeFactory.createSqlType(SqlTypeName.FLOAT);
-
-      case DOUBLE:
-        return typeFactory.createSqlType(SqlTypeName.DOUBLE);
-
-      case DATE:
-        return typeFactory.createSqlType(SqlTypeName.DATE);
-
-      case TIMESTAMP:
-        return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
-
-      case BINARY:
-        return typeFactory.createSqlType(SqlTypeName.BINARY);
-
-      case DECIMAL:
-        final int precision = 38; // Hive 0.12 has standard precision
-        return typeFactory.createSqlType(SqlTypeName.DECIMAL, precision);
-
-      case STRING:
-      case VARCHAR: {
-        int maxLen = TypeInfoUtils.getCharacterLengthForType(pTypeInfo);
-        return typeFactory.createTypeWithCharsetAndCollation(
-          typeFactory.createSqlType(SqlTypeName.VARCHAR, maxLen), /*input type*/
-          Charset.forName("ISO-8859-1"), /*unicode char set*/
-          SqlCollation.IMPLICIT /* TODO: need to decide if implicit is the correct one */
-        );
-      }
-
-      case UNKNOWN:
-      case VOID:
-      default:
-        throwUnsupportedHiveDataTypeError(pTypeInfo.getPrimitiveCategory().toString());
-    }
-
-    return null;
-  }
-
-  private RelDataType getRelDataTypeFromHiveType(RelDataTypeFactory typeFactory, TypeInfo typeInfo) {
-    switch(typeInfo.getCategory()) {
-      case PRIMITIVE:
-        return getRelDataTypeFromHivePrimitiveType(typeFactory, ((PrimitiveTypeInfo) typeInfo));
-
-      case LIST: {
-        ListTypeInfo listTypeInfo = (ListTypeInfo)typeInfo;
-        RelDataType listElemTypeInfo = getRelDataTypeFromHiveType(typeFactory, listTypeInfo.getListElementTypeInfo());
-        return typeFactory.createArrayType(listElemTypeInfo, -1);
-      }
-
-      case MAP: {
-        MapTypeInfo mapTypeInfo = (MapTypeInfo)typeInfo;
-        RelDataType keyType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapKeyTypeInfo());
-        RelDataType valueType = getRelDataTypeFromHiveType(typeFactory, mapTypeInfo.getMapValueTypeInfo());
-        return typeFactory.createMapType(keyType, valueType);
-      }
-
-      case STRUCT: {
-        StructTypeInfo structTypeInfo = (StructTypeInfo)typeInfo;
-        ArrayList<String> fieldNames = structTypeInfo.getAllStructFieldNames();
-        ArrayList<TypeInfo> fieldHiveTypeInfoList = structTypeInfo.getAllStructFieldTypeInfos();
-        List<RelDataType> fieldRelDataTypeList = Lists.newArrayList();
-        for(TypeInfo fieldHiveType : fieldHiveTypeInfoList) {
-          fieldRelDataTypeList.add(getRelDataTypeFromHiveType(typeFactory, fieldHiveType));
-        }
-        return typeFactory.createStructType(fieldRelDataTypeList, fieldNames);
-      }
-
-      case UNION:
-        logger.warn("There is no UNION data type in SQL. Converting it to Sql type OTHER to avoid " +
-            "breaking INFORMATION_SCHEMA queries");
-        return typeFactory.createSqlType(SqlTypeName.OTHER);
-    }
-
-    throwUnsupportedHiveDataTypeError(typeInfo.getCategory().toString());
-    return null;
-  }
-
-  private void throwUnsupportedHiveDataTypeError(String hiveType) {
-    StringBuilder errMsg = new StringBuilder();
-    errMsg.append(String.format("Unsupported Hive data type %s. ", hiveType));
-    errMsg.append(System.getProperty("line.separator"));
-    errMsg.append("Following Hive data types are supported in Drill INFORMATION_SCHEMA: ");
-    errMsg.append("BOOLEAN, BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, DATE, TIMESTAMP, BINARY, DECIMAL, STRING, " +
-        "VARCHAR, LIST, MAP, STRUCT and UNION");
-
-    throw new RuntimeException(errMsg.toString());
-  }
-}
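
getRelDataTypeFromHivePrimitiveType() above is essentially a fixed mapping from Hive primitive categories to SQL types, with DECIMAL pinned at Hive 0.12's standard precision of 38 and STRING/VARCHAR surfaced as VARCHAR. A simplified, framework-free sketch of that mapping; it returns SQL type names as strings to avoid the Optiq RelDataTypeFactory dependency, so it illustrates the lookup table rather than the real implementation:

    import java.util.HashMap;
    import java.util.Map;

    public class HiveToSqlTypeMap {

      private static final Map<String, String> PRIMITIVES = new HashMap<>();
      static {
        PRIMITIVES.put("BOOLEAN", "BOOLEAN");
        PRIMITIVES.put("BYTE", "TINYINT");
        PRIMITIVES.put("SHORT", "SMALLINT");
        PRIMITIVES.put("INT", "INTEGER");
        PRIMITIVES.put("LONG", "BIGINT");
        PRIMITIVES.put("FLOAT", "FLOAT");
        PRIMITIVES.put("DOUBLE", "DOUBLE");
        PRIMITIVES.put("DATE", "DATE");
        PRIMITIVES.put("TIMESTAMP", "TIMESTAMP");
        PRIMITIVES.put("BINARY", "BINARY");
        PRIMITIVES.put("DECIMAL", "DECIMAL(38)");   // Hive 0.12 standard precision
        PRIMITIVES.put("STRING", "VARCHAR");
        PRIMITIVES.put("VARCHAR", "VARCHAR");
      }

      /** Returns the SQL type name for a Hive primitive category, or throws if unsupported. */
      static String sqlTypeFor(String hivePrimitiveCategory) {
        String sqlType = PRIMITIVES.get(hivePrimitiveCategory);
        if (sqlType == null) {
          throw new RuntimeException("Unsupported Hive data type " + hivePrimitiveCategory);
        }
        return sqlType;
      }

      public static void main(String[] args) {
        System.out.println(sqlTypeFor("LONG"));   // BIGINT
        System.out.println(sqlTypeFor("STRING")); // VARCHAR
      }
    }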

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
deleted file mode 100644
index b575972..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/DrillHiveViewTable.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import net.hydromatic.optiq.Schema.TableType;
-import org.apache.drill.exec.planner.logical.DrillViewInfoProvider;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-
-public class DrillHiveViewTable extends DrillHiveTable implements DrillViewInfoProvider {
-
-  public DrillHiveViewTable(String storageEngineName, HiveStoragePlugin plugin, HiveReadEntry readEntry) {
-    super(storageEngineName, plugin, readEntry);
-  }
-
-  @Override
-  public TableType getJdbcTableType() {
-    return TableType.VIEW;
-  }
-
-  @Override
-  public String getViewSql() {
-    return hiveTable.getViewExpandedText();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
deleted file mode 100644
index 0df2374..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveDatabaseSchema.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.util.List;
-import java.util.Set;
-
-import net.hydromatic.optiq.Table;
-
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.store.AbstractSchema;
-import org.apache.drill.exec.store.dfs.FileSystemConfig;
-import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
-import org.apache.drill.exec.store.hive.schema.HiveSchemaFactory.HiveSchema;
-
-import com.google.common.collect.Sets;
-
-public class HiveDatabaseSchema extends AbstractSchema{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveDatabaseSchema.class);
-
-  private final HiveSchema hiveSchema;
-  private final Set<String> tables;
-
-  public HiveDatabaseSchema( //
-      List<String> tableList, //
-      HiveSchema hiveSchema, //
-      String name) {
-    super(hiveSchema.getSchemaPath(), name);
-    this.hiveSchema = hiveSchema;
-    this.tables = Sets.newHashSet(tableList);
-  }
-
-  @Override
-  public Table getTable(String tableName) {
-    return hiveSchema.getDrillTable(this.name, tableName);
-  }
-
-  @Override
-  public Set<String> getTableNames() {
-    return tables;
-  }
-
-  @Override
-  public String getTypeName() {
-    return HiveStoragePluginConfig.NAME;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
deleted file mode 100644
index 7e6b92b..0000000
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/hive/schema/HiveSchemaFactory.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive.schema;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.collect.ImmutableList;
-
-import net.hydromatic.optiq.Schema;
-import net.hydromatic.optiq.SchemaPlus;
-
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.exec.planner.logical.DrillTable;
-import org.apache.drill.exec.rpc.user.DrillUser;
-import org.apache.drill.exec.rpc.user.UserSession;
-import org.apache.drill.exec.store.AbstractSchema;
-import org.apache.drill.exec.store.SchemaFactory;
-import org.apache.drill.exec.store.hive.HiveReadEntry;
-import org.apache.drill.exec.store.hive.HiveStoragePlugin;
-import org.apache.drill.exec.store.hive.HiveStoragePluginConfig;
-import org.apache.drill.exec.store.hive.HiveTable;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.thrift.TException;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-public class HiveSchemaFactory implements SchemaFactory {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveSchemaFactory.class);
-
-  private static final String DATABASES = "databases";
-
-  private final HiveMetaStoreClient mClient;
-  private LoadingCache<String, List<String>> databases;
-  private LoadingCache<String, List<String>> tableNameLoader;
-  private LoadingCache<String, LoadingCache<String, HiveReadEntry>> tableLoaders;
-  private HiveStoragePlugin plugin;
-  private final String schemaName;
-
-  public HiveSchemaFactory(HiveStoragePlugin plugin, String name, HiveConf hiveConf) throws ExecutionSetupException {
-    this.schemaName = name;
-    this.plugin = plugin;
-
-    try {
-      this.mClient = new HiveMetaStoreClient(hiveConf);
-    } catch (MetaException e) {
-      throw new ExecutionSetupException("Failure setting up Hive metastore client.", e);
-    }
-
-    databases = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(1, TimeUnit.MINUTES) //
-        .build(new DatabaseLoader());
-
-    tableNameLoader = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(1, TimeUnit.MINUTES) //
-        .build(new TableNameLoader());
-
-    tableLoaders = CacheBuilder //
-        .newBuilder() //
-        .expireAfterAccess(4, TimeUnit.HOURS) //
-        .maximumSize(20) //
-        .build(new TableLoaderLoader());
-  }
-
-  private class TableNameLoader extends CacheLoader<String, List<String>> {
-
-    @Override
-    public List<String> load(String dbName) throws Exception {
-      try {
-        return mClient.getAllTables(dbName);
-      } catch (TException e) {
-        logger.warn("Failure while attempting to get hive tables", e);
-        mClient.reconnect();
-        return mClient.getAllTables(dbName);
-      }
-    }
-
-  }
-
-  private class DatabaseLoader extends CacheLoader<String, List<String>> {
-
-    @Override
-    public List<String> load(String key) throws Exception {
-      if (!DATABASES.equals(key))
-        throw new UnsupportedOperationException();
-      try {
-        return mClient.getAllDatabases();
-      } catch (TException e) {
-        logger.warn("Failure while attempting to get hive tables", e);
-        mClient.reconnect();
-        return mClient.getAllDatabases();
-      }
-    }
-  }
-
-  private class TableLoaderLoader extends CacheLoader<String, LoadingCache<String, HiveReadEntry>> {
-
-    @Override
-    public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
-      return CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.MINUTES).build(new TableLoader(key));
-    }
-
-  }
-
-  private class TableLoader extends CacheLoader<String, HiveReadEntry> {
-
-    private final String dbName;
-
-    public TableLoader(String dbName) {
-      super();
-      this.dbName = dbName;
-    }
-
-    @Override
-    public HiveReadEntry load(String key) throws Exception {
-      Table t = null;
-      try {
-        t = mClient.getTable(dbName, key);
-      } catch (TException e) {
-        mClient.reconnect();
-        t = mClient.getTable(dbName, key);
-      }
-
-      if (t == null)
-        throw new UnknownTableException(String.format("Unable to find table '%s'.", key));
-
-      List<Partition> partitions = null;
-      try {
-        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
-      } catch (TException e) {
-        mClient.reconnect();
-        partitions = mClient.listPartitions(dbName, key, Short.MAX_VALUE);
-      }
-
-      List<HiveTable.HivePartition> hivePartitions = Lists.newArrayList();
-      for (Partition part : partitions) {
-        hivePartitions.add(new HiveTable.HivePartition(part));
-      }
-
-      if (hivePartitions.size() == 0)
-        hivePartitions = null;
-      return new HiveReadEntry(new HiveTable(t), hivePartitions);
-
-    }
-
-  }
-
-  @Override
-  public void registerSchemas(UserSession session, SchemaPlus parent) {
-    HiveSchema schema = new HiveSchema(schemaName);
-    SchemaPlus hPlus = parent.add(schemaName, schema);
-    schema.setHolder(hPlus);
-  }
-
-  class HiveSchema extends AbstractSchema {
-
-    private HiveDatabaseSchema defaultSchema;
-
-    public HiveSchema(String name) {
-      super(ImmutableList.<String>of(), name);
-      getSubSchema("default");
-    }
-
-    @Override
-    public Schema getSubSchema(String name) {
-      List<String> tables;
-      try {
-        tables = tableNameLoader.get(name);
-        HiveDatabaseSchema schema = new HiveDatabaseSchema(tables, this, name);
-        if(name.equals("default")){
-          this.defaultSchema = schema;
-        }
-        return schema;
-      } catch (ExecutionException e) {
-        logger.warn("Failure while attempting to access HiveDatabase '{}'.", name, e.getCause());
-        return null;
-      }
-
-    }
-
-
-    void setHolder(SchemaPlus plusOfThis){
-      for(String s : getSubSchemaNames()){
-        plusOfThis.add(s, getSubSchema(s));
-      }
-    }
-
-    @Override
-    public boolean showInInformationSchema() {
-      return false;
-    }
-
-    @Override
-    public Set<String> getSubSchemaNames() {
-      try{
-        List<String> dbs = databases.get(DATABASES);
-        return Sets.newHashSet(dbs);
-      }catch(ExecutionException e){
-        logger.warn("Failure while getting Hive database list.", e);
-      }
-      return super.getSubSchemaNames();
-    }
-
-    @Override
-    public net.hydromatic.optiq.Table getTable(String name) {
-      if(defaultSchema == null){
-        return super.getTable(name);
-      }
-      return defaultSchema.getTable(name);
-    }
-
-    @Override
-    public Set<String> getTableNames() {
-      if(defaultSchema == null){
-        return super.getTableNames();
-      }
-      return defaultSchema.getTableNames();
-    }
-
-    List<String> getTableNames(String dbName){
-      try{
-        return tableNameLoader.get(dbName);
-      }catch(ExecutionException e){
-        logger.warn("Failure while loading table names for database '{}'.", dbName, e.getCause());
-        return Collections.emptyList();
-      }
-    }
-
-    DrillTable getDrillTable(String dbName, String t){
-      HiveReadEntry entry = getSelectionBaseOnName(dbName, t);
-      if(entry == null) return null;
-
-      if (entry.getJdbcTableType() == TableType.VIEW) {
-        return new DrillHiveViewTable(schemaName, plugin, entry);
-      } else {
-        return new DrillHiveTable(schemaName, plugin, entry);
-      }
-    }
-
-    HiveReadEntry getSelectionBaseOnName(String dbName, String t) {
-      if(dbName == null) dbName = "default";
-      try{
-        return tableLoaders.get(dbName).get(t);
-      }catch(ExecutionException e){
-        logger.warn("Exception occurred while trying to read table. {}.{}", dbName, t, e.getCause());
-        return null;
-      }
-    }
-
-    @Override
-    public AbstractSchema getDefaultSchema() {
-      return defaultSchema;
-    }
-
-    @Override
-    public String getTypeName() {
-      return HiveStoragePluginConfig.NAME;
-    }
-
-  }
-
-
-}
\ No newline at end of file
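
HiveSchemaFactory above keeps metastore results behind Guava LoadingCache instances with short expireAfterAccess windows, so repeated schema lookups do not hit the metastore every time and stale entries age out on their own. A minimal sketch of that caching pattern; fetchTableNames is a hypothetical stand-in for the HiveMetaStoreClient calls in the removed code:

    import java.util.Arrays;
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    public class MetastoreCacheSketch {

      // Stand-in for HiveMetaStoreClient.getAllTables(dbName).
      static List<String> fetchTableNames(String dbName) {
        System.out.println("fetching tables for " + dbName);
        return Arrays.asList("kv", "readtest");
      }

      public static void main(String[] args) throws ExecutionException {
        LoadingCache<String, List<String>> tableNames = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.MINUTES)   // entries age out after a minute of inactivity
            .build(new CacheLoader<String, List<String>>() {
              @Override
              public List<String> load(String dbName) {
                return fetchTableNames(dbName);       // invoked only on a cache miss
              }
            });

        System.out.println(tableNames.get("default")); // triggers a fetch
        System.out.println(tableNames.get("default")); // served from the cache
      }
    }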

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
deleted file mode 100644
index b2fa898..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/TestHiveUDFs.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.physical.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import mockit.Injectable;
-import mockit.NonStrictExpectations;
-
-import org.apache.drill.common.config.DrillConfig;
-import org.apache.drill.exec.ExecTest;
-import org.apache.drill.exec.expr.fn.FunctionImplementationRegistry;
-import org.apache.drill.exec.memory.TopLevelAllocator;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.physical.PhysicalPlan;
-import org.apache.drill.exec.physical.base.FragmentRoot;
-import org.apache.drill.exec.planner.PhysicalPlanReader;
-import org.apache.drill.exec.proto.BitControl.PlanFragment;
-import org.apache.drill.exec.proto.CoordinationProtos;
-import org.apache.drill.exec.rpc.user.UserServer.UserClientConnection;
-import org.apache.drill.exec.server.DrillbitContext;
-import org.apache.drill.exec.vector.Float4Vector;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableBitVector;
-import org.apache.drill.exec.vector.NullableFloat8Vector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVar16CharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.drill.exec.vector.Var16CharVector;
-import org.junit.Test;
-
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.base.Charsets;
-import com.google.common.io.Resources;
-
-import java.util.Iterator;
-
-public class TestHiveUDFs extends ExecTest {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestHiveUDFs.class);
-
-  DrillConfig c = DrillConfig.create();
-  PhysicalPlanReader reader;
-  FunctionImplementationRegistry registry;
-  FragmentContext context;
-
-  private void setup(final DrillbitContext bitContext, UserClientConnection connection) throws Throwable {
-    if(reader == null)
-      reader = new PhysicalPlanReader(c, c.getMapper(), CoordinationProtos.DrillbitEndpoint.getDefaultInstance());
-    if(registry == null)
-      registry = new FunctionImplementationRegistry(c);
-    if(context == null)
-      context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
-  }
-
-  @Test
-  public void testGenericUDF(@Injectable final DrillbitContext bitContext,
-                      @Injectable UserClientConnection connection) throws Throwable {
-    new NonStrictExpectations(){{
-      bitContext.getMetrics(); result = new MetricRegistry();
-      bitContext.getAllocator(); result = new TopLevelAllocator();
-      bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
-    }};
-
-    String planString = Resources.toString(Resources.getResource("functions/hive/GenericUDF.json"), Charsets.UTF_8);
-
-    setup(bitContext, connection);
-    PhysicalPlan plan = reader.readPhysicalPlan(planString);
-    SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
-
-    int numRecords = 0;
-
-    while(exec.next()){
-      // Output columns and types
-      //  1. str1 : Var16Char
-      //  2. upperStr1 : NullableVar16Char
-      //  3. unix_timestamp : NullableBigInt
-      //  4. concat : NullableVarChar
-      //  5. flt1 : Float4
-      //  6. format_number : NullableFloat8
-      //  7. nullableStr1 : NullableVar16Char
-      //  8. upperNullableStr1 : NullableVar16Char
-      Iterator<ValueVector> vv = exec.iterator();
-      Var16CharVector str1V = (Var16CharVector) vv.next();
-      NullableVar16CharVector upperStr1V = (NullableVar16CharVector) vv.next();
-      NullableBigIntVector unix_timestampV = (NullableBigIntVector) vv.next();
-      NullableVar16CharVector concatV = (NullableVar16CharVector) vv.next();
-      Float4Vector flt1V = (Float4Vector) vv.next();
-      NullableVar16CharVector format_numberV = (NullableVar16CharVector) vv.next();
-      NullableVar16CharVector nullableStr1V = ((NullableVar16CharVector) vv.next());
-      NullableVar16CharVector upperNullableStr1V = ((NullableVar16CharVector) vv.next());
-
-      for(int i=0; i<exec.getRecordCount(); i++) {
-
-
-        String in = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
-        String upper = new String(upperStr1V.getAccessor().get(i), Charsets.UTF_16);
-        assertTrue(in.toUpperCase().equals(upper));
-
-        long unix_timestamp = unix_timestampV.getAccessor().get(i);
-
-        String concat = new String(concatV.getAccessor().get(i), Charsets.UTF_16);
-        assertTrue(concat.equals(in+"-"+in));
-
-        float flt1 = flt1V.getAccessor().get(i);
-        String format_number = new String(format_numberV.getAccessor().get(i), Charsets.UTF_16);
-
-
-        String nullableStr1 = null;
-        if (!nullableStr1V.getAccessor().isNull(i))
-          nullableStr1 = new String(nullableStr1V.getAccessor().get(i), Charsets.UTF_16);
-
-        String upperNullableStr1 = null;
-        if (!upperNullableStr1V.getAccessor().isNull(i))
-          upperNullableStr1 = new String(upperNullableStr1V.getAccessor().get(i), Charsets.UTF_16);
-
-        assertEquals(nullableStr1 != null, upperNullableStr1 != null);
-        if (nullableStr1 != null)
-          assertEquals(nullableStr1.toUpperCase(), upperNullableStr1);
-
-        System.out.println(in + ", " + upper + ", " + unix_timestamp + ", " + concat + ", " +
-          flt1 + ", " + format_number + ", " + nullableStr1 + ", " + upperNullableStr1);
-
-        numRecords++;
-      }
-    }
-
-    System.out.println("Processed " + numRecords + " records");
-
-    if(context.getFailureCause() != null){
-      throw context.getFailureCause();
-    }
-
-    assertTrue(!context.isFailed());
-  }
-
-  @Test
-  public void testUDF(@Injectable final DrillbitContext bitContext,
-                             @Injectable UserClientConnection connection) throws Throwable {
-    new NonStrictExpectations(){{
-      bitContext.getMetrics(); result = new MetricRegistry();
-      bitContext.getAllocator(); result = new TopLevelAllocator();
-      bitContext.getOperatorCreatorRegistry(); result = new OperatorCreatorRegistry(c);
-    }};
-
-    String planString = Resources.toString(Resources.getResource("functions/hive/UDF.json"), Charsets.UTF_8);
-
-    setup(bitContext, connection);
-    PhysicalPlan plan = reader.readPhysicalPlan(planString);
-    SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
-
-    int numRecords = 0;
-    while(exec.next()){
-      // Output columns and types
-      // 1. str1 : Var16Char
-      // 2. str1Length : Int
-      // 3. str1Ascii : Int
-      // 4. flt1 : Float4
-      // 5. pow : Float8
-      Iterator<ValueVector> vv = exec.iterator();
-      Var16CharVector str1V = (Var16CharVector) vv.next();
-      NullableIntVector str1LengthV = (NullableIntVector) vv.next();
-      NullableIntVector str1AsciiV = (NullableIntVector) vv.next();
-      Float4Vector flt1V = (Float4Vector) vv.next();
-      NullableFloat8Vector powV = (NullableFloat8Vector) vv.next();
-
-      for(int i=0; i<exec.getRecordCount(); i++) {
-
-        String str1 = new String(str1V.getAccessor().get(i), Charsets.UTF_16);
-        int str1Length = str1LengthV.getAccessor().get(i);
-        assertTrue(str1.length() == str1Length);
-
-        int str1Ascii = str1AsciiV.getAccessor().get(i);
-
-        float flt1 = flt1V.getAccessor().get(i);
-
-        double pow = 0;
-        if (!powV.getAccessor().isNull(i)) {
-          pow = powV.getAccessor().get(i);
-          assertTrue(Math.pow(flt1, 2.0) == pow);
-        }
-
-        System.out.println(str1 + ", " + str1Length + ", " + str1Ascii + ", " + flt1 + ", " + pow);
-        numRecords++;
-      }
-    }
-
-    System.out.println("Processed " + numRecords + " records");
-
-    if(context.getFailureCause() != null){
-      throw context.getFailureCause();
-    }
-
-    assertTrue(!context.isFailed());
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
deleted file mode 100644
index e051abb..0000000
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.drill.exec.store.hive;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.sql.Date;
-import java.sql.Timestamp;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-public class HiveTestDataGenerator {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveTestDataGenerator.class);
-
-  static int RETRIES = 5;
-  private Driver hiveDriver = null;
-  private static final String DB_DIR = "/tmp/drill_hive_db";
-  private static final String WH_DIR = "/tmp/drill_hive_wh";
-  
-  public static void main(String[] args) throws Exception {
-    HiveTestDataGenerator htd = new HiveTestDataGenerator();
-    htd.generateTestData();
-  }
-
-  private void cleanDir(String dir) throws IOException{
-    File f = new File(dir);
-    if(f.exists()){
-      FileUtils.cleanDirectory(f);
-      FileUtils.forceDelete(f);
-    }
-  }
-  
-  public void generateTestData() throws Exception {
-    
-    // remove data from previous runs.
-    cleanDir(DB_DIR);
-    cleanDir(WH_DIR);
-    
-    HiveConf conf = new HiveConf();
-
-    conf.set("javax.jdo.option.ConnectionURL", String.format("jdbc:derby:;databaseName=%s;create=true", DB_DIR));
-    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
-    conf.set("hive.metastore.warehouse.dir", WH_DIR);
-
-    SessionState ss = new SessionState(new HiveConf(SessionState.class));
-    SessionState.start(ss);
-    hiveDriver = new Driver(conf);
-
-    // generate (key, value) test data
-    String testDataFile = generateTestDataFile();
-
-    createTableAndLoadData("default", "kv", testDataFile);
-    executeQuery("CREATE DATABASE IF NOT EXISTS db1");
-    createTableAndLoadData("db1", "kv_db1", testDataFile);
-
-    // Generate data with date and timestamp data type
-    String testDateDataFile = generateTestDataFileWithDate();
-
-    // create table with date and timestamp data type
-    executeQuery("USE default");
-    executeQuery("CREATE TABLE IF NOT EXISTS default.foodate(a DATE, b TIMESTAMP) "+
-        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE");
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.foodate", testDateDataFile));
-
-    // create a table with no data
-    executeQuery("CREATE TABLE IF NOT EXISTS default.empty_table(a INT, b STRING)");
-
-    // create a Hive table that has columns with data types which are supported for reading in Drill.
-    testDataFile = generateAllTypesDataFile();
-    executeQuery(
-        "CREATE TABLE IF NOT EXISTS readtest (" +
-        "  binary_field BINARY," +
-        "  boolean_field BOOLEAN," +
-        "  tinyint_field TINYINT," +
-        "  decimal_field DECIMAL," +
-        "  double_field DOUBLE," +
-        "  float_field FLOAT," +
-        "  int_field INT," +
-        "  bigint_field BIGINT," +
-        "  smallint_field SMALLINT," +
-        "  string_field STRING," +
-        "  varchar_field VARCHAR(50)," +
-        "  timestamp_field TIMESTAMP," +
-        "  date_field DATE" +
-        ") PARTITIONED BY (" +
-        "  binary_part BINARY," +
-        "  boolean_part BOOLEAN," +
-        "  tinyint_part TINYINT," +
-        "  decimal_part DECIMAL," +
-        "  double_part DOUBLE," +
-        "  float_part FLOAT," +
-        "  int_part INT," +
-        "  bigint_part BIGINT," +
-        "  smallint_part SMALLINT," +
-        "  string_part STRING," +
-        "  varchar_part VARCHAR(50)," +
-        "  timestamp_part TIMESTAMP," +
-        "  date_part DATE" +
-        ") ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE"
-    );
-
-    // Add a partition to table 'readtest'
-    executeQuery(
-        "ALTER TABLE readtest ADD IF NOT EXISTS PARTITION ( " +
-        "  binary_part='binary', " +
-        "  boolean_part='true', " +
-        "  tinyint_part='64', " +
-        "  decimal_part='3489423929323435243', " +
-        "  double_part='8.345', " +
-        "  float_part='4.67', " +
-        "  int_part='123456', " +
-        "  bigint_part='234235', " +
-        "  smallint_part='3455', " +
-        "  string_part='string', " +
-        "  varchar_part='varchar', " +
-        "  timestamp_part='2013-07-05 17:01:00', " +
-        "  date_part='2013-07-05')"
-    );
-
-    // Load data into table 'readtest'
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE default.readtest PARTITION (" +
-        "  binary_part='binary', " +
-        "  boolean_part='true', " +
-        "  tinyint_part='64', " +
-        "  decimal_part='3489423929323435243', " +
-        "  double_part='8.345', " +
-        "  float_part='4.67', " +
-        "  int_part='123456', " +
-        "  bigint_part='234235', " +
-        "  smallint_part='3455', " +
-        "  string_part='string', " +
-        "  varchar_part='varchar', " +
-        "  timestamp_part='2013-07-05 17:01:00', " +
-        "  date_part='2013-07-05')", testDataFile));
-
-    // create a table that has all Hive types. This is to test how hive tables metadata is populated in
-    // Drill's INFORMATION_SCHEMA.
-    executeQuery("CREATE TABLE IF NOT EXISTS infoschematest(" +
-        "booleanType BOOLEAN, " +
-        "tinyintType TINYINT, " +
-        "smallintType SMALLINT, " +
-        "intType INT, " +
-        "bigintType BIGINT, " +
-        "floatType FLOAT, " +
-        "doubleType DOUBLE, " +
-        "dataType DATE, " +
-        "timestampType TIMESTAMP, " +
-        "binaryType BINARY, " +
-        "decimalType DECIMAL, " +
-        "stringType STRING, " +
-        "varCharType VARCHAR(20), " +
-        "listType ARRAY<STRING>, " +
-        "mapType MAP<STRING,INT>, " +
-        "structType STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>, " +
-        "uniontypeType UNIONTYPE<int, double, array<string>>)"
-    );
-
-    // create a Hive view to test how its metadata is populated in Drill's INFORMATION_SCHEMA
-    executeQuery("CREATE VIEW IF NOT EXISTS hiveview AS SELECT * FROM kv");
-
-    ss.close();
-  }
-
-  private void createTableAndLoadData(String dbName, String tblName, String dataFile) {
-    executeQuery(String.format("USE %s", dbName));
-    executeQuery(String.format("CREATE TABLE IF NOT EXISTS %s.%s(key INT, value STRING) "+
-        "ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS TEXTFILE", dbName, tblName));
-    executeQuery(String.format("LOAD DATA LOCAL INPATH '%s' OVERWRITE INTO TABLE %s.%s", dataFile, dbName, tblName));
-  }
-
-  private File getTempFile() throws Exception {
-    File file = null;
-    while (true) {
-      file = File.createTempFile("drill-hive-test", ".txt");
-      if (file.exists()) {
-        boolean success = file.delete();
-        if (success) {
-          break;
-        }
-      }
-      logger.debug("retry creating tmp file");
-    }
-
-    return file;
-  }
-
-  private String generateTestDataFile() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    for (int i=1; i<=5; i++)
-      printWriter.println (String.format("%d, key_%d", i, i));
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private String generateTestDataFileWithDate() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    for (int i=1; i<=5; i++) {
-      Date date = new Date(System.currentTimeMillis());
-      Timestamp ts = new Timestamp(System.currentTimeMillis());
-      printWriter.println (String.format("%s,%s", date.toString(), ts.toString()));
-    }
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private String generateAllTypesDataFile() throws Exception {
-    File file = getTempFile();
-
-    PrintWriter printWriter = new PrintWriter(file);
-    printWriter.println("YmluYXJ5ZmllbGQ=,false,34,3489423929323435243,8.345,4.67,123456,234235,3455,stringfield,varcharfield,2013-07-05 17:01:00,2013-07-05");
-    printWriter.close();
-
-    return file.getPath();
-  }
-
-  private void executeQuery(String query) {
-    CommandProcessorResponse response = null;
-    boolean failed = false;
-    int retryCount = RETRIES;
-
-    try {
-      response = hiveDriver.run(query);
-    } catch(CommandNeedRetryException ex) {
-      if (--retryCount == 0)
-        failed = true;
-    }
-
-    if (failed || response.getResponseCode() != 0 )
-      throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
-        query, (response != null ? response.getErrorMessage() : "")));
-  }
-}
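
A side note on the executeQuery() helper removed above: as written it never actually retries. A CommandNeedRetryException only decrements retryCount once without re-running the query, and if the exception is caught while retryCount is still positive, response stays null, so the later response.getResponseCode() check can throw a NullPointerException. Below is a minimal sketch of a variant that does retry, reusing the hiveDriver and RETRIES fields from the class above and the same Hive Driver / CommandProcessorResponse API; it is illustrative only and not part of this commit.

    private void executeQuery(String query) {
      CommandProcessorResponse response = null;
      int retryCount = RETRIES;

      while (true) {
        try {
          // Driver.run() may ask the caller to re-submit the command.
          response = hiveDriver.run(query);
          break;
        } catch (CommandNeedRetryException ex) {
          if (--retryCount == 0) {
            // Give up after RETRIES attempts and surface the original cause.
            throw new RuntimeException(
                String.format("Failed to execute command '%s' after %d retries", query, RETRIES), ex);
          }
        }
      }

      if (response.getResponseCode() != 0) {
        throw new RuntimeException(String.format("Failed to execute command '%s', errorMsg = '%s'",
            query, response.getErrorMessage()));
      }
    }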

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json b/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
deleted file mode 100644
index e849e00..0000000
--- a/exec/java-exec/src/test/resources/functions/hive/GenericUDF.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    head:{
-        type:"APACHE_DRILL_PHYSICAL",
-        version:"1",
-        generator:{
-            type:"manual"
-        }
-    },
-    graph:[
-        {
-            @id:1,
-            pop:"mock-sub-scan",
-            url: "http://apache.org",
-            entries:[
-                {records: 100, types: [
-                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "str2", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "str3", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "nullableStr1", type: "VAR16CHAR", mode: "OPTIONAL"},
-                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
-                ]}
-            ]
-        },
-        {
-            @id:2,
-            child: 1,
-            pop:"project",
-            exprs: [
-                { ref: "str1", expr: "str1" },
-                { ref: "upperStr1", expr: "ucase(str1)" },
-                { ref: "unix_timestamp", expr: "unix_timestamp()" },
-                { ref: "concat", expr: "concat_ws('-', str2, str3)" },
-                { ref: "flt1", expr: "flt1" },
-                { ref: "format_number", expr: "format_number(cast(flt1 as float8), cast(2 as int))" },
-                { ref: "nullableStr1", expr: "nullableStr1" },
-                { ref: "upperNulableStr1", expr: "upper(nullableStr1)" }
-            ]
-        },
-        {
-            @id: 3,
-            child: 2,
-            pop: "screen"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/java-exec/src/test/resources/functions/hive/UDF.json
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/resources/functions/hive/UDF.json b/exec/java-exec/src/test/resources/functions/hive/UDF.json
deleted file mode 100644
index cd4c7b1..0000000
--- a/exec/java-exec/src/test/resources/functions/hive/UDF.json
+++ /dev/null
@@ -1,39 +0,0 @@
-{
-    head:{
-        type:"APACHE_DRILL_PHYSICAL",
-        version:"1",
-        generator:{
-            type:"manual"
-        }
-    },
-    graph:[
-        {
-            @id:1,
-            pop:"mock-sub-scan",
-            url: "http://apache.org",
-            entries:[
-                {records: 100, types: [
-                   {name: "str1", type: "VAR16CHAR", mode: "REQUIRED"},
-                   {name: "flt1", type: "FLOAT4", mode: "REQUIRED"}
-                ]}
-            ]
-        },
-        {
-            @id:2,
-            child: 1,
-            pop:"project",
-            exprs: [
-                { ref: "str1", expr: "str1" },
-                { ref: "str1Length", expr: "length(str1)" },
-                { ref: "str1Ascii", expr: "ascii(str1)" },
-                { ref: "flt1", expr: "flt1" },
-                { ref: "pow", expr: "pow(cast(flt1 as float8), 2.0)" }
-            ]
-        },
-        {
-            @id: 3,
-            child: 2,
-            pop: "screen"
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/980dc87b/exec/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/exec/jdbc/pom.xml b/exec/jdbc/pom.xml
index 1cb5844..9906a5a 100644
--- a/exec/jdbc/pom.xml
+++ b/exec/jdbc/pom.xml
@@ -35,6 +35,11 @@
       <artifactId>drill-java-exec</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-storage-hive-core</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.drill</groupId>
       <artifactId>drill-common</artifactId>
@@ -50,6 +55,12 @@
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>org.apache.drill.contrib.storage-hive</groupId>
+      <artifactId>drill-storage-hive-core</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
       <groupId>pentaho</groupId>
       <artifactId>mondrian-data-foodmart-queries</artifactId>
       <version>0.3</version>