Posted to commits@drill.apache.org by ve...@apache.org on 2015/12/01 20:35:27 UTC
[1/3] drill git commit: DRILL-3739: Fix issues in reading Hive tables
with StorageHandler configuration (e.g. Hive-HBase tables)
Repository: drill
Updated Branches:
refs/heads/master 53e7a696f -> 9cb553dfe
DRILL-3739: Fix issues in reading Hive tables with StorageHandler configuration (e.g. Hive-HBase tables)
Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/9cb553df
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/9cb553df
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/9cb553df
Branch: refs/heads/master
Commit: 9cb553dfe90f367d99eb064abeda85d6bcbea1fe
Parents: 7de3429
Author: vkorukanti <ve...@gmail.com>
Authored: Wed Oct 21 11:01:23 2015 -0700
Committer: vkorukanti <ve...@gmail.com>
Committed: Tue Dec 1 10:37:16 2015 -0800
----------------------------------------------------------------------
.../drill/exec/store/hive/HiveRecordReader.java | 4 +--
.../apache/drill/exec/store/hive/HiveScan.java | 19 +++++---------
.../drill/exec/store/hive/HiveUtilities.java | 26 +++++++++++++++-----
.../apache/drill/exec/hive/TestHiveStorage.java | 11 +++++++++
.../exec/hive/TestInfoSchemaOnHiveStorage.java | 1 +
.../exec/store/hive/HiveTestDataGenerator.java | 8 ++++++
6 files changed, 48 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
index f148479..f50f331 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -138,14 +138,14 @@ public class HiveRecordReader extends AbstractRecordReader {
finalOI = (StructObjectInspector)ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);
partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);
- HiveUtilities.setInputFormatClass(job, partition.getSd());
+ HiveUtilities.setInputFormatClass(job, partition.getSd(), table);
} else {
// For non-partitioned tables, there is no need to create a converter as no schema changes are expected.
partitionSerDe = tableSerDe;
partitionOI = tableOI;
partTblObjectInspectorConverter = null;
finalOI = tableOI;
- HiveUtilities.setInputFormatClass(job, table.getSd());
+ HiveUtilities.setInputFormatClass(job, table.getSd(), table);
}
HiveUtilities.addConfToJob(job, properties, hiveConfigOverride);
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
index 85a8595..cd7d6e5 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveScan.java
@@ -178,34 +178,27 @@ public class HiveScan extends AbstractGroupScan {
splitInput(properties, table.getSd(), null);
} else {
for (final Partition partition : partitions) {
- final Properties properties = MetaStoreUtils.getPartitionMetadata(partition, table);
+ final Properties properties = HiveUtilities.getPartitionMetadata(partition, table);
splitInput(properties, partition.getSd(), partition);
}
}
- } catch (ReflectiveOperationException | IOException e) {
+ } catch (final Exception e) {
throw new ExecutionSetupException(e);
}
}
/* Split the input given in StorageDescriptor */
private void splitInput(final Properties properties, final StorageDescriptor sd, final Partition partition)
- throws ReflectiveOperationException, IOException {
+ throws Exception {
final JobConf job = new JobConf();
- for (final Object obj : properties.keySet()) {
- job.set((String) obj, (String) properties.get(obj));
- }
- for (final Map.Entry<String, String> entry : hiveReadEntry.hiveConfigOverride.entrySet()) {
- job.set(entry.getKey(), entry.getValue());
- }
- InputFormat<?, ?> format = (InputFormat<?, ?>)
- Class.forName(sd.getInputFormat()).getConstructor().newInstance();
- job.setInputFormat(format.getClass());
+ HiveUtilities.addConfToJob(job, properties, hiveReadEntry.hiveConfigOverride);
+ HiveUtilities.setInputFormatClass(job, sd, hiveReadEntry.getTable());
final Path path = new Path(sd.getLocation());
final FileSystem fs = path.getFileSystem(job);
if (fs.exists(path)) {
FileInputFormat.addInputPath(job, path);
- format = job.getInputFormat();
+ final InputFormat format = job.getInputFormat();
for (final InputSplit split : format.getSplits(job, 1)) {
inputSplits.add(split);
partitionMap.put(split, partition);
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
index 8475c81..9bf4213 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import io.netty.buffer.DrillBuf;
import org.apache.drill.common.exceptions.DrillRuntimeException;
+import org.apache.drill.common.exceptions.ExecutionSetupException;
import org.apache.drill.common.exceptions.UserException;
import org.apache.drill.common.types.TypeProtos;
import org.apache.drill.common.types.TypeProtos.DataMode;
@@ -56,6 +57,8 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
@@ -73,7 +76,7 @@ import java.sql.Timestamp;
import java.util.Map;
import java.util.Properties;
-import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
public class HiveUtilities {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveUtilities.class);
@@ -365,17 +368,28 @@ public class HiveUtilities {
}
/**
- * Utility method which sets table or partition {@link InputFormat} class in given {@link JobConf} object. It gets
- * the class name from given StorageDescriptor object.
- *
+ * Utility method which sets the table or partition {@link InputFormat} class in the given {@link JobConf} object.
+ * First it tries to get the class name from the given StorageDescriptor; if the descriptor doesn't contain it, it
+ * falls back to the StorageHandler class set in the table properties. If neither is present, an exception is thrown.
* @param job {@link JobConf} instance where InputFormat class is set.
* @param sd {@link StorageDescriptor} instance of currently reading partition or table (for non-partitioned tables).
+ * @param table Table object
* @throws Exception
*/
- public static void setInputFormatClass(final JobConf job, final StorageDescriptor sd)
+ public static void setInputFormatClass(final JobConf job, final StorageDescriptor sd, final Table table)
throws Exception {
final String inputFormatName = sd.getInputFormat();
- job.setInputFormat((Class<? extends InputFormat>) Class.forName(inputFormatName));
+ if (Strings.isNullOrEmpty(inputFormatName)) {
+ final String storageHandlerClass = table.getParameters().get(META_TABLE_STORAGE);
+ if (Strings.isNullOrEmpty(storageHandlerClass)) {
+ throw new ExecutionSetupException("Unable to get Hive table InputFormat class. Neither an InputFormat " +
+ "class is explicitly specified nor a StorageHandler class is set in table properties");
+ }
+ final HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(job, storageHandlerClass);
+ job.setInputFormat(storageHandler.getInputFormatClass());
+ } else {
+ job.setInputFormat((Class<? extends InputFormat>) Class.forName(inputFormatName));
+ }
}
/**
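For context, the resolution order that the new setInputFormatClass() implements can be sketched as a standalone helper. This is an illustrative sketch against the Hive 1.x metastore/ql APIs, not the committed Drill code; the class and method names are hypothetical.

    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
    import org.apache.hadoop.hive.ql.metadata.HiveUtils;
    import org.apache.hadoop.mapred.InputFormat;
    import org.apache.hadoop.mapred.JobConf;

    import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;

    class InputFormatResolver {
      @SuppressWarnings("unchecked")
      static Class<? extends InputFormat> resolveInputFormat(final JobConf job, final StorageDescriptor sd,
          final Table table) throws Exception {
        final String inputFormatName = sd.getInputFormat();
        if (inputFormatName != null && !inputFormatName.isEmpty()) {
          // Regular tables (TEXTFILE, PARQUET, ...) carry the InputFormat class name in the StorageDescriptor.
          return (Class<? extends InputFormat>) Class.forName(inputFormatName);
        }
        // StorageHandler-based tables (e.g. Hive-HBase) leave it unset; ask the handler instead.
        final String storageHandlerClass = table.getParameters().get(META_TABLE_STORAGE);
        if (storageHandlerClass == null || storageHandlerClass.isEmpty()) {
          throw new IllegalStateException("Neither InputFormat nor StorageHandler is set for the table");
        }
        final HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(job, storageHandlerClass);
        return storageHandler.getInputFormatClass();
      }
    }

This mirrors the committed method, with the JobConf mutation replaced by a return value so the fallback order is visible at a glance.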
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
index 06c08ef..5c844e8 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
@@ -368,6 +368,17 @@ public class TestHiveStorage extends HiveTestBase {
}
}
+ @Test // DRILL-3739
+ public void readingFromStorageHandleBasedTable() throws Exception {
+ testBuilder()
+ .sqlQuery("SELECT * FROM hive.kv_sh ORDER BY key LIMIT 2")
+ .ordered()
+ .baselineColumns("key", "value")
+ .baselineValues(1, " key_1")
+ .baselineValues(2, " key_2")
+ .go();
+ }
+
@AfterClass
public static void shutdownOptions() throws Exception {
test(String.format("alter session set `%s` = false", PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY));
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
index 3234e43..9352ce0 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
@@ -40,6 +40,7 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
.baselineValues("hive.default", "hiveview")
.baselineValues("hive.default", "kv")
.baselineValues("hive.default", "kv_parquet")
+ .baselineValues("hive.default", "kv_sh")
.go();
testBuilder()
http://git-wip-us.apache.org/repos/asf/drill/blob/9cb553df/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
index e5d843d..06473cd 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
@@ -435,6 +435,14 @@ public class HiveTestDataGenerator {
executeQuery(hiveDriver, "INSERT INTO TABLE kv_parquet PARTITION(part1) SELECT key, value, key FROM default.kv");
executeQuery(hiveDriver, "ALTER TABLE kv_parquet ADD COLUMNS (newcol string)");
+ // Create a StorageHandler based table (DRILL-3739)
+ executeQuery(hiveDriver, "CREATE TABLE kv_sh(key INT, value STRING) STORED BY " +
+ "'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler'");
+ // Insert fails if the table directory already exists for tables with DefaultStorageHandlers. It's a known
+ // issue in Hive, so delete the table directory created as part of the CREATE TABLE.
+ FileUtils.deleteQuietly(new File(whDir, "kv_sh"));
+ executeQuery(hiveDriver, "INSERT OVERWRITE TABLE kv_sh SELECT * FROM kv");
+
ss.close();
}
[3/3] drill git commit: DRILL-3938: Support reading from Hive tables
that have their schema altered after creation
Posted by ve...@apache.org.
DRILL-3938: Support reading from Hive tables that have their schema altered after creation
Also:
+ Remove "redoRecord" logic which is not needed after "automatic reallocation" (DRILL-1960) changes.
+ Remove HiveTestRecordReader. This is incomplete in implementation and not used anywhere. It is currently just
a burden to maintain with changes in its superclass HiveRecordReader
Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/bca72ab2
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/bca72ab2
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/bca72ab2
Branch: refs/heads/master
Commit: bca72ab26e94c1baca4b1d02e2531ce02182eb7b
Parents: 53e7a69
Author: vkorukanti <ve...@gmail.com>
Authored: Mon Oct 19 11:35:09 2015 -0700
Committer: vkorukanti <ve...@gmail.com>
Committed: Tue Dec 1 10:37:16 2015 -0800
----------------------------------------------------------------------
...onvertHiveParquetScanToDrillParquetScan.java | 15 +-
.../drill/exec/store/hive/HiveRecordReader.java | 177 +++++++++---------
.../exec/store/hive/HiveTextRecordReader.java | 181 -------------------
.../drill/exec/store/hive/HiveUtilities.java | 63 +++++++
.../apache/drill/exec/hive/TestHiveStorage.java | 26 +++
.../exec/hive/TestInfoSchemaOnHiveStorage.java | 1 +
.../exec/store/hive/HiveTestDataGenerator.java | 6 +
7 files changed, 198 insertions(+), 271 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
index 47700c9..14e4a6f 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/planner/sql/logical/ConvertHiveParquetScanToDrillParquetScan.java
@@ -101,12 +101,25 @@ public class ConvertHiveParquetScanToDrillParquetScan extends StoragePluginOptim
return true;
}
+ final List<FieldSchema> tableSchema = hiveTable.getSd().getCols();
// Make sure all partitions have the same input format as the table input format
for (HivePartition partition : partitions) {
- Class<? extends InputFormat> inputFormat = getInputFormatFromSD(hiveTable, partition.getPartition().getSd());
+ final StorageDescriptor partitionSD = partition.getPartition().getSd();
+ Class<? extends InputFormat> inputFormat = getInputFormatFromSD(hiveTable, partitionSD);
if (inputFormat == null || !inputFormat.equals(tableInputFormat)) {
return false;
}
+
+ // Make sure the schema of the table and the schema of the partition match. If not, return false. Schema changes
+ // between table and partition can happen when the table schema is altered using ALTER statements after some
+ // partitions are already created. Currently the native reader conversion doesn't handle schema changes between
+ // partition and table. Hive has an extensive list of convert methods to convert from one type to any of the other
+ // possible types; Drill doesn't have a similar set of methods yet.
+ if (!partitionSD.getCols().equals(tableSchema)) {
+ logger.debug("Partitions schema is different from table schema. Currently native reader conversion can't " +
+ "handle schema difference between partitions and table");
+ return false;
+ }
}
return true;
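The schema check above works because FieldSchema is a Thrift-generated struct with value-based equals(), so List.equals() compares the columns pairwise. A hypothetical illustration of the mismatch introduced by ALTER TABLE ... ADD COLUMNS (the column names mirror the kv_parquet test table; this snippet is not part of the commit):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    class SchemaMismatchDemo {
      public static void main(String[] args) {
        // The table gained "newcol" via ALTER TABLE after the partition was created;
        // the existing partition's StorageDescriptor still has the old column list.
        final List<FieldSchema> tableSchema = Arrays.asList(
            new FieldSchema("key", "int", null),
            new FieldSchema("value", "string", null),
            new FieldSchema("newcol", "string", null));
        final List<FieldSchema> partitionSchema = Arrays.asList(
            new FieldSchema("key", "int", null),
            new FieldSchema("value", "string", null));

        // Any difference in column name, type or order fails the check, so the rule
        // keeps the regular Hive reader instead of the native Parquet scan.
        System.out.println(partitionSchema.equals(tableSchema)); // prints: false
      }
    }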
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
index 40fed61..f148479 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveRecordReader.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.SerDe;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
@@ -76,14 +78,25 @@ public class HiveRecordReader extends AbstractRecordReader {
protected List<String> selectedPartitionNames = Lists.newArrayList();
protected List<TypeInfo> selectedPartitionTypes = Lists.newArrayList();
protected List<Object> selectedPartitionValues = Lists.newArrayList();
- protected List<String> tableColumns; // all columns in table (not including partition columns)
- protected SerDe serde;
- protected StructObjectInspector sInspector;
+
+ // SerDe of the reading partition (or table if the table is non-partitioned)
+ protected SerDe partitionSerDe;
+
+ // ObjectInspector to read data from partitionSerDe (for a non-partitioned table this is the same as the table
+ // ObjectInspector).
+ protected StructObjectInspector partitionOI;
+
+ // Final ObjectInspector. We may not use the partitionOI directly if there are schema changes between the table
+ // and partition. If there are no schema changes, this is the same as the partitionOI.
+ protected StructObjectInspector finalOI;
+
+ // Converter which converts data from partition schema to table schema.
+ private Converter partTblObjectInspectorConverter;
+
protected Object key, value;
protected org.apache.hadoop.mapred.RecordReader reader;
protected List<ValueVector> vectors = Lists.newArrayList();
protected List<ValueVector> pVectors = Lists.newArrayList();
- protected Object redoRecord;
protected boolean empty;
private Map<String, String> hiveConfigOverride;
private FragmentContext fragmentContext;
@@ -107,78 +120,65 @@ public class HiveRecordReader extends AbstractRecordReader {
}
private void init() throws ExecutionSetupException {
- Properties properties;
- JobConf job = new JobConf();
- if (partition != null) {
- properties = MetaStoreUtils.getPartitionMetadata(partition, table);
-
- // SerDe expects properties from Table, but above call doesn't add Table properties.
- // Include Table properties in final list in order to not to break SerDes that depend on
- // Table properties. For example AvroSerDe gets the schema from properties (passed as second argument)
- for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
- if (entry.getKey() != null && entry.getKey() != null) {
- properties.put(entry.getKey(), entry.getValue());
- }
- }
- } else {
- properties = MetaStoreUtils.getTableMetadata(table);
- }
- for (Object obj : properties.keySet()) {
- job.set((String) obj, (String) properties.get(obj));
- }
- for(Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) {
- job.set(entry.getKey(), entry.getValue());
- }
+ final JobConf job = new JobConf();
// Get the configured default val
defaultPartitionValue = HiveUtilities.getDefaultPartitionValue(hiveConfigOverride);
- InputFormat format;
- String sLib = (partition == null) ? table.getSd().getSerdeInfo().getSerializationLib() : partition.getSd().getSerdeInfo().getSerializationLib();
- String inputFormatName = (partition == null) ? table.getSd().getInputFormat() : partition.getSd().getInputFormat();
try {
- format = (InputFormat) Class.forName(inputFormatName).getConstructor().newInstance();
- Class<?> c = Class.forName(sLib);
- serde = (SerDe) c.getConstructor().newInstance();
- serde.initialize(job, properties);
- } catch (ReflectiveOperationException | SerDeException e) {
- throw new ExecutionSetupException("Unable to instantiate InputFormat", e);
- }
- job.setInputFormat(format.getClass());
+ Properties properties = MetaStoreUtils.getTableMetadata(table);
+ final SerDe tableSerDe = createSerDe(job, table.getSd().getSerdeInfo().getSerializationLib(), properties);
+ final StructObjectInspector tableOI = getStructOI(tableSerDe);
- List<FieldSchema> partitionKeys = table.getPartitionKeys();
- List<String> partitionNames = Lists.newArrayList();
- for (FieldSchema field : partitionKeys) {
- partitionNames.add(field.getName());
- }
+ if (partition != null) {
+ properties = HiveUtilities.getPartitionMetadata(partition, table);
- try {
- ObjectInspector oi = serde.getObjectInspector();
- if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
- throw new UnsupportedOperationException(String.format("%s category not supported", oi.getCategory()));
+ partitionSerDe = createSerDe(job, partition.getSd().getSerdeInfo().getSerializationLib(), properties);
+ partitionOI = getStructOI(partitionSerDe);
+
+ finalOI = (StructObjectInspector)ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);
+ partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);
+ HiveUtilities.setInputFormatClass(job, partition.getSd());
+ } else {
+ // For non-partitioned tables, there is no need to create a converter as no schema changes are expected.
+ partitionSerDe = tableSerDe;
+ partitionOI = tableOI;
+ partTblObjectInspectorConverter = null;
+ finalOI = tableOI;
+ HiveUtilities.setInputFormatClass(job, table.getSd());
+ }
+
+ HiveUtilities.addConfToJob(job, properties, hiveConfigOverride);
+
+ // Get list of partition column names
+ final List<String> partitionNames = Lists.newArrayList();
+ for (FieldSchema field : table.getPartitionKeys()) {
+ partitionNames.add(field.getName());
}
- sInspector = (StructObjectInspector) oi;
- StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(sInspector);
- List<Integer> columnIds = Lists.newArrayList();
+
+ // We should always get the column names from the ObjectInspector. For some tables (e.g. Avro) the metastore
+ // may not contain the schema; instead it is derived from other sources such as table properties or an external
+ // file. The SerDe object knows how to get the schema from the config and table properties passed during
+ // initialization, and the ObjectInspector created from the SerDe carries that schema.
+ final StructTypeInfo sTypeInfo = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(finalOI);
+ final List<String> tableColumnNames = sTypeInfo.getAllStructFieldNames();
+
+ // Select list of columns for project pushdown into Hive SerDe readers.
+ final List<Integer> columnIds = Lists.newArrayList();
if (isStarQuery()) {
- selectedColumnNames = sTypeInfo.getAllStructFieldNames();
- tableColumns = selectedColumnNames;
+ selectedColumnNames = tableColumnNames;
for(int i=0; i<selectedColumnNames.size(); i++) {
columnIds.add(i);
}
+ selectedPartitionNames = partitionNames;
} else {
- tableColumns = sTypeInfo.getAllStructFieldNames();
selectedColumnNames = Lists.newArrayList();
for (SchemaPath field : getColumns()) {
String columnName = field.getRootSegment().getPath();
- if (!tableColumns.contains(columnName)) {
- if (partitionNames.contains(columnName)) {
- selectedPartitionNames.add(columnName);
- } else {
- throw new ExecutionSetupException(String.format("Column %s does not exist", columnName));
- }
+ if (partitionNames.contains(columnName)) {
+ selectedPartitionNames.add(columnName);
} else {
- columnIds.add(tableColumns.indexOf(columnName));
+ columnIds.add(tableColumnNames.indexOf(columnName));
selectedColumnNames.add(columnName);
}
}
@@ -186,7 +186,7 @@ public class HiveRecordReader extends AbstractRecordReader {
ColumnProjectionUtils.appendReadColumns(job, columnIds, selectedColumnNames);
for (String columnName : selectedColumnNames) {
- ObjectInspector fieldOI = sInspector.getStructFieldRef(columnName).getFieldObjectInspector();
+ ObjectInspector fieldOI = finalOI.getStructFieldRef(columnName).getFieldObjectInspector();
TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(fieldOI.getTypeName());
selectedColumnObjInspectors.add(fieldOI);
@@ -194,10 +194,6 @@ public class HiveRecordReader extends AbstractRecordReader {
selectedColumnFieldConverters.add(HiveFieldConverter.create(typeInfo, fragmentContext));
}
- if (isStarQuery()) {
- selectedPartitionNames = partitionNames;
- }
-
for (int i = 0; i < table.getPartitionKeys().size(); i++) {
FieldSchema field = table.getPartitionKeys().get(i);
if (selectedPartitionNames.contains(field.getName())) {
@@ -216,8 +212,8 @@ public class HiveRecordReader extends AbstractRecordReader {
if (!empty) {
try {
- reader = format.getRecordReader(inputSplit, job, Reporter.NULL);
- } catch (IOException e) {
+ reader = job.getInputFormat().getRecordReader(inputSplit, job, Reporter.NULL);
+ } catch (Exception e) {
throw new ExecutionSetupException("Failed to get o.a.hadoop.mapred.RecordReader from Hive InputFormat", e);
}
key = reader.createKey();
@@ -225,6 +221,25 @@ public class HiveRecordReader extends AbstractRecordReader {
}
}
+ /**
+ * Utility method which creates a SerDe object for the given SerDe class name and properties.
+ */
+ private static SerDe createSerDe(final JobConf job, final String sLib, final Properties properties) throws Exception {
+ final Class<?> c = Class.forName(sLib);
+ final SerDe serde = (SerDe) c.getConstructor().newInstance();
+ serde.initialize(job, properties);
+
+ return serde;
+ }
+
+ private static StructObjectInspector getStructOI(final SerDe serDe) throws Exception {
+ ObjectInspector oi = serDe.getObjectInspector();
+ if (oi.getCategory() != ObjectInspector.Category.STRUCT) {
+ throw new UnsupportedOperationException(String.format("%s category not supported", oi.getCategory()));
+ }
+ return (StructObjectInspector) oi;
+ }
+
@Override
public void setup(@SuppressWarnings("unused") OperatorContext context, OutputMutator output)
throws ExecutionSetupException {
@@ -280,26 +295,12 @@ public class HiveRecordReader extends AbstractRecordReader {
try {
int recordCount = 0;
-
- if (redoRecord != null) {
- // Try writing the record that didn't fit into the last RecordBatch
- Object deSerializedValue = serde.deserialize((Writable) redoRecord);
- boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
- if (!status) {
- throw new DrillRuntimeException("Current record is too big to fit into allocated ValueVector buffer");
- }
- redoRecord = null;
- recordCount++;
- }
-
while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
- Object deSerializedValue = serde.deserialize((Writable) value);
- boolean status = readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
- if (!status) {
- redoRecord = value;
- setValueCountAndPopulatePartitionVectors(recordCount);
- return recordCount;
+ Object deSerializedValue = partitionSerDe.deserialize((Writable) value);
+ if (partTblObjectInspectorConverter != null) {
+ deSerializedValue = partTblObjectInspectorConverter.convert(deSerializedValue);
}
+ readHiveRecordAndInsertIntoRecordBatch(deSerializedValue, recordCount);
recordCount++;
}
@@ -310,18 +311,16 @@ public class HiveRecordReader extends AbstractRecordReader {
}
}
- private boolean readHiveRecordAndInsertIntoRecordBatch(Object deSerializedValue, int outputRecordIndex) {
+ private void readHiveRecordAndInsertIntoRecordBatch(Object deSerializedValue, int outputRecordIndex) {
for (int i = 0; i < selectedColumnNames.size(); i++) {
- String columnName = selectedColumnNames.get(i);
- Object hiveValue = sInspector.getStructFieldData(deSerializedValue, sInspector.getStructFieldRef(columnName));
+ final String columnName = selectedColumnNames.get(i);
+ Object hiveValue = finalOI.getStructFieldData(deSerializedValue, finalOI.getStructFieldRef(columnName));
if (hiveValue != null) {
selectedColumnFieldConverters.get(i).setSafeValue(selectedColumnObjInspectors.get(i), hiveValue,
vectors.get(i), outputRecordIndex);
}
}
-
- return true;
}
private void setValueCountAndPopulatePartitionVectors(int recordCount) {
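In summary, the new read path deserializes each row with the partition SerDe and, when partition and table schemas differ, runs it through an ObjectInspector converter before writing into value vectors. A minimal sketch of that flow, assuming the Hive 1.x serde2 API, with SerDe initialization and vector writing elided:

    import org.apache.hadoop.hive.serde2.SerDe;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
    import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
    import org.apache.hadoop.io.Writable;

    class PartitionToTableRowSketch {
      /** Deserializes one row in partition schema and widens it to the table schema. */
      static Object readRow(final SerDe partitionSerDe, final StructObjectInspector partitionOI,
          final StructObjectInspector tableOI, final Writable value) throws Exception {
        // finalOI matches the table schema; it differs from partitionOI only when the
        // table was altered after the partition was created.
        final StructObjectInspector finalOI = (StructObjectInspector)
            ObjectInspectorConverters.getConvertedOI(partitionOI, tableOI);
        final Converter converter = ObjectInspectorConverters.getConverter(partitionOI, finalOI);

        final Object partitionRow = partitionSerDe.deserialize(value); // row in partition schema
        return converter.convert(partitionRow);                       // row in table schema
      }
    }

For a non-partitioned table the converter step is skipped entirely, as the else-branch in init() shows.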
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
deleted file mode 100644
index 039e698..0000000
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveTextRecordReader.java
+++ /dev/null
@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.drill.exec.store.hive;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
-import org.apache.drill.common.expression.SchemaPath;
-import org.apache.drill.exec.ops.FragmentContext;
-import org.apache.drill.exec.vector.AllocationHelper;
-import org.apache.drill.exec.vector.NullableBigIntVector;
-import org.apache.drill.exec.vector.NullableIntVector;
-import org.apache.drill.exec.vector.NullableVarCharVector;
-import org.apache.drill.exec.vector.ValueVector;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.InputSplit;
-
-import com.google.common.collect.Lists;
-
-/**
- * Note: Native hive text record reader is not complete in implementation. For now use
- * {@link org.apache.drill.exec.store.hive.HiveRecordReader}.
- */
-public class HiveTextRecordReader extends HiveRecordReader {
-
- public final byte delimiter;
- public final List<Integer> columnIds;
- private final int numCols;
-
- public HiveTextRecordReader(Table table, Partition partition, InputSplit inputSplit, List<SchemaPath> projectedColumns, FragmentContext context) throws ExecutionSetupException {
- super(table, partition, inputSplit, projectedColumns, context, null, null);
- String d = table.getSd().getSerdeInfo().getParameters().get("field.delim");
- if (d != null) {
- delimiter = d.getBytes()[0];
- } else {
- delimiter = (byte) 1;
- }
- assert delimiter > 0;
- List<Integer> ids = Lists.newArrayList();
- for (int i = 0; i < tableColumns.size(); i++) {
- if (selectedColumnNames.contains(tableColumns.get(i))) {
- ids.add(i);
- }
- }
- columnIds = ids;
- numCols = tableColumns.size();
- }
-
- public void setValue(PrimitiveObjectInspector.PrimitiveCategory pCat, ValueVector vv, int index, byte[] bytes, int start) {
- switch(pCat) {
- case BINARY:
- throw new UnsupportedOperationException();
- case BOOLEAN:
- throw new UnsupportedOperationException();
- case BYTE:
- throw new UnsupportedOperationException();
- case DECIMAL:
- throw new UnsupportedOperationException();
- case DOUBLE:
- throw new UnsupportedOperationException();
- case FLOAT:
- throw new UnsupportedOperationException();
- case INT: {
- int value = 0;
- byte b;
- for (int i = start; (b = bytes[i]) != delimiter; i++) {
- value = (value * 10) + b - 48;
- }
- ((NullableIntVector) vv).getMutator().setSafe(index, value);
- }
- case LONG: {
- long value = 0;
- byte b;
- for (int i = start; (b = bytes[i]) != delimiter; i++) {
- value = (value * 10) + b - 48;
- }
- ((NullableBigIntVector) vv).getMutator().setSafe(index, value);
- }
- case SHORT:
- throw new UnsupportedOperationException();
- case STRING: {
- int end = start;
- for (int i = start; i < bytes.length; i++) {
- if (bytes[i] == delimiter) {
- end = i;
- break;
- }
- end = bytes.length;
- }
- ((NullableVarCharVector) vv).getMutator().setSafe(index, bytes, start, end - start);
- }
- case TIMESTAMP:
- throw new UnsupportedOperationException();
-
- default:
- throw new UnsupportedOperationException("Could not determine type");
- }
- }
-
-
- @Override
- public int next() {
- for (ValueVector vv : vectors) {
- AllocationHelper.allocateNew(vv, TARGET_RECORD_COUNT);
- }
- try {
- int recordCount = 0;
- if (redoRecord != null) {
- int length = ((Text) value).getLength();
- byte[] bytes = ((Text) value).getBytes();
- int[] delimPositions = new int[numCols];
- delimPositions[0] = -1;
- int p = 0;
- for (int i = 0; i < length; i++) {
- if (bytes[i] == delimiter) {
- delimPositions[p++] = i;
- }
- }
- for (int id : columnIds) {
- boolean success = false; // setValue(primitiveCategories.get(id), vectors.get(id), recordCount, bytes, delimPositions[id]);
- if (!success) {
- throw new DrillRuntimeException(String.format("Failed to write value for column %s", selectedColumnNames.get(id)));
- }
-
- }
- redoRecord = null;
- }
- while (recordCount < TARGET_RECORD_COUNT && reader.next(key, value)) {
- int length = ((Text) value).getLength();
- byte[] bytes = ((Text) value).getBytes();
- int[] delimPositions = new int[numCols + 1];
- delimPositions[0] = -1;
- int p = 1;
- for (int i = 0; i < length; i++) {
- if (bytes[i] == delimiter) {
- delimPositions[p++] = i;
- }
- }
- for (int i = 0; i < columnIds.size(); i++) {
- int id = columnIds.get(i);
- boolean success = false; // setValue(primitiveCategories.get(i), vectors.get(i), recordCount, bytes, delimPositions[id] + 1);
- if (!success) {
- redoRecord = value;
- if (partition != null) {
- populatePartitionVectors(recordCount);
- }
- return recordCount;
- }
- }
- recordCount++;
- }
- if (partition != null) {
- populatePartitionVectors(recordCount);
- }
- return recordCount;
- } catch (IOException e) {
- throw new DrillRuntimeException(e);
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
index 05e51e3..8475c81 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveUtilities.java
@@ -52,12 +52,18 @@ import org.apache.drill.exec.work.ExecErrorConstants;
import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.JobConf;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@@ -65,6 +71,9 @@ import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.FILE_INPUT_FORMAT;
public class HiveUtilities {
static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveUtilities.class);
@@ -355,6 +364,60 @@ public class HiveUtilities {
return new HiveConf().getVar(ConfVars.DEFAULTPARTITIONNAME);
}
+ /**
+ * Utility method which sets table or partition {@link InputFormat} class in given {@link JobConf} object. It gets
+ * the class name from given StorageDescriptor object.
+ *
+ * @param job {@link JobConf} instance where InputFormat class is set.
+ * @param sd {@link StorageDescriptor} instance of currently reading partition or table (for non-partitioned tables).
+ * @throws Exception
+ */
+ public static void setInputFormatClass(final JobConf job, final StorageDescriptor sd)
+ throws Exception {
+ final String inputFormatName = sd.getInputFormat();
+ job.setInputFormat((Class<? extends InputFormat>) Class.forName(inputFormatName));
+ }
+
+ /**
+ * Utility method which adds the given configs to a {@link JobConf} object.
+ *
+ * @param job {@link JobConf} instance.
+ * @param properties New config properties
+ * @param hiveConfigOverride HiveConfig override.
+ */
+ public static void addConfToJob(final JobConf job, final Properties properties,
+ final Map<String, String> hiveConfigOverride) {
+ for (Object obj : properties.keySet()) {
+ job.set((String) obj, (String) properties.get(obj));
+ }
+ for(Map.Entry<String, String> entry : hiveConfigOverride.entrySet()) {
+ job.set(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+ * Wrapper around {@link MetaStoreUtils#getPartitionMetadata(Partition, Table)} which also adds parameters from the
+ * table to the properties returned by {@link MetaStoreUtils#getPartitionMetadata(Partition, Table)}.
+ *
+ * @param partition
+ * @param table
+ * @return
+ */
+ public static Properties getPartitionMetadata(final Partition partition, final Table table) {
+ final Properties properties = MetaStoreUtils.getPartitionMetadata(partition, table);
+
+ // SerDe expects properties from Table, but above call doesn't add Table properties.
+ // Include Table properties in the final list in order not to break SerDes that depend on
+ // Table properties. For example AvroSerDe gets the schema from properties (passed as the second argument).
+ for (Map.Entry<String, String> entry : table.getParameters().entrySet()) {
+ if (entry.getKey() != null && entry.getValue() != null) {
+ properties.put(entry.getKey(), entry.getValue());
+ }
+ }
+
+ return properties;
+ }
+
public static void throwUnsupportedHiveDataTypeError(String unsupportedType) {
StringBuilder errMsg = new StringBuilder();
errMsg.append(String.format("Unsupported Hive data type %s. ", unsupportedType));
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
index 9d784fe..06c08ef 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestHiveStorage.java
@@ -30,6 +30,8 @@ import java.math.BigDecimal;
import java.sql.Date;
import java.sql.Timestamp;
+import static org.junit.Assert.assertFalse;
+
public class TestHiveStorage extends HiveTestBase {
@BeforeClass
public static void setupOptions() throws Exception {
@@ -342,6 +344,30 @@ public class TestHiveStorage extends HiveTestBase {
.go();
}
+ @Test // DRILL-3938
+ public void readFromAlteredPartitionedTable() throws Exception {
+ testBuilder()
+ .sqlQuery("SELECT key, `value`, newcol FROM hive.kv_parquet ORDER BY key LIMIT 1")
+ .unOrdered()
+ .baselineColumns("key", "value", "newcol")
+ .baselineValues(1, " key_1", null)
+ .go();
+ }
+
+ @Test // DRILL-3938
+ public void nativeReaderIsDisabledForAlteredPartitionedTable() throws Exception {
+ try {
+ test(String.format("alter session set `%s` = true", ExecConstants.HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS));
+ final String query = "EXPLAIN PLAN FOR SELECT key, `value`, newcol FROM hive.kv_parquet ORDER BY key LIMIT 1";
+
+ // Make sure the HiveScan in plan has no native parquet reader
+ final String planStr = getPlanInString(query, JSON_FORMAT);
+ assertFalse("Hive native parquet scan is not expected in the plan", planStr.contains("hive-drill-native-parquet-scan"));
+ } finally {
+ test(String.format("alter session set `%s` = false", ExecConstants.HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS));
+ }
+ }
+
@AfterClass
public static void shutdownOptions() throws Exception {
test(String.format("alter session set `%s` = false", PlannerSettings.ENABLE_DECIMAL_DATA_TYPE_KEY));
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
index d203bd4..3234e43 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/hive/TestInfoSchemaOnHiveStorage.java
@@ -39,6 +39,7 @@ public class TestInfoSchemaOnHiveStorage extends HiveTestBase {
.baselineValues("hive.default", "infoschematest")
.baselineValues("hive.default", "hiveview")
.baselineValues("hive.default", "kv")
+ .baselineValues("hive.default", "kv_parquet")
.go();
testBuilder()
http://git-wip-us.apache.org/repos/asf/drill/blob/bca72ab2/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
index 17a433f..e5d843d 100644
--- a/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
+++ b/contrib/storage-hive/core/src/test/java/org/apache/drill/exec/store/hive/HiveTestDataGenerator.java
@@ -429,6 +429,12 @@ public class HiveTestDataGenerator {
executeQuery(hiveDriver, "DROP TABLE partition_pruning_test_loadtable");
+ // Create a partitioned parquet table (DRILL-3938)
+ executeQuery(hiveDriver,
+ "CREATE TABLE kv_parquet(key INT, value STRING) PARTITIONED BY (part1 int) STORED AS PARQUET");
+ executeQuery(hiveDriver, "INSERT INTO TABLE kv_parquet PARTITION(part1) SELECT key, value, key FROM default.kv");
+ executeQuery(hiveDriver, "ALTER TABLE kv_parquet ADD COLUMNS (newcol string)");
+
ss.close();
}
[2/3] drill git commit: DRILL-3893: Change Hive metadata cache
invalidation policy to "1 min after last write".
Posted by ve...@apache.org.
DRILL-3893: Change Hive metadata cache invalidation policy to "1 min after last write".
Project: http://git-wip-us.apache.org/repos/asf/drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/drill/commit/7de34293
Tree: http://git-wip-us.apache.org/repos/asf/drill/tree/7de34293
Diff: http://git-wip-us.apache.org/repos/asf/drill/diff/7de34293
Branch: refs/heads/master
Commit: 7de34293d44cb9ef40d7771f82de588fd7c33bc2
Parents: bca72ab
Author: vkorukanti <ve...@gmail.com>
Authored: Tue Oct 20 16:21:09 2015 -0700
Committer: vkorukanti <ve...@gmail.com>
Committed: Tue Dec 1 10:37:16 2015 -0800
----------------------------------------------------------------------
.../apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/drill/blob/7de34293/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
index aa1d074..3667c8f 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/DrillHiveMetaStoreClient.java
@@ -307,12 +307,12 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
databases = CacheBuilder //
.newBuilder() //
- .expireAfterAccess(1, TimeUnit.MINUTES) //
+ .expireAfterWrite(1, TimeUnit.MINUTES) //
.build(new DatabaseLoader());
tableNameLoader = CacheBuilder //
.newBuilder() //
- .expireAfterAccess(1, TimeUnit.MINUTES) //
+ .expireAfterWrite(1, TimeUnit.MINUTES) //
.build(new TableNameLoader());
tableLoaders = CacheBuilder //
@@ -387,7 +387,7 @@ public abstract class DrillHiveMetaStoreClient extends HiveMetaStoreClient {
public LoadingCache<String, HiveReadEntry> load(String key) throws Exception {
return CacheBuilder
.newBuilder()
- .expireAfterAccess(1, TimeUnit.MINUTES)
+ .expireAfterWrite(1, TimeUnit.MINUTES)
.build(new TableLoader(key));
}
}
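The practical difference between the two expiry policies: expireAfterAccess resets the timer on every read, so an entry that is queried continuously is never reloaded and can serve arbitrarily stale metadata; expireAfterWrite starts the timer when the entry is loaded, so staleness is bounded at one minute no matter how hot the entry is. A minimal Guava sketch, with a hypothetical loader standing in for the metastore call:

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    class MetadataCacheSketch {
      // Hypothetical stand-in for a metastore lookup; not part of the commit.
      static List<String> fetchTableNames(String dbName) {
        throw new UnsupportedOperationException("metastore lookup goes here");
      }

      static LoadingCache<String, List<String>> newTableNameCache() {
        return CacheBuilder.newBuilder()
            // Reload at most 1 minute after the entry was cached, regardless of
            // how often it is read in the meantime.
            .expireAfterWrite(1, TimeUnit.MINUTES)
            .build(new CacheLoader<String, List<String>>() {
              @Override
              public List<String> load(String dbName) {
                return fetchTableNames(dbName);
              }
            });
      }
    }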