Posted to commits@hive.apache.org by se...@apache.org on 2018/07/03 18:00:34 UTC
[10/10] hive git commit: HIVE-19975 : Checking writeIdList per table may not check the commit level of a partition on a partitioned table (Sergey Shelukhin)
HIVE-19975 : Checking writeIdList per table may not check the commit level of a partition on a partitioned table (Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a47a80fe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a47a80fe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a47a80fe
Branch: refs/heads/master-txnstats
Commit: a47a80fed2f4fe812c3214afc451d11843fedcaa
Parents: 4db8b1c
Author: sergey <se...@apache.org>
Authored: Tue Jul 3 11:00:10 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jul 3 11:00:10 2018 -0700
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 12 +-
.../hive/ql/exec/ColumnStatsUpdateTask.java | 4 -
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 34 +-
.../apache/hadoop/hive/ql/exec/MoveTask.java | 2 +-
.../ql/hooks/UpdateInputAccessTimeHook.java | 6 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 8 +-
.../hadoop/hive/ql/lockmgr/DbTxnManager.java | 1 +
.../apache/hadoop/hive/ql/metadata/Hive.java | 147 +-
.../ql/optimizer/calcite/RelOptHiveTable.java | 1 +
.../ql/plan/ConditionalResolverMergeFiles.java | 2 +-
.../hadoop/hive/ql/stats/ColStatsProcessor.java | 8 +-
.../hive/ql/stats/StatsUpdaterThread.java | 2 +
.../apache/hadoop/hive/ql/stats/StatsUtils.java | 16 +-
.../hadoop/hive/ql/metadata/TestHive.java | 9 +-
.../results/clientpositive/acid_stats.q.out | 78 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2851 +++++----
.../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 210 +-
.../ThriftHiveMetastore_server.skeleton.cpp | 7 +-
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 6054 +++++++++---------
.../gen/thrift/gen-cpp/hive_metastore_types.h | 118 +-
.../hive/metastore/api/AddPartitionsResult.java | 75 +-
.../hadoop/hive/metastore/api/AggrStats.java | 71 +-
.../hive/metastore/api/ColumnStatistics.java | 71 +-
.../hive/metastore/api/GetTableResult.java | 75 +-
.../metastore/api/IsolationLevelCompliance.java | 48 -
.../hadoop/hive/metastore/api/Partition.java | 306 +-
.../hive/metastore/api/PartitionSpec.java | 306 +-
.../metastore/api/PartitionsStatsResult.java | 75 +-
.../apache/hadoop/hive/metastore/api/Table.java | 306 +-
.../hive/metastore/api/TableStatsResult.java | 75 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 4498 ++++++++-----
.../gen-php/metastore/ThriftHiveMetastore.php | 707 +-
.../src/gen/thrift/gen-php/metastore/Types.php | 275 +-
.../hive_metastore/ThriftHiveMetastore-remote | 13 +-
.../hive_metastore/ThriftHiveMetastore.py | 517 +-
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 209 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 77 +-
.../gen/thrift/gen-rb/thrift_hive_metastore.rb | 85 +-
.../hadoop/hive/common/StatsSetupConst.java | 4 +
.../hadoop/hive/metastore/HiveAlterHandler.java | 55 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 20 +-
.../hive/metastore/HiveMetaStoreClient.java | 2 +-
.../hadoop/hive/metastore/ObjectStore.java | 177 +-
.../apache/hadoop/hive/metastore/RawStore.java | 10 +-
.../hive/metastore/cache/CachedStore.java | 19 +-
.../hadoop/hive/metastore/model/MPartition.java | 18 -
.../hadoop/hive/metastore/model/MTable.java | 18 -
.../metastore/txn/CompactionTxnHandler.java | 14 +-
.../hadoop/hive/metastore/txn/TxnDbUtil.java | 8 +-
.../hadoop/hive/metastore/txn/TxnHandler.java | 43 +
.../hadoop/hive/metastore/txn/TxnStore.java | 6 +
.../src/main/resources/package.jdo | 12 -
.../main/sql/derby/hive-schema-4.0.0.derby.sql | 4 +-
.../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql | 4 -
.../main/sql/mssql/hive-schema-4.0.0.mssql.sql | 9 +-
.../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql | 4 -
.../main/sql/mysql/hive-schema-4.0.0.mysql.sql | 4 -
.../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql | 4 -
.../sql/oracle/hive-schema-4.0.0.oracle.sql | 8 +-
.../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql | 4 -
.../sql/postgres/hive-schema-4.0.0.postgres.sql | 7 +-
.../upgrade-3.1.0-to-4.0.0.postgres.sql | 4 -
.../src/main/thrift/hive_metastore.thrift | 42 +-
.../DummyRawStoreControlledCommit.java | 14 +-
.../DummyRawStoreForJdoConnection.java | 8 +-
.../HiveMetaStoreClientPreCatalog.java | 10 +-
.../hive/metastore/TestHiveAlterHandler.java | 6 +-
.../hadoop/hive/metastore/TestObjectStore.java | 2 +-
.../hive/metastore/cache/TestCachedStore.java | 4 +-
.../org/apache/hive/common/util/TxnIdUtils.java | 31 -
70 files changed, 9584 insertions(+), 8350 deletions(-)
----------------------------------------------------------------------
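Editor's note: the recurring pattern across the files below is to stop persisting txnId/validWriteIdList on the thrift Table/Partition objects themselves, and instead (a) thread explicit queryTxnId/queryValidWriteIds parameters through the RawStore alter calls and (b) carry the query's snapshot in EnvironmentContext properties keyed by the new StatsSetupConst.TXN_ID and StatsSetupConst.VALID_WRITE_IDS constants. A minimal standalone sketch of pattern (b) follows; the key strings and the write-ID-list format are illustrative stand-ins, not the actual Hive values.

    import java.util.HashMap;
    import java.util.Map;

    public class SnapshotContextSketch {
      // Stand-ins for the StatsSetupConst keys this commit adds; the real
      // constant values in Hive may differ.
      static final String TXN_ID = "txnId";
      static final String VALID_WRITE_IDS = "validWriteIds";

      // Mirrors the shape of EnvironmentContext#putToProperties as used by
      // Hive.setTableSnapshotForTransactionalTable/-Partition below.
      static void putSnapshot(Map<String, String> envCtxProps,
          long txnId, String validWriteIdList) {
        envCtxProps.put(TXN_ID, Long.toString(txnId));
        if (validWriteIdList != null) {
          envCtxProps.put(VALID_WRITE_IDS, validWriteIdList);
        }
      }

      public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        // The write-ID-list string here is illustrative only.
        putSnapshot(props, 42L, "default.stats_part:5::");
        System.out.println(props);
      }
    }
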
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index b2c8b67..cd036e6 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -321,10 +321,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
- public void alterTable(String catName, String dbName, String name, Table newTable)
+ public void alterTable(String catName, String dbName, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
throws InvalidObjectException, MetaException {
if (shouldEventSucceed) {
- objectStore.alterTable(catName, dbName, name, newTable);
+ objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
} else {
throw new RuntimeException("Event failed.");
}
@@ -386,9 +386,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
@Override
public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
- Partition newPart) throws InvalidObjectException, MetaException {
+ Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
if (shouldEventSucceed) {
- objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
+ objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
} else {
throw new RuntimeException("Event failed.");
}
@@ -397,9 +397,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
@Override
public void alterPartitions(String catName, String dbName, String tblName,
List<List<String>> partValsList, List<Partition> newParts,
- long txnId, String writeIdList, long writeId)
+ long writeId, long queryTxnId, String queryValidWriteIds)
throws InvalidObjectException, MetaException {
- objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, txnId, writeIdList, writeId);
+ objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
}
@Override
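Editor's note: the hunks above show the commit's mechanical rule for RawStore decorators: each alterTable/alterPartition overload gains trailing queryTxnId/queryValidWriteIds parameters that are forwarded to the delegate untouched. A compile-ready sketch of that shape with stand-in types (MiniRawStore is a hypothetical reduction of the real RawStore interface):

    // Hypothetical reduction of RawStore to one method, for illustration.
    interface MiniRawStore {
      void alterTable(String catName, String dbName, String name, Object newTable,
          long queryTxnId, String queryValidWriteIds);
    }

    // Mirrors DummyRawStoreFailEvent: forward on success, fail on demand.
    class FailingMiniRawStore implements MiniRawStore {
      private final MiniRawStore objectStore;
      private boolean shouldEventSucceed = true;

      FailingMiniRawStore(MiniRawStore objectStore) {
        this.objectStore = objectStore;
      }

      @Override
      public void alterTable(String catName, String dbName, String name, Object newTable,
          long queryTxnId, String queryValidWriteIds) {
        if (shouldEventSucceed) {
          // The new txn parameters pass through unchanged so the underlying
          // store can check the writer's snapshot against committed write IDs.
          objectStore.alterTable(catName, dbName, name, newTable,
              queryTxnId, queryValidWriteIds);
        } else {
          throw new RuntimeException("Event failed.");
        }
      }

      public static void main(String[] args) {
        MiniRawStore base = (cat, db, name, tbl, txnId, writeIds) ->
            System.out.println("alter " + db + "." + name
                + " txnId=" + txnId + " writeIds=" + writeIds);
        new FailingMiniRawStore(base)
            .alterTable("hive", "default", "t", new Object(), 42L, "default.t:5::");
      }
    }
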
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
index 49752e5..61fb3d3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java
@@ -46,15 +46,11 @@ import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.DriverContext;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.plan.ColumnStatsUpdateWork;
import org.apache.hadoop.hive.ql.plan.api.StageType;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.DateWritableV2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 3b1e872..71b9587 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1301,7 +1301,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
throw new AssertionError("Unsupported alter materialized view type! : " + alterMVDesc.getOp());
}
- db.alterTable(mv,environmentContext);
+ db.alterTable(mv, false, environmentContext, true);
+
return 0;
}
@@ -1451,7 +1452,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
tbl.getTTable().setPartitionKeys(newPartitionKeys);
- db.alterTable(tbl, null);
+ db.alterTable(tbl, false, null, true);
work.getInputs().add(new ReadEntity(tbl));
// We've already locked the table as the input, don't relock it as the output.
@@ -1477,7 +1478,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
if (touchDesc.getPartSpec() == null) {
- db.alterTable(tbl, environmentContext);
+ db.alterTable(tbl, false, environmentContext, true);
work.getInputs().add(new ReadEntity(tbl));
addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
@@ -1486,7 +1487,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
throw new HiveException("Specified partition does not exist");
}
try {
- db.alterPartition(touchDesc.getTableName(), part, environmentContext);
+ db.alterPartition(touchDesc.getTableName(), part, environmentContext, true);
} catch (InvalidOperationException e) {
throw new HiveException(e);
}
@@ -1835,7 +1836,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
authority.toString(),
harPartitionDir.getPath()); // make in Path to ensure no slash at the end
setArchived(p, harPath, partSpecInfo.values.size());
- db.alterPartition(simpleDesc.getTableName(), p, null);
+ db.alterPartition(simpleDesc.getTableName(), p, null, true);
}
} catch (Exception e) {
throw new HiveException("Unable to change the partition info for HAR", e);
@@ -2041,7 +2042,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
for(Partition p: partitions) {
setUnArchived(p);
try {
- db.alterPartition(simpleDesc.getTableName(), p, null);
+ db.alterPartition(simpleDesc.getTableName(), p, null, true);
} catch (InvalidOperationException e) {
throw new HiveException(e);
}
@@ -3730,7 +3731,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
} else {
cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
List<String> parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1);
- AggrStats aggrStats = db.getAggrColStatsFor(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts);
+ AggrStats aggrStats = db.getAggrColStatsFor(
+ dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false);
colStats = aggrStats.getColStats();
if (parts.size() == aggrStats.getPartsFound()) {
StatsSetupConst.setColumnStatsState(tblProps, colNames);
@@ -3741,13 +3743,15 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
tbl.setParameters(tblProps);
} else {
cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
- colStats = db.getTableColumnStatistics(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames);
+ colStats = db.getTableColumnStatistics(
+ dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false);
}
} else {
List<String> partitions = new ArrayList<String>();
partitions.add(part.getName());
cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
- colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), partitions, colNames).get(part.getName());
+ colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(),
+ dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName());
}
} else {
cols = Hive.getFieldsFromDeserializer(colPath, deserializer);
@@ -3965,10 +3969,14 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
environmentContext = new EnvironmentContext();
}
environmentContext.putToProperties(HiveMetaHook.ALTER_TABLE_OPERATION_TYPE, alterTbl.getOp().name());
+ // Note: in the old default overloads that I've removed, "transactional" was true for tables,
+ // but false for partitions. Seems to be ok here because we are not updating
+ // partition-stats-related stuff from this call (alterTable).
if (allPartitions == null) {
- db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext);
+ db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), environmentContext, true);
} else {
- db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext);
+ db.alterPartitions(
+ Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, environmentContext, false);
}
// Add constraints if necessary
addConstraints(db, alterTbl);
@@ -4943,7 +4951,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
// create the table
if (crtTbl.getReplaceMode()) {
// replace-mode creates are really alters using CreateTableDesc.
- db.alterTable(tbl, null);
+ db.alterTable(tbl, false, null, true);
} else {
if ((foreignKeys != null && foreignKeys.size() > 0) ||
(primaryKeys != null && primaryKeys.size() > 0) ||
@@ -5173,7 +5181,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
oldview.setOutputFormatClass(crtView.getOutputFormat());
}
oldview.checkValidity(null);
- db.alterTable(crtView.getViewName(), oldview, null);
+ db.alterTable(crtView.getViewName(), oldview, false, null, true);
addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
} else {
// We create new view
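Editor's note: the comment in the alterTable/alterPartitions hunk above is the behavioral subtlety of this refactor: the removed default overloads implied transactional=true for tables but false for partition batches, and every DDLTask call site now spells the flag out. A toy sketch of what the explicit flag selects (AlterCallSketch and its parameters are illustrative, not the Hive API):

    // Illustrative only; Hive.alterTable's real signature is
    // (String name, Table newTbl, boolean cascade, EnvironmentContext ec, boolean transactional).
    class AlterCallSketch {
      static void alterTable(String fullName, Object newTbl, boolean cascade,
          Object envCtx, boolean transactional) {
        if (transactional) {
          // would take a table snapshot (txn ID + valid write IDs) before the RPC
        }
        System.out.println("alter " + fullName + " transactional=" + transactional);
      }

      public static void main(String[] args) {
        // Table-level DDL keeps the old implicit default, now written out:
        alterTable("default.t", new Object(), false, null, true);
        // Partition batches from DDLTask pass false, matching the removed default:
        alterTable("default.t", new Object(), false, null, false);
      }
    }
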
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index bf7749d..259d951 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -797,7 +797,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
}
if (updateBucketCols || updateSortCols) {
- db.alterPartition(table.getDbName(), table.getTableName(), partn, null);
+ db.alterPartition(table.getDbName(), table.getTableName(), partn, null, true);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
index fc56a8b..4cf7c25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
@@ -63,7 +63,7 @@ public class UpdateInputAccessTimeHook {
String tblName = re.getTable().getTableName();
Table t = db.getTable(dbName, tblName);
t.setLastAccessTime(lastAccessTime);
- db.alterTable(dbName + "." + tblName, t, null);
+ db.alterTable(dbName + "." + tblName, t, false, null, true);
break;
}
case PARTITION: {
@@ -73,9 +73,9 @@ public class UpdateInputAccessTimeHook {
Table t = db.getTable(dbName, tblName);
p = db.getPartition(t, p.getSpec(), false);
p.setLastAccessTime(lastAccessTime);
- db.alterPartition(dbName, tblName, p, null);
+ db.alterPartition(dbName, tblName, p, null, true);
t.setLastAccessTime(lastAccessTime);
- db.alterTable(dbName + "." + tblName, t, null);
+ db.alterTable(dbName + "." + tblName, t, false, null, true);
break;
}
default:
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 19dbc64..e54afc4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1677,11 +1677,15 @@ public class AcidUtils {
if (txnId > 0 && isTransactionalTable(tbl)) {
validWriteIdList = getTableValidWriteIdList(conf, fullTableName);
if (isStatsUpdater) {
- // TODO# it should be invalid to update stats without write ID...
- // Why would there be a stats updater that doesn't have a write ID?
writeId = SessionState.get().getTxnMgr() != null ?
SessionState.get().getTxnMgr().getAllocatedTableWriteId(
tbl.getDbName(), tbl.getTableName()) : -1;
+ if (writeId < 1) {
+ // TODO: this is not ideal... stats updater that doesn't have write ID is currently
+ // "create table"; writeId would be 0/-1 here. No need to call this w/true.
+ LOG.debug("Stats updater for {}.{} doesn't have a write ID",
+ tbl.getDbName(), tbl.getTableName());
+ }
}
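Editor's note: the new guard above tolerates writeId < 1 only because the lone stats updater without a write ID is currently the create-table path. A standalone sketch of the same resolution logic, with TxnMgrStub as a hypothetical stand-in for HiveTxnManager:

    // TxnMgrStub is a hypothetical stand-in for HiveTxnManager.
    interface TxnMgrStub {
      long getAllocatedTableWriteId(String dbName, String tableName);
    }

    class StatsWriteIdSketch {
      static long resolveStatsUpdaterWriteId(TxnMgrStub txnMgr, String db, String table) {
        long writeId = (txnMgr != null) ? txnMgr.getAllocatedTableWriteId(db, table) : -1;
        if (writeId < 1) {
          // Per the comment in the hunk: a stats updater with no write ID is
          // currently the create-table path, so this is only debug-logged.
          System.out.printf("Stats updater for %s.%s doesn't have a write ID%n", db, table);
        }
        return writeId;
      }

      public static void main(String[] args) {
        System.out.println(resolveStatsUpdaterWriteId(null, "default", "t"));         // -1
        System.out.println(resolveStatsUpdaterWriteId((d, t) -> 7L, "default", "t")); // 7
      }
    }
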
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 5267313..a05ae0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -1047,6 +1047,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
}
try {
long writeId = getMS().allocateTableWriteId(txnId, dbName, tableName);
+ LOG.debug("Allocated write ID {} for {}.{}", writeId, dbName, tableName);
tableWriteIds.put(fullTableName, writeId);
return writeId;
} catch (TException e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 72b7cec..c757718 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -581,15 +581,11 @@ public class Hive {
createTable(tbl);
}
- public void alterTable(Table newTbl, EnvironmentContext environmentContext)
- throws HiveException {
- alterTable(newTbl.getDbName(), newTbl.getTableName(), newTbl, false, environmentContext);
- }
-
- public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext)
- throws HiveException {
- alterTable(fullyQlfdTblName, newTbl, false, environmentContext);
+ public void alterTable(Table newTbl, boolean cascade, EnvironmentContext environmentContext,
+ boolean transactional) throws HiveException {
+ alterTable(newTbl.getDbName(),
+ newTbl.getTableName(), newTbl, cascade, environmentContext, transactional);
}
/**
@@ -612,16 +608,13 @@ public class Hive {
alterTable(names[0], names[1], newTbl, false, environmentContext, transactional);
}
- public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext)
+ public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade,
+ EnvironmentContext environmentContext, boolean transactional)
throws HiveException {
String[] names = Utilities.getDbTableName(fullyQlfdTblName);
- alterTable(names[0], names[1], newTbl, cascade, environmentContext);
- }
- public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
- EnvironmentContext environmentContext)
- throws HiveException {
- alterTable(dbName, tblName, newTbl, cascade, environmentContext, true);
+ alterTable(names[0], names[1], newTbl, cascade, environmentContext, transactional);
}
+
public void alterTable(String dbName, String tblName, Table newTbl, boolean cascade,
EnvironmentContext environmentContext, boolean transactional)
throws HiveException {
@@ -641,10 +634,11 @@ public class Hive {
// Take a table snapshot and set it to newTbl.
if (transactional) {
- setTableSnapshotForTransactionalTable(conf, newTbl, true);
+ setTableSnapshotForTransactionalTable(environmentContext, conf, newTbl, true);
}
- getMSC().alter_table_with_environmentContext(dbName, tblName, newTbl.getTTable(), environmentContext);
+ getMSC().alter_table_with_environmentContext(
+ dbName, tblName, newTbl.getTTable(), environmentContext);
} catch (MetaException e) {
throw new HiveException("Unable to alter table. " + e.getMessage(), e);
} catch (TException e) {
@@ -672,28 +666,11 @@ public class Hive {
* if the changes in metadata is not acceptable
* @throws TException
*/
- public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext)
+ public void alterPartition(String tblName, Partition newPart,
+ EnvironmentContext environmentContext, boolean transactional)
throws InvalidOperationException, HiveException {
String[] names = Utilities.getDbTableName(tblName);
- alterPartition(names[0], names[1], newPart, environmentContext);
- }
-
- /**
- * Updates the existing partition metadata with the new metadata.
- *
- * @param dbName
- * name of the exiting table's database
- * @param tblName
- * name of the existing table
- * @param newPart
- * new partition
- * @throws InvalidOperationException
- * if the changes in metadata is not acceptable
- * @throws TException
- */
- public void alterPartition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
- throws InvalidOperationException, HiveException {
- alterPartition(dbName, tblName, newPart, environmentContext, true);
+ alterPartition(names[0], names[1], newPart, environmentContext, transactional);
}
/**
@@ -723,10 +700,14 @@ public class Hive {
location = Utilities.getQualifiedPath(conf, new Path(location));
newPart.setLocation(location);
}
+ if (environmentContext == null) {
+ environmentContext = new EnvironmentContext();
+ }
if (transactional) {
- setTableSnapshotForTransactionalPartition(conf, newPart, true);
+ setTableSnapshotForTransactionalPartition(environmentContext, conf, newPart, true);
}
- getSynchronizedMSC().alter_partition(dbName, tblName, newPart.getTPartition(), environmentContext);
+ getSynchronizedMSC().alter_partition(
+ dbName, tblName, newPart.getTPartition(), environmentContext);
} catch (MetaException e) {
throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
@@ -743,10 +724,6 @@ public class Hive {
newPart.checkValidity();
}
- public void alterPartitions(String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
- throws InvalidOperationException, HiveException {
- alterPartitions(tblName, newParts, environmentContext, false);
- }
/**
* Updates the existing table metadata with the new metadata.
*
@@ -918,7 +895,7 @@ public class Hive {
}
}
// Set table snapshot to api.Table to make it persistent.
- setTableSnapshotForTransactionalTable(conf, tbl, true);
+ setTableSnapshotForTransactionalTable(null, conf, tbl, true);
if (primaryKeys == null && foreignKeys == null
&& uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null
&& checkConstraints == null) {
@@ -1150,6 +1127,7 @@ public class Hive {
// Get the table from metastore
org.apache.hadoop.hive.metastore.api.Table tTable = null;
try {
+ // Note: this is currently called w/true from StatsOptimizer only.
if (checkTransactional) {
ValidWriteIdList validWriteIdList = null;
long txnId = SessionState.get().getTxnMgr() != null ?
@@ -1821,7 +1799,8 @@ public class Hive {
Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath);
alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString());
validatePartition(newTPart);
- setTableSnapshotForTransactionalPartition(conf, newTPart, true);
+ EnvironmentContext ec = new EnvironmentContext();
+ setTableSnapshotForTransactionalPartition(ec, conf, newTPart, true);
// If config is set, table is not temporary and partition being inserted exists, capture
// the list of files added. For not yet existing partitions (insert overwrite to new partition
@@ -1894,7 +1873,7 @@ public class Hive {
// insert into table T partition (ds) values ('Joe', 'today'); -- will fail with AlreadyExistsException
// In that case, we want to retry with alterPartition.
LOG.debug("Caught AlreadyExistsException, trying to alter partition instead");
- setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
+ setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, ec);
} catch (Exception e) {
try {
final FileSystem newPathFileSystem = newPartPath.getFileSystem(this.getConf());
@@ -1913,7 +1892,7 @@ public class Hive {
addWriteNotificationLog(tbl, partSpec, newFiles, writeId);
}
} else {
- setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart);
+ setStatsPropAndAlterPartition(hasFollowingStatsTask, tbl, newTPart, ec);
}
perfLogger.PerfLogEnd("MoveTask", PerfLogger.LOAD_PARTITION);
@@ -2010,15 +1989,13 @@ public class Hive {
}
private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl,
- Partition newTPart) throws MetaException, TException {
- EnvironmentContext environmentContext = null;
+ Partition newTPart, EnvironmentContext ec) throws MetaException, TException {
if (hasFollowingStatsTask) {
- environmentContext = new EnvironmentContext();
- environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
+ ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
}
LOG.debug("Altering existing partition " + newTPart.getSpec());
getSynchronizedMSC().alter_partition(tbl.getDbName(), tbl.getTableName(),
- newTPart.getTPartition(), environmentContext);
+ newTPart.getTPartition(), ec);
}
/**
@@ -2453,7 +2430,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
}
- alterTable(tbl, environmentContext);
+ alterTable(tbl, false, environmentContext, true);
if (AcidUtils.isTransactionalTable(tbl)) {
addWriteNotificationLog(tbl, null, newFiles, writeId);
@@ -2481,8 +2458,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
org.apache.hadoop.hive.metastore.api.Partition part =
Partition.createMetaPartitionObject(tbl, partSpec, null);
AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
- part.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
- part.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ part.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0);
return new Partition(tbl, getMSC().add_partition(part));
} catch (Exception e) {
LOG.error(StringUtils.stringifyException(e));
@@ -2500,9 +2476,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
for (int i = 0; i < size; ++i) {
org.apache.hadoop.hive.metastore.api.Partition tmpPart =
convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf);
- if (tmpPart != null && tableSnapshot != null && tableSnapshot.getTxnId() > 0) {
- tmpPart.setTxnId(tableSnapshot.getTxnId());
- tmpPart.setValidWriteIdList(tableSnapshot.getValidWriteIdList());
+ if (tmpPart != null && tableSnapshot != null && tableSnapshot.getWriteId() > 0) {
+ tmpPart.setWriteId(tableSnapshot.getWriteId());
}
in.add(tmpPart);
}
@@ -2700,8 +2675,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
fullName = tbl.getFullyQualifiedName();
}
- Partition newPart = new Partition(tbl, tpart);
- alterPartition(fullName, newPart, null);
+ alterPartition(fullName, new Partition(tbl, tpart), null, true);
}
private void alterPartitionSpecInMemory(Table tbl,
@@ -4533,11 +4507,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
public List<ColumnStatisticsObj> getTableColumnStatistics(
- String dbName, String tableName, List<String> colNames) throws HiveException {
- return getTableColumnStatistics(dbName, tableName, colNames, false);
- }
-
- public List<ColumnStatisticsObj> getTableColumnStatistics(
String dbName, String tableName, List<String> colNames, boolean checkTransactional)
throws HiveException {
@@ -4561,11 +4530,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
- String tableName, List<String> partNames, List<String> colNames) throws HiveException {
- return getPartitionColumnStatistics(dbName, tableName, partNames, colNames, false);
- }
-
public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
String dbName, String tableName, List<String> partNames, List<String> colNames,
boolean checkTransactional)
@@ -4589,11 +4553,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
public AggrStats getAggrColStatsFor(String dbName, String tblName,
- List<String> colNames, List<String> partName) {
- return getAggrColStatsFor(dbName, tblName, colNames, partName, false);
- }
-
- public AggrStats getAggrColStatsFor(String dbName, String tblName,
List<String> colNames, List<String> partName, boolean checkTransactional) {
long txnId = -1;
String writeIdList = null;
@@ -5404,34 +5363,42 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- private void setTableSnapshotForTransactionalTable(
- HiveConf conf, Table newTbl, boolean isStatsUpdater)
- throws LockException {
+ private void setTableSnapshotForTransactionalTable(EnvironmentContext ec, HiveConf conf,
+ Table newTbl, boolean isStatsUpdater) throws LockException {
org.apache.hadoop.hive.metastore.api.Table newTTbl = newTbl.getTTable();
AcidUtils.TableSnapshot tableSnapshot =
AcidUtils.getTableSnapshot(conf, newTbl, isStatsUpdater);
+ if (tableSnapshot == null) return;
+ if (ec != null) { // Can be null for create table case; we don't need to verify txn stats.
+ ec.putToProperties(StatsSetupConst.TXN_ID, Long.toString(tableSnapshot.getTxnId()));
+ if (tableSnapshot.getValidWriteIdList() != null) {
+ ec.putToProperties(StatsSetupConst.VALID_WRITE_IDS, tableSnapshot.getValidWriteIdList());
+ } else {
+ LOG.warn("Table snapshot has null write IDs for " + newTbl);
+ }
+ }
- newTTbl.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
- newTTbl.setValidWriteIdList(
- tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
if (isStatsUpdater) {
- newTTbl.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : -1);
+ newTTbl.setWriteId(tableSnapshot.getWriteId());
}
}
- private void setTableSnapshotForTransactionalPartition(
- HiveConf conf, Partition partition, boolean isStatsUpdater)
- throws LockException {
-
+ private void setTableSnapshotForTransactionalPartition(EnvironmentContext ec, HiveConf conf,
+ Partition partition, boolean isStatsUpdater) throws LockException {
AcidUtils.TableSnapshot tableSnapshot =
AcidUtils.getTableSnapshot(conf, partition.getTable(), isStatsUpdater);
org.apache.hadoop.hive.metastore.api.Partition tpartition = partition.getTPartition();
- tpartition.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : -1);
- tpartition.setValidWriteIdList(
- tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
+ if (tableSnapshot == null) return;
+ ec.putToProperties(StatsSetupConst.TXN_ID, Long.toString(tableSnapshot.getTxnId()));
+ if (tableSnapshot.getValidWriteIdList() != null) {
+ ec.putToProperties(StatsSetupConst.VALID_WRITE_IDS, tableSnapshot.getValidWriteIdList());
+ } else {
+ LOG.warn("Table snapshot has null write IDs for " + partition);
+ }
+
if (isStatsUpdater) {
- tpartition.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : -1);
+ tpartition.setWriteId(tableSnapshot.getWriteId());
}
}
}
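Editor's note: two things stand out in the rewritten Hive.java plumbing above: loadPartition now builds one EnvironmentContext up front (stamping the snapshot first), and setStatsPropAndAlterPartition merely adds DO_NOT_UPDATE_STATS to it, so the txn snapshot and the stats flag travel to alter_partition in a single context. A condensed sketch with plain maps standing in for EnvironmentContext (key strings illustrative):

    import java.util.HashMap;
    import java.util.Map;

    class LoadPartitionSketch {
      // Stand-in for StatsSetupConst.DO_NOT_UPDATE_STATS; key strings illustrative.
      static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";

      // Caller builds one context and stamps the snapshot first...
      static Map<String, String> newContextWithSnapshot(long txnId, String writeIds) {
        Map<String, String> ec = new HashMap<>();
        ec.put("txnId", Long.toString(txnId));
        ec.put("validWriteIds", writeIds);
        return ec;
      }

      // ...and the alter path only adds the stats flag when a stats task follows.
      static void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask,
          Map<String, String> ec) {
        if (hasFollowingStatsTask) {
          ec.put(DO_NOT_UPDATE_STATS, "true");
        }
        // The alter_partition RPC would receive ec here, carrying both pieces at once.
      }

      public static void main(String[] args) {
        Map<String, String> ec = newContextWithSnapshot(42L, "default.t:5::");
        setStatsPropAndAlterPartition(true, ec);
        System.out.println(ec);
      }
    }
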
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
index 6cc6d02..f66f47a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
@@ -593,6 +593,7 @@ public class RelOptHiveTable extends RelOptAbstractTable {
return getColStat(projIndxLst, HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_STATS_ESTIMATE_STATS));
}
+ /** Note: DOES NOT CHECK txn stats. */
public List<ColStatistics> getColStat(List<Integer> projIndxLst, boolean allowMissingStats) {
List<ColStatistics> colStatsBldr = Lists.newArrayList();
Set<Integer> projIndxSet = new HashSet<Integer>(projIndxLst);
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
index e77fc3e..80f77b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java
@@ -186,7 +186,7 @@ public class ConditionalResolverMergeFiles implements ConditionalResolver,
}
}
} else {
- Utilities.FILE_OP_LOGGER.info("Resolver returning movetask for " + dirPath, new Exception());
+ Utilities.FILE_OP_LOGGER.info("Resolver returning movetask for " + dirPath);
resTsks.add(mvTask);
}
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index acebf52..39209b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.ql.CompilationOpContext;
import org.apache.hadoop.hive.ql.exec.FetchOperator;
import org.apache.hadoop.hive.ql.io.AcidUtils;
+import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
@@ -178,10 +179,13 @@ public class ColStatsProcessor implements IStatsProcessor {
}
SetPartitionsStatsRequest request = new SetPartitionsStatsRequest(colStats);
request.setNeedMerge(colStatDesc.isNeedMerge());
- if (AcidUtils.isTransactionalTable(tbl) && SessionState.get().getTxnMgr() != null) {
- request.setTxnId(SessionState.get().getTxnMgr().getCurrentTxnId());
+ HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl)
+ ? SessionState.get().getTxnMgr() : null;
+ if (txnMgr != null) {
+ request.setTxnId(txnMgr.getCurrentTxnId());
request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
+ request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName()));
}
db.setPartitionColumnStatistics(request);
return 0;
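Editor's note: with the hunk above, the column-stats request carries all three transaction coordinates for a transactional table: the current txn ID, the valid write-ID list, and now the allocated table write ID. A sketch of that population logic with stub types (TxnMgr2 and MiniStatsRequest are illustrative stand-ins for HiveTxnManager and SetPartitionsStatsRequest):

    // Illustrative stand-ins for HiveTxnManager and SetPartitionsStatsRequest.
    interface TxnMgr2 {
      long getCurrentTxnId();
      long getAllocatedTableWriteId(String dbName, String tableName);
    }

    class MiniStatsRequest {
      long txnId = -1, writeId = -1;
      String validWriteIdList;
    }

    class StatsRequestSketch {
      static MiniStatsRequest build(TxnMgr2 txnMgr, boolean isTransactional,
          String db, String table, String validWriteIds) {
        MiniStatsRequest request = new MiniStatsRequest();
        // Only transactional tables get txn coordinates, mirroring the hunk above.
        if (isTransactional && txnMgr != null) {
          request.txnId = txnMgr.getCurrentTxnId();
          request.validWriteIdList = validWriteIds;
          request.writeId = txnMgr.getAllocatedTableWriteId(db, table);
        }
        return request;
      }

      public static void main(String[] args) {
        TxnMgr2 mgr = new TxnMgr2() {
          public long getCurrentTxnId() { return 100L; }
          public long getAllocatedTableWriteId(String d, String t) { return 7L; }
        };
        MiniStatsRequest r = build(mgr, true, "default", "t", "default.t:7::");
        System.out.println(r.txnId + " " + r.writeId + " " + r.validWriteIdList);
      }
    }
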
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
index ddca704..bb181a1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
@@ -409,6 +409,8 @@ public class StatsUpdaterThread extends Thread implements MetaStoreThread {
List<String> allCols) throws MetaException {
ColumnStatistics existingStats = null;
try {
+ // Note: this should NOT do txn verification - we want to get outdated stats, to
+ // see if we need to update anything.
existingStats = rs.getTableColumnStatistics(cat, db, tbl, allCols);
} catch (NoSuchObjectException e) {
LOG.error("Cannot retrieve existing stats, skipping " + fullTableName, e);
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 494939a..cb6913e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -128,6 +128,7 @@ public class StatsUtils {
/**
* Collect table, partition and column level statistics
+ * Note: DOES NOT CHECK txn stats.
* @param conf
* - hive configuration
* @param partList
@@ -226,6 +227,7 @@ public class StatsUtils {
}
}
+ /** Note: DOES NOT CHECK txn stats. */
public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList,
Table table, List<ColumnInfo> schema, List<String> neededColumns, ColumnStatsList colStatsCache,
List<String> referencedColumns, boolean fetchColStats)
@@ -262,7 +264,10 @@ public class StatsUtils {
List<ColStatistics> colStats = Lists.newArrayList();
if (fetchColStats) {
- colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache);
+ // Note: this is currently called from two notable places (w/false for checkTxn)
+ // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics
+ // 2) RelOptHiveTable via getColStats and updateColStats.
+ colStats = getTableColumnStats(table, schema, neededColumns, colStatsCache, false);
if(colStats == null) {
colStats = Lists.newArrayList();
}
@@ -378,8 +383,11 @@ public class StatsUtils {
// size is 0, aggrStats is null after several retries. Thus, we can
// skip the step to connect to the metastore.
if (neededColsToRetrieve.size() > 0 && partNames.size() > 0) {
+ // Note: this is currently called from two notable places (w/false for checkTxn)
+ // 1) StatsRulesProcFactory.TableScanStatsRule via collectStatistics
+ // 2) RelOptHiveTable via getColStats and updateColStats.
aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(),
- neededColsToRetrieve, partNames);
+ neededColsToRetrieve, partNames, false);
}
boolean statsRetrieved = aggrStats != null &&
@@ -990,7 +998,7 @@ public class StatsUtils {
*/
public static List<ColStatistics> getTableColumnStats(
Table table, List<ColumnInfo> schema, List<String> neededColumns,
- ColumnStatsList colStatsCache) {
+ ColumnStatsList colStatsCache, boolean checkTransactional) {
if (table.isMaterializedTable()) {
LOG.debug("Materialized table does not contain table statistics");
return null;
@@ -1019,7 +1027,7 @@ public class StatsUtils {
List<ColStatistics> stats = null;
try {
List<ColumnStatisticsObj> colStat = Hive.get().getTableColumnStatistics(
- dbName, tabName, colStatsToRetrieve);
+ dbName, tabName, colStatsToRetrieve, checkTransactional);
stats = convertColStats(colStat, tabName);
} catch (HiveException e) {
LOG.error("Failed to retrieve table statistics: ", e);
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 930282d..d30bbde 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -325,11 +325,8 @@ public class TestHive extends TestCase {
tbl.getParameters().put(hive_metastoreConstants.DDL_TIME,
ft.getParameters().get(hive_metastoreConstants.DDL_TIME));
// Txn stuff set by metastore
- if (tbl.getTTable().isSetTxnId()) {
- ft.getTTable().setTxnId(tbl.getTTable().getTxnId());
- }
- if (tbl.getTTable().isSetValidWriteIdList()) {
- ft.getTTable().setValidWriteIdList(tbl.getTTable().getValidWriteIdList());
+ if (tbl.getTTable().isSetWriteId()) {
+ ft.getTTable().setWriteId(tbl.getTTable().getWriteId());
}
assertTrue("Tables doesn't match: " + tableName + " (" + ft.getTTable()
+ "; " + tbl.getTTable() + ")", ft.getTTable().equals(tbl.getTTable()));
@@ -600,7 +597,7 @@ public class TestHive extends TestCase {
Table table = createPartitionedTable(dbName, tableName);
table.getParameters().put("auto.purge", "true");
- hm.alterTable(tableName, table, null);
+ hm.alterTable(tableName, table, false, null, true);
Map<String, String> partitionSpec = new ImmutableMap.Builder<String, String>()
.put("ds", "20141216")
http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/ql/src/test/results/clientpositive/acid_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_stats.q.out b/ql/src/test/results/clientpositive/acid_stats.q.out
index 969433e..fd4ebe7 100644
--- a/ql/src/test/results/clientpositive/acid_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_stats.q.out
@@ -45,47 +45,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(key) from stats_part
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_part
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- Select Operator
- expressions: key (type: int)
- outputColumnNames: key
- Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- Group By Operator
- aggregations: count(key)
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: PARTIAL
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink
@@ -102,47 +67,12 @@ PREHOOK: type: QUERY
POSTHOOK: query: explain select count(key) from stats_part
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
- Stage-1 is a root stage
- Stage-0 depends on stages: Stage-1
+ Stage-0 is a root stage
STAGE PLANS:
- Stage: Stage-1
- Map Reduce
- Map Operator Tree:
- TableScan
- alias: stats_part
- Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
- Select Operator
- expressions: key (type: int)
- outputColumnNames: key
- Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
- Group By Operator
- aggregations: count(key)
- mode: hash
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- Reduce Output Operator
- sort order:
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- value expressions: _col0 (type: bigint)
- Execution mode: vectorized
- Reduce Operator Tree:
- Group By Operator
- aggregations: count(VALUE._col0)
- mode: mergepartial
- outputColumnNames: _col0
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- File Output Operator
- compressed: false
- Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
- table:
- input format: org.apache.hadoop.mapred.SequenceFileInputFormat
- output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
- serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
Stage: Stage-0
Fetch Operator
- limit: -1
+ limit: 1
Processor Tree:
ListSink