You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by kr...@apache.org on 2022/02/22 05:36:44 UTC
[hive] branch master updated: HIVE-25918: Invalid stats after multi inserting into the same partition (Krisztian Kasa, reviewed by Zoltan Haindrich)
This is an automated email from the ASF dual-hosted git repository.
krisztiankasa pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new f15de94 HIVE-25918: Invalid stats after multi inserting into the same partition (Krisztian Kasa, reviewed by Zoltan Haindrich)
f15de94 is described below
commit f15de94c617c4566c87293479463cd90437beed5
Author: Krisztian Kasa <ka...@gmail.com>
AuthorDate: Tue Feb 22 06:36:24 2022 +0100
HIVE-25918: Invalid stats after multi inserting into the same partition (Krisztian Kasa, reviewed by Zoltan Haindrich)
---
.../java/org/apache/hadoop/hive/cli/CliDriver.java | 3 +-
...licationScenariosIncrementalLoadAcidTables.java | 2 +-
.../ql/parse/TestStatsReplicationScenarios.java | 2 +-
.../java/org/apache/hadoop/hive/ql/QTestUtil.java | 2 +-
.../HiveMetaStoreClientWithLocalCache.java | 25 ++++-
.../ql/metadata/SessionHiveMetaStoreClient.java | 10 --
.../hadoop/hive/ql/TxnCommandsBaseForTests.java | 2 +-
.../ql/lockmgr/DbTxnManagerEndToEndTestBase.java | 2 +-
.../hive/ql/metadata}/TestHiveMetaStoreClient.java | 3 +-
...TestHiveMetaStoreClientApiArgumentsChecker.java | 2 -
...HiveMetastoreClientListPartitionsTempTable.java | 6 +-
.../clientpositive/stats_part_multi_insert_acid.q | 21 ++++
.../llap/stats_part_multi_insert_acid.q.out | 116 +++++++++++++++++++++
.../apache/hive/service/server/HiveServer2.java | 2 +-
14 files changed, 174 insertions(+), 24 deletions(-)
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index 9e8107b..4d57b22 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -54,14 +54,13 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.conf.HiveVariableSource;
import org.apache.hadoop.hive.conf.Validator;
import org.apache.hadoop.hive.conf.VariableSubstitution;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.IDriver;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
import org.apache.hadoop.hive.ql.exec.mr.HadoopJobExecHelper;
import org.apache.hadoop.hive.ql.exec.tez.TezJobExecHelper;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.parse.CalcitePlanner;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.processors.CommandProcessor;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
index 05c7d2c..c31c9ef 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java
@@ -23,8 +23,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.parse.repl.CopyUtils;
import org.apache.hadoop.hive.shims.Utils;
import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
index cca82a8..5d15e1a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.parse;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncod
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 197c5c7..5336201 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -53,7 +53,7 @@ import org.apache.hadoop.hive.common.io.CachingPrintStream;
import org.apache.hadoop.hive.common.io.SessionStream;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.ql.QTestMiniClusters.FsType;
import org.apache.hadoop.hive.ql.cache.results.QueryResultsCache;
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientWithLocalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
similarity index 96%
rename from standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientWithLocalCache.java
rename to ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
index e234049..625dbae 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientWithLocalCache.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hive.metastore;
+package org.apache.hadoop.hive.ql.metadata;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
@@ -25,6 +25,11 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Database;
@@ -57,6 +62,8 @@ import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator;
import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator;
import org.apache.thrift.TException;
@@ -500,9 +507,25 @@ public class HiveMetaStoreClientWithLocalCache extends HiveMetaStoreClient imple
* @return boolean
*/
private boolean isCacheEnabledAndInitialized() {
+ // Do not use the cache if session level query cache is also disabled
+ // Both caches can be used only at compilation time because execution may change
DB objects (Tables, Partition metadata objects) and cache entries may already be invalid
+ SessionState sessionState = SessionState.get();
+ if (sessionState == null || sessionState.getQueryCache(getQueryId()) == null) {
+ return false;
+ }
+
return INITIALIZED.get();
}
+ protected String getQueryId() {
+ try {
+ return Hive.get().getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname);
+ } catch (HiveException e) {
+ LOG.error("Error getting query id. Query level and Global HMS caching will be disabled", e);
+ return null;
+ }
+ }
protected final Pair<List<ColumnStatisticsObj>, List<String>> getTableColumnStatisticsCache(CacheI cache,
TableStatsRequest rqst, TableWatermark watermark) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 4744516..4778c2e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.io.HdfsUtils;
import org.apache.hadoop.hive.metastore.HiveMetaHookLoader;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
@@ -2512,15 +2511,6 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClientWithLocalCach
return null;
}
- private String getQueryId() {
- try {
- return Hive.get().getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname);
- } catch (HiveException e) {
- LOG.error("Error getting query id. Query level HMS caching will be disabled", e);
- return null;
- }
- }
-
@Override
protected String getValidWriteIdList(String dbName, String tblName) {
try {
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
index 05e45f5..c879b0a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java
@@ -36,12 +36,12 @@ import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.ql.io.HiveInputFormat;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.processors.CommandProcessorException;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/DbTxnManagerEndToEndTestBase.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/DbTxnManagerEndToEndTestBase.java
index a78ae89..ad47f91 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/DbTxnManagerEndToEndTestBase.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/DbTxnManagerEndToEndTestBase.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.lockmgr;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
@@ -28,6 +27,7 @@ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.Driver;
import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.junit.After;
import org.junit.Assert;
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreClient.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClient.java
similarity index 98%
rename from standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreClient.java
rename to ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClient.java
index 568ec9a..ded6fd4 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreClient.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClient.java
@@ -16,10 +16,9 @@
* limitations under the License.
*/
-package org.apache.hadoop.hive.metastore;
+package org.apache.hadoop.hive.ql.metadata;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest;
import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClientApiArgumentsChecker.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClientApiArgumentsChecker.java
index 784b8e8..dfda8a5 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClientApiArgumentsChecker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveMetaStoreClientApiArgumentsChecker.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.TestHiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
@@ -43,7 +42,6 @@ import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
index 8e2b1f5c..0ec2764 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.metadata;
import com.google.common.collect.Lists;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.TestMetastoreExpr;
import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
@@ -38,8 +37,10 @@ import org.apache.hadoop.hive.metastore.client.TestListPartitions;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.thrift.TException;
import org.junit.Before;
@@ -80,6 +81,9 @@ public class TestSessionHiveMetastoreClientListPartitionsTempTable
public void setUp() throws Exception {
initHiveConf();
SessionState.start(conf);
+ QueryState queryState = QueryState.getNewQueryState(conf, new LineageState());
+ queryState.createHMSCache();
+ SessionState.get().addQueryState(queryState.getQueryId(), queryState);
// setup metastore client cache
if (conf.getBoolVar(HiveConf.ConfVars.MSC_CACHE_ENABLED)) {
HiveMetaStoreClientWithLocalCache.init(conf);
diff --git a/ql/src/test/queries/clientpositive/stats_part_multi_insert_acid.q b/ql/src/test/queries/clientpositive/stats_part_multi_insert_acid.q
new file mode 100644
index 0000000..f750632
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/stats_part_multi_insert_acid.q
@@ -0,0 +1,21 @@
+-- Test multi inserting to the same partition
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table source(p int, key int,value string);
+insert into source(p, key, value) values (101,42,'string42');
+
+create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
+
+from source
+insert into stats_part select key, value, p
+insert into stats_part select key, value, p;
+
+select p, key, value from stats_part;
+desc formatted stats_part;
+
+set hive.compute.query.using.stats=true;
+select count(*) from stats_part;
+
+set hive.compute.query.using.stats=false;
+select count(*) from stats_part;
diff --git a/ql/src/test/results/clientpositive/llap/stats_part_multi_insert_acid.q.out b/ql/src/test/results/clientpositive/llap/stats_part_multi_insert_acid.q.out
new file mode 100644
index 0000000..c40f7d7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/stats_part_multi_insert_acid.q.out
@@ -0,0 +1,116 @@
+PREHOOK: query: create table source(p int, key int,value string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@source
+POSTHOOK: query: create table source(p int, key int,value string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@source
+PREHOOK: query: insert into source(p, key, value) values (101,42,'string42')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@source
+POSTHOOK: query: insert into source(p, key, value) values (101,42,'string42')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@source
+POSTHOOK: Lineage: source.key SCRIPT []
+POSTHOOK: Lineage: source.p SCRIPT []
+POSTHOOK: Lineage: source.value SCRIPT []
+PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_part
+PREHOOK: query: from source
+insert into stats_part select key, value, p
+insert into stats_part select key, value, p
+PREHOOK: type: QUERY
+PREHOOK: Input: default@source
+PREHOOK: Output: default@stats_part
+POSTHOOK: query: from source
+insert into stats_part select key, value, p
+insert into stats_part select key, value, p
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@source
+POSTHOOK: Output: default@stats_part
+POSTHOOK: Output: default@stats_part@p=101
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(source)source.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).key SIMPLE [(source)source.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(source)source.FieldSchema(name:value, type:string, comment:null), ]
+POSTHOOK: Lineage: stats_part PARTITION(p=101).value SIMPLE [(source)source.FieldSchema(name:value, type:string, comment:null), ]
+PREHOOK: query: select p, key, value from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+#### A masked pattern was here ####
+POSTHOOK: query: select p, key, value from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+#### A masked pattern was here ####
+101 42 string42
+101 42 string42
+PREHOOK: query: desc formatted stats_part
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_part
+POSTHOOK: query: desc formatted stats_part
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_part
+# col_name data_type comment
+key int
+value string
+
+# Partition Information
+# col_name data_type comment
+p int
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"}
+ bucketing_version 2
+ numFiles 2
+ numPartitions 1
+ numRows 2
+ rawDataSize 0
+ totalSize 1470
+ transactional true
+ transactional_properties default
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(*) from stats_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_part
+PREHOOK: Input: default@stats_part@p=101
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from stats_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_part
+POSTHOOK: Input: default@stats_part@p=101
+#### A masked pattern was here ####
+2
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index e32ff6c..9686067 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
import org.apache.hadoop.hive.conf.HiveServer2TransportMode;
import org.apache.hadoop.hive.llap.coordinator.LlapCoordinator;
import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
@@ -74,6 +73,7 @@ import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;
import org.apache.hadoop.hive.ql.metadata.events.NotificationEventPoll;
import org.apache.hadoop.hive.ql.parse.CalcitePlanner;