You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by om...@apache.org on 2018/04/26 14:58:31 UTC
[07/50] [abbrv] hive git commit: HIVE-19171 : Persist runtime
statistics in metastore (Zoltan Haindrich via Ashutosh Chauhan)
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 125d5a7..184ecb6 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -55,6 +55,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
import javax.jdo.JDOCanRetryException;
import javax.jdo.JDODataStoreException;
@@ -83,7 +84,6 @@ import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
@@ -124,6 +124,7 @@ import org.apache.hadoop.hive.metastore.api.ResourceType;
import org.apache.hadoop.hive.metastore.api.ResourceUri;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -186,6 +187,7 @@ import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
import org.apache.hadoop.hive.metastore.model.MResourceUri;
import org.apache.hadoop.hive.metastore.model.MRole;
import org.apache.hadoop.hive.metastore.model.MRoleMap;
+import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
@@ -210,7 +212,6 @@ import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.JavaUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.utils.ObjectPair;
-import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.datanucleus.AbstractNucleusContext;
import org.datanucleus.ClassLoaderResolver;
@@ -809,7 +810,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.makePersistent(mCat);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -832,7 +835,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.makePersistent(mCat);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -840,7 +845,9 @@ public class ObjectStore implements RawStore, Configurable {
public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
LOG.debug("Fetching catalog " + catalogName);
MCatalog mCat = getMCatalog(catalogName);
- if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName);
+ if (mCat == null) {
+ throw new NoSuchObjectException("No catalog " + catalogName);
+ }
return mCatToCat(mCat);
}
@@ -874,11 +881,15 @@ public class ObjectStore implements RawStore, Configurable {
openTransaction();
MCatalog mCat = getMCatalog(catalogName);
pm.retrieve(mCat);
- if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName);
+ if (mCat == null) {
+ throw new NoSuchObjectException("No catalog " + catalogName);
+ }
pm.deletePersistent(mCat);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -903,14 +914,18 @@ public class ObjectStore implements RawStore, Configurable {
private MCatalog catToMCat(Catalog cat) {
MCatalog mCat = new MCatalog();
mCat.setName(normalizeIdentifier(cat.getName()));
- if (cat.isSetDescription()) mCat.setDescription(cat.getDescription());
+ if (cat.isSetDescription()) {
+ mCat.setDescription(cat.getDescription());
+ }
mCat.setLocationUri(cat.getLocationUri());
return mCat;
}
private Catalog mCatToCat(MCatalog mCat) {
Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
- if (mCat.getDescription() != null) cat.setDescription(mCat.getDescription());
+ if (mCat.getDescription() != null) {
+ cat.setDescription(mCat.getDescription());
+ }
return cat;
}
@@ -1983,10 +1998,18 @@ public class ObjectStore implements RawStore, Configurable {
}
SerDeInfo serde =
new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
- if (ms.getDescription() != null) serde.setDescription(ms.getDescription());
- if (ms.getSerializerClass() != null) serde.setSerializerClass(ms.getSerializerClass());
- if (ms.getDeserializerClass() != null) serde.setDeserializerClass(ms.getDeserializerClass());
- if (ms.getSerdeType() > 0) serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+ if (ms.getDescription() != null) {
+ serde.setDescription(ms.getDescription());
+ }
+ if (ms.getSerializerClass() != null) {
+ serde.setSerializerClass(ms.getSerializerClass());
+ }
+ if (ms.getDeserializerClass() != null) {
+ serde.setDeserializerClass(ms.getDeserializerClass());
+ }
+ if (ms.getSerdeType() > 0) {
+ serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+ }
return serde;
}
@@ -3679,7 +3702,7 @@ public class ObjectStore implements RawStore, Configurable {
@Override
protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws MetaException {
return directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter);
- };
+ }
@Override
protected Integer getSqlResult(GetHelper<Integer> ctx) throws MetaException {
@@ -9998,7 +10021,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.makePersistent(mSchema);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10017,11 +10042,17 @@ public class ObjectStore implements RawStore, Configurable {
oldMSchema.setCompatibility(newSchema.getCompatibility().getValue());
oldMSchema.setValidationLevel(newSchema.getValidationLevel().getValue());
oldMSchema.setCanEvolve(newSchema.isCanEvolve());
- if (newSchema.isSetSchemaGroup()) oldMSchema.setSchemaGroup(newSchema.getSchemaGroup());
- if (newSchema.isSetDescription()) oldMSchema.setDescription(newSchema.getDescription());
+ if (newSchema.isSetSchemaGroup()) {
+ oldMSchema.setSchemaGroup(newSchema.getSchemaGroup());
+ }
+ if (newSchema.isSetDescription()) {
+ oldMSchema.setDescription(newSchema.getDescription());
+ }
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10035,7 +10066,9 @@ public class ObjectStore implements RawStore, Configurable {
committed = commitTransaction();
return schema;
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10054,7 +10087,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.retrieve(mSchema);
return mSchema;
} finally {
- if (query != null) query.closeAll();
+ if (query != null) {
+ query.closeAll();
+ }
}
}
@@ -10071,7 +10106,9 @@ public class ObjectStore implements RawStore, Configurable {
}
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10096,7 +10133,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.makePersistent(mSchemaVersion);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();;
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10113,11 +10152,17 @@ public class ObjectStore implements RawStore, Configurable {
}
// We only support changing the SerDe mapping and the state.
- if (newVersion.isSetSerDe()) oldMSchemaVersion.setSerDe(convertToMSerDeInfo(newVersion.getSerDe()));
- if (newVersion.isSetState()) oldMSchemaVersion.setState(newVersion.getState().getValue());
+ if (newVersion.isSetSerDe()) {
+ oldMSchemaVersion.setSerDe(convertToMSerDeInfo(newVersion.getSerDe()));
+ }
+ if (newVersion.isSetState()) {
+ oldMSchemaVersion.setState(newVersion.getState().getValue());
+ }
committed = commitTransaction();
} finally {
- if (!committed) commitTransaction();
+ if (!committed) {
+ // Review fix: a transaction that failed to commit must be rolled back;
+ // re-invoking commitTransaction() here was a bug (every sibling method
+ // in this class calls rollbackTransaction() on this path).
+ rollbackTransaction();
+ }
}
}
@@ -10132,7 +10177,9 @@ public class ObjectStore implements RawStore, Configurable {
committed = commitTransaction();
return schemaVersion;
} finally {
- if (!committed) rollbackTransaction();;
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10152,11 +10199,15 @@ public class ObjectStore implements RawStore, Configurable {
pm.retrieve(mSchemaVersion);
if (mSchemaVersion != null) {
pm.retrieveAll(mSchemaVersion.getCols());
- if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+ if (mSchemaVersion.getSerDe() != null) {
+ pm.retrieve(mSchemaVersion.getSerDe());
+ }
}
return mSchemaVersion;
} finally {
- if (query != null) query.closeAll();
+ if (query != null) {
+ query.closeAll();
+ }
}
}
@@ -10180,7 +10231,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.retrieve(mSchemaVersion);
if (mSchemaVersion != null) {
pm.retrieveAll(mSchemaVersion.getCols());
- if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+ if (mSchemaVersion.getSerDe() != null) {
+ pm.retrieve(mSchemaVersion.getSerDe());
+ }
}
SchemaVersion version = mSchemaVersion == null ? null : convertToSchemaVersion(mSchemaVersion);
committed = commitTransaction();
@@ -10206,11 +10259,15 @@ public class ObjectStore implements RawStore, Configurable {
query.setOrdering("version descending");
List<MSchemaVersion> mSchemaVersions = query.setParameters(name, dbName, catName).executeList();
pm.retrieveAll(mSchemaVersions);
- if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return null;
+ if (mSchemaVersions == null || mSchemaVersions.isEmpty()) {
+ return null;
+ }
List<SchemaVersion> schemaVersions = new ArrayList<>(mSchemaVersions.size());
for (MSchemaVersion mSchemaVersion : mSchemaVersions) {
pm.retrieveAll(mSchemaVersion.getCols());
- if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+ if (mSchemaVersion.getSerDe() != null) {
+ pm.retrieve(mSchemaVersion.getSerDe());
+ }
schemaVersions.add(convertToSchemaVersion(mSchemaVersion));
}
committed = commitTransaction();
@@ -10232,8 +10289,12 @@ public class ObjectStore implements RawStore, Configurable {
Query query = null;
try {
openTransaction();
- if (colName != null) colName = normalizeIdentifier(colName);
- if (type != null) type = normalizeIdentifier(type);
+ if (colName != null) {
+ colName = normalizeIdentifier(colName);
+ }
+ if (type != null) {
+ type = normalizeIdentifier(type);
+ }
Map<String, String> parameters = new HashMap<>(3);
StringBuilder sql = new StringBuilder("select SCHEMA_VERSION_ID from " +
"SCHEMA_VERSION, COLUMNS_V2 where SCHEMA_VERSION.CD_ID = COLUMNS_V2.CD_ID ");
@@ -10259,12 +10320,16 @@ public class ObjectStore implements RawStore, Configurable {
query = pm.newQuery("javax.jdo.query.SQL", sql.toString());
query.setClass(MSchemaVersion.class);
List<MSchemaVersion> mSchemaVersions = query.setNamedParameters(parameters).executeList();
- if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return Collections.emptyList();
+ if (mSchemaVersions == null || mSchemaVersions.isEmpty()) {
+ return Collections.emptyList();
+ }
pm.retrieveAll(mSchemaVersions);
List<SchemaVersion> schemaVersions = new ArrayList<>(mSchemaVersions.size());
for (MSchemaVersion mSchemaVersion : mSchemaVersions) {
pm.retrieveAll(mSchemaVersion.getCols());
- if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+ if (mSchemaVersion.getSerDe() != null) {
+ pm.retrieve(mSchemaVersion.getSerDe());
+ }
schemaVersions.add(convertToSchemaVersion(mSchemaVersion));
}
committed = commitTransaction();
@@ -10291,7 +10356,9 @@ public class ObjectStore implements RawStore, Configurable {
}
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10308,7 +10375,9 @@ public class ObjectStore implements RawStore, Configurable {
committed = commitTransaction();
return serde;
} finally {
- if (!committed) rollbackTransaction();;
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10322,7 +10391,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.retrieve(mSerDeInfo);
return mSerDeInfo;
} finally {
- if (query != null) query.closeAll();
+ if (query != null) {
+ query.closeAll();
+ }
}
}
@@ -10338,7 +10409,9 @@ public class ObjectStore implements RawStore, Configurable {
pm.makePersistent(mSerde);
committed = commitTransaction();
} finally {
- if (!committed) rollbackTransaction();
+ if (!committed) {
+ rollbackTransaction();
+ }
}
}
@@ -10355,7 +10428,9 @@ public class ObjectStore implements RawStore, Configurable {
}
private ISchema convertToISchema(MISchema mSchema) {
- if (mSchema == null) return null;
+ if (mSchema == null) {
+ return null;
+ }
ISchema schema = new ISchema(SchemaType.findByValue(mSchema.getSchemaType()),
mSchema.getName(),
mSchema.getDb().getCatalogName(),
@@ -10363,8 +10438,12 @@ public class ObjectStore implements RawStore, Configurable {
SchemaCompatibility.findByValue(mSchema.getCompatibility()),
SchemaValidation.findByValue(mSchema.getValidationLevel()),
mSchema.getCanEvolve());
- if (mSchema.getDescription() != null) schema.setDescription(mSchema.getDescription());
- if (mSchema.getSchemaGroup() != null) schema.setSchemaGroup(mSchema.getSchemaGroup());
+ if (mSchema.getDescription() != null) {
+ schema.setDescription(mSchema.getDescription());
+ }
+ if (mSchema.getSchemaGroup() != null) {
+ schema.setSchemaGroup(mSchema.getSchemaGroup());
+ }
return schema;
}
@@ -10385,19 +10464,33 @@ public class ObjectStore implements RawStore, Configurable {
}
private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) throws MetaException {
- if (mSchemaVersion == null) return null;
+ if (mSchemaVersion == null) {
+ return null;
+ }
SchemaVersion schemaVersion = new SchemaVersion(
new ISchemaName(mSchemaVersion.getiSchema().getDb().getCatalogName(),
mSchemaVersion.getiSchema().getDb().getName(), mSchemaVersion.getiSchema().getName()),
mSchemaVersion.getVersion(),
mSchemaVersion.getCreatedAt(),
convertToFieldSchemas(mSchemaVersion.getCols().getCols()));
- if (mSchemaVersion.getState() > 0) schemaVersion.setState(SchemaVersionState.findByValue(mSchemaVersion.getState()));
- if (mSchemaVersion.getDescription() != null) schemaVersion.setDescription(mSchemaVersion.getDescription());
- if (mSchemaVersion.getSchemaText() != null) schemaVersion.setSchemaText(mSchemaVersion.getSchemaText());
- if (mSchemaVersion.getFingerprint() != null) schemaVersion.setFingerprint(mSchemaVersion.getFingerprint());
- if (mSchemaVersion.getName() != null) schemaVersion.setName(mSchemaVersion.getName());
- if (mSchemaVersion.getSerDe() != null) schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe()));
+ if (mSchemaVersion.getState() > 0) {
+ schemaVersion.setState(SchemaVersionState.findByValue(mSchemaVersion.getState()));
+ }
+ if (mSchemaVersion.getDescription() != null) {
+ schemaVersion.setDescription(mSchemaVersion.getDescription());
+ }
+ if (mSchemaVersion.getSchemaText() != null) {
+ schemaVersion.setSchemaText(mSchemaVersion.getSchemaText());
+ }
+ if (mSchemaVersion.getFingerprint() != null) {
+ schemaVersion.setFingerprint(mSchemaVersion.getFingerprint());
+ }
+ if (mSchemaVersion.getName() != null) {
+ schemaVersion.setName(mSchemaVersion.getName());
+ }
+ if (mSchemaVersion.getSerDe() != null) {
+ schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe()));
+ }
return schemaVersion;
}
@@ -11507,4 +11600,61 @@ public class ObjectStore implements RawStore, Configurable {
rollbackAndCleanup(commited, (Query)null);
}
}
+
+ @Override
+ public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+ LOG.debug("runtimeStat: " + stat);
+ MRuntimeStat mStat = MRuntimeStat.fromThrift(stat);
+ pm.makePersistent(mStat);
+ }
+
+ /**
+ * Deletes runtime statistic entries that exceed either the retained-weight
+ * budget or the maximum retention age.
+ *
+ * @param maxRetainedWeight total weight of entries to keep; negative means unlimited
+ * @param maxRetainSecs entries older than this many seconds are removed;
+ *          negative disables age-based cleanup
+ * @return number of deleted entries
+ * @throws MetaException general database exception
+ */
+ @Override
+ public int deleteRuntimeStats(int maxRetainedWeight, int maxRetainSecs) throws MetaException {
+ boolean committed = false;
+ try {
+ // Review fix: deletions now run inside a transaction (the original body
+ // called deletePersistent() without one), and the unused local
+ // maxIdToRemove has been removed.
+ openTransaction();
+ List<MRuntimeStat> all = getMRuntimeStats();
+ int retentionTime = 0;
+ if (maxRetainSecs >= 0) {
+ retentionTime = (int) (System.currentTimeMillis() / 1000) - maxRetainSecs;
+ }
+ if (maxRetainedWeight < 0) {
+ maxRetainedWeight = Integer.MAX_VALUE;
+ }
+ long totalWeight = 0;
+ int deleted = 0;
+ // getMRuntimeStats() returns entries newest-first, so the weight budget
+ // keeps the most recent stats and evicts older ones once exhausted.
+ for (MRuntimeStat mRuntimeStat : all) {
+ totalWeight += mRuntimeStat.getWeight();
+ if (totalWeight > maxRetainedWeight || mRuntimeStat.getCreatedTime() < retentionTime) {
+ LOG.debug("removing runtime stat: " + mRuntimeStat);
+ pm.deletePersistent(mRuntimeStat);
+ deleted++;
+ }
+ }
+ committed = commitTransaction();
+ return deleted;
+ } finally {
+ if (!committed) {
+ rollbackTransaction();
+ }
+ }
+ }
+
+ @Override
+ public List<RuntimeStat> getRuntimeStats() throws MetaException {
+ boolean committed = false;
+ try {
+ openTransaction();
+ List<MRuntimeStat> mStats = getMRuntimeStats();
+ List<RuntimeStat> stats = mStats.stream().map(MRuntimeStat::toThrift).collect(Collectors.toList());
+ committed = commitTransaction();
+ return stats;
+ } finally {
+ if (!committed) {
+ rollbackTransaction();
+ }
+ }
+ }
+
+ /**
+ * Fetches all MRuntimeStat rows ordered by creation time, newest first.
+ *
+ * NOTE(review): the JDO query is never closed here, while sibling helpers in
+ * this class call query.closeAll() in a finally block — confirm whether
+ * pm.retrieveAll(res) makes that safe for the returned objects.
+ */
+ private List<MRuntimeStat> getMRuntimeStats() {
+ Query<MRuntimeStat> query = pm.newQuery(MRuntimeStat.class);
+ query.setOrdering("createTime descending");
+ List<MRuntimeStat> res = (List<MRuntimeStat>) query.execute();
+ pm.retrieveAll(res);
+ return res;
+ }
+
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index f6c46ee..2c9f2e5 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hive.metastore;
-import org.apache.hadoop.hive.metastore.api.AlterISchemaRequest;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.ISchemaName;
import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
@@ -38,7 +37,6 @@ import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -46,7 +44,6 @@ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.ISchema;
-import org.apache.hadoop.hive.metastore.api.ISchemaName;
import org.apache.hadoop.hive.metastore.api.InvalidInputException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
@@ -66,8 +63,8 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
-import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
@@ -81,7 +78,6 @@ import org.apache.hadoop.hive.metastore.api.Type;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
import org.apache.hadoop.hive.metastore.api.WMNullablePool;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
@@ -1629,4 +1625,13 @@ public interface RawStore extends Configurable {
* @throws MetaException general database exception
*/
void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException;
+
+ /**
+ * Adds a RuntimeStat for persistence.
+ * @param stat the runtime statistics entry to store
+ * @throws MetaException general database exception
+ */
+ void addRuntimeStat(RuntimeStat stat) throws MetaException;
+
+ /**
+ * Reads runtime statistic entries.
+ * @return all persisted runtime statistics entries
+ * @throws MetaException general database exception
+ */
+ List<RuntimeStat> getRuntimeStats() throws MetaException;
+
+ /**
+ * Removes outdated statistics.
+ * @param maxRetained maximum total weight of entries to retain
+ * @param maxRetainSecs entries older than this many seconds are removed
+ * @return number of deleted entries
+ * @throws MetaException general database exception
+ */
+ int deleteRuntimeStats(int maxRetained, int maxRetainSecs) throws MetaException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RuntimeStatsCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RuntimeStatsCleanerTask.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RuntimeStatsCleanerTask.java
new file mode 100644
index 0000000..202058e
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RuntimeStatsCleanerTask.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hive.metastore.RawStore;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Metastore task to handle RuntimeStat related expiration.
+ */
+public class RuntimeStatsCleanerTask implements MetastoreTaskThread {
+ private static final Logger LOG = LoggerFactory.getLogger(RuntimeStatsCleanerTask.class);
+
+ private Configuration conf;
+
+ /** How often the cleanup runs, driven by RUNTIME_STATS_CLEAN_FREQUENCY. */
+ @Override
+ public long runFrequency(TimeUnit unit) {
+ return MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.RUNTIME_STATS_CLEAN_FREQUENCY, unit);
+ }
+
+ @Override
+ public void setConf(Configuration configuration) {
+ conf = configuration;
+ }
+
+ @Override
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /** Deletes runtime stats exceeding the configured count/age limits. */
+ @Override
+ public void run() {
+ try {
+ RawStore ms = HiveMetaStore.HMSHandler.getMSForConf(conf);
+ int maxRetained = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.RUNTIME_STATS_MAX_ENTRIES);
+ int maxRetainSecs =
+ (int) MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.RUNTIME_STATS_MAX_AGE, TimeUnit.SECONDS);
+ int deleteCnt = ms.deleteRuntimeStats(maxRetained, maxRetainSecs);
+
+ // deleteCnt is an int; compare with an int literal (was "> 0L") and use
+ // SLF4J parameterized logging instead of string concatenation.
+ if (deleteCnt > 0) {
+ LOG.info("Number of deleted entries: {}", deleteCnt);
+ }
+ } catch (Exception e) {
+ // Deliberately swallowed after logging: a periodic cleanup failure must
+ // not kill the metastore task thread.
+ LOG.error("Exception while trying to delete: " + e.getMessage(), e);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index ebdcbc2..92d000b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -220,8 +221,9 @@ public class CachedStore implements RawStore, Configurable {
LOG.info("Going to cache catalogs: "
+ org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
List<Catalog> catalogs = new ArrayList<>(catalogsToCache.size());
- for (String catName : catalogsToCache)
+ for (String catName : catalogsToCache) {
catalogs.add(rawStore.getCatalog(catName));
+ }
sharedCache.populateCatalogsInCache(catalogs);
} catch (MetaException | NoSuchObjectException e) {
LOG.warn("Failed to populate catalogs in cache, going to try again", e);
@@ -2175,6 +2177,7 @@ public class CachedStore implements RawStore, Configurable {
return rawStore.addNotNullConstraints(nns);
}
+ @Override
public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
throws InvalidObjectException, MetaException {
// TODO constraintCache
@@ -2195,6 +2198,7 @@ public class CachedStore implements RawStore, Configurable {
rawStore.createISchema(schema);
}
+ @Override
public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
throws MetaException, NoSuchObjectException {
return rawStore.getPartitionColStatsForDatabase(catName, dbName);
@@ -2466,4 +2470,19 @@ public class CachedStore implements RawStore, Configurable {
sharedCache.resetCatalogCache();
setCachePrewarmedState(false);
}
+
+ // Runtime stats are not cached: all three operations delegate directly to
+ // the underlying RawStore.
+ @Override
+ public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+ rawStore.addRuntimeStat(stat);
+ }
+
+ @Override
+ public List<RuntimeStat> getRuntimeStats() throws MetaException {
+ return rawStore.getRuntimeStats();
+ }
+
+ @Override
+ public int deleteRuntimeStats(int maxRetained, int maxRetainSecs) throws MetaException {
+ return rawStore.deleteRuntimeStats(maxRetained, maxRetainSecs);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 59749e4..552eeca 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.HiveAlterHandler;
import org.apache.hadoop.hive.metastore.MaterializationsCacheCleanerTask;
import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockCleanerTask;
import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+import org.apache.hadoop.hive.metastore.RuntimeStatsCleanerTask;
import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
@@ -577,6 +578,15 @@ public class MetastoreConf {
"hive.metastore.materializations.invalidation.max.duration",
86400, TimeUnit.SECONDS, "Maximum duration for query producing a materialization. After this time, transaction" +
"entries that are not relevant for materializations can be removed from invalidation cache."),
+
+ RUNTIME_STATS_CLEAN_FREQUENCY("runtime.stats.clean.frequency", "hive.metastore.runtime.stats.clean.frequency", 3600,
+ TimeUnit.SECONDS, "Frequency at which timer task runs to remove outdated runtime stat entries."),
+ RUNTIME_STATS_MAX_AGE("runtime.stats.max.age", "hive.metastore.runtime.stats.max.age", 86400 * 3, TimeUnit.SECONDS,
+ "Stat entries which are older than this are removed."),
+ RUNTIME_STATS_MAX_ENTRIES("runtime.stats.max.entries", "hive.metastore.runtime.stats.max.entries", 100_000,
+ "Maximum number of runtime stats to keep; unit is operator stat infos - a complicated query has ~100 of these."
+ + "See also: hive.query.reexecution.stats.cache.size"),
+
// Parameters for exporting metadata on table drop (requires the use of the)
// org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
METADATA_EXPORT_LOCATION("metastore.metadata.export.location", "hive.metadata.export.location",
@@ -732,10 +742,10 @@ public class MetastoreConf {
+ "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+ "The default value is true."),
TASK_THREADS_ALWAYS("metastore.task.threads.always", "metastore.task.threads.always",
- EventCleanerTask.class.getName() + "," +
+ // Review fix: RuntimeStatsCleanerTask was appended twice (here and again
+ // after MaterializationsRebuildLockCleanerTask), which would register and
+ // schedule the cleaner task twice. Register it exactly once.
+ EventCleanerTask.class.getName() + "," + RuntimeStatsCleanerTask.class.getName() + "," +
"org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask" + "," +
MaterializationsCacheCleanerTask.class.getName() + "," +
MaterializationsRebuildLockCleanerTask.class.getName(),
"Comma separated list of tasks that will be started in separate threads. These will " +
"always be started, regardless of whether the metastore is running in embedded mode " +
"or in server mode. They must implement " + MetastoreTaskThread.class.getName()),
@@ -1127,16 +1137,22 @@ public class MetastoreConf {
*/
hiveSiteURL = findConfigFile(classLoader, "hive-site.xml");
}
- if (hiveSiteURL != null) conf.addResource(hiveSiteURL);
+ if (hiveSiteURL != null) {
+ conf.addResource(hiveSiteURL);
+ }
// Now add hivemetastore-site.xml. Again we add this before our own config files so that the
// newer overrides the older.
hiveMetastoreSiteURL = findConfigFile(classLoader, "hivemetastore-site.xml");
- if (hiveMetastoreSiteURL != null) conf.addResource(hiveMetastoreSiteURL);
+ if (hiveMetastoreSiteURL != null) {
+ conf.addResource(hiveMetastoreSiteURL);
+ }
// Add in our conf file
metastoreSiteURL = findConfigFile(classLoader, "metastore-site.xml");
- if (metastoreSiteURL != null) conf.addResource(metastoreSiteURL);
+ if (metastoreSiteURL != null) {
+ conf.addResource(metastoreSiteURL);
+ }
// If a system property that matches one of our conf value names is set then use the value
// it's set to to set our own conf value.
@@ -1268,8 +1284,12 @@ public class MetastoreConf {
public static Collection<String> getStringCollection(Configuration conf, ConfVars var) {
assert var.defaultVal.getClass() == String.class;
String val = conf.get(var.varname);
- if (val == null) val = conf.get(var.hiveName, (String)var.defaultVal);
- if (val == null) return Collections.emptySet();
+ if (val == null) {
+ val = conf.get(var.hiveName, (String)var.defaultVal);
+ }
+ if (val == null) {
+ return Collections.emptySet();
+ }
return StringUtils.asSet(val.split(","));
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MRuntimeStat.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MRuntimeStat.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MRuntimeStat.java
new file mode 100644
index 0000000..054ce7c
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MRuntimeStat.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.model;
+
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+
+/**
+ * Represents a runtime stat query entry.
+ *
+ * Because a query may contain a large number of operator stat entries, they are stored together in a single row in the metastore.
+ * The number of operator stat entries this entity holds is shown in the weight column.
+ */
+public class MRuntimeStat {
+
+ private int createTime;
+ private int weight;
+ private byte[] payload;
+
+ public static MRuntimeStat fromThrift(RuntimeStat stat) {
+ MRuntimeStat ret = new MRuntimeStat();
+ ret.weight = stat.getWeight();
+ ret.payload = stat.getPayload();
+ ret.createTime = (int) (System.currentTimeMillis() / 1000);
+ return ret;
+ }
+
+ public static RuntimeStat toThrift(MRuntimeStat stat) {
+ RuntimeStat ret = new RuntimeStat();
+ ret.setWeight(stat.weight);
+ ret.setCreateTime(stat.createTime);
+ ret.setPayload(stat.payload);
+ return ret;
+ }
+
+ public int getWeight() {
+ return weight;
+ }
+
+ public int getCreatedTime() {
+ return createTime;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index 9ddf598..221192e 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -1339,6 +1339,20 @@
<column name="SERDE_ID"/>
</field>
</class>
+ <class name="MRuntimeStat" identity-type="datastore" table="RUNTIME_STATS" detachable="true">
+ <datastore-identity>
+ <column name="RS_ID"/>
+ </datastore-identity>
+ <field name="createTime">
+ <column name="CREATE_TIME" jdbc-type="integer"/>
+ </field>
+ <field name="weight">
+ <column name="WEIGHT" jdbc-type="integer"/>
+ </field>
+ <field name="payload">
+ <column name="PAYLOAD" jdbc-type="BLOB" allows-null="true"/>
+ </field>
+ </class>
</package>
</jdo>
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index adfa4c5..48d28cb 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -676,6 +676,16 @@ CREATE TABLE REPL_TXN_MAP (
RTM_TARGET_TXN_ID bigint NOT NULL,
PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
);
+
+CREATE TABLE "APP"."RUNTIME_STATS" (
+ "RS_ID" bigint primary key,
+ "CREATE_TIME" integer not null,
+ "WEIGHT" integer not null,
+ "PAYLOAD" BLOB
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index a75b740..ed6c4cd 100644
--- a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -238,6 +238,16 @@ CREATE TABLE MIN_HISTORY_LEVEL (
CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+
+CREATE TABLE "APP"."RUNTIME_STATS" (
+ "RS_ID" bigint primary key,
+ "CREATE_TIME" integer not null,
+ "WEIGHT" integer not null,
+ "PAYLOAD" BLOB
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- This needs to be the last thing done. Insert any changes above this line.
UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index 91c581c..6e31b16 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -1230,6 +1230,15 @@ CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME);
INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD varbinary(max)
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
index 87f5884..c2504d3 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
@@ -306,6 +306,15 @@ PRIMARY KEY CLUSTERED
CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD varbinary(max)
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index 7e2a57a..4309911 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -1154,6 +1154,16 @@ CREATE TABLE REPL_TXN_MAP (
PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD blob
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index 5ba68ca..e01b4da 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -279,6 +279,15 @@ CREATE TABLE MIN_HISTORY_LEVEL (
CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD blob
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index f9e1a19..a45c7bb 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -1124,6 +1124,16 @@ CREATE TABLE REPL_TXN_MAP (
PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID NUMBER primary key,
+ CREATE_TIME NUMBER(10) NOT NULL,
+ WEIGHT NUMBER(10) NOT NULL,
+ PAYLOAD BLOB
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index a769d24..327800b 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -297,6 +297,15 @@ CREATE TABLE MIN_HISTORY_LEVEL (
CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID NUMBER primary key,
+ CREATE_TIME NUMBER(10) NOT NULL,
+ WEIGHT NUMBER(10) NOT NULL,
+ PAYLOAD BLOB
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 6fed072..2484744 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -1811,6 +1811,17 @@ CREATE TABLE REPL_TXN_MAP (
PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
);
+
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD bytea
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
+
-- -----------------------------------------------------------------
-- Record schema version. Should be the last step in the init script
-- -----------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
index 7b6b3b7..63932a9 100644
--- a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -314,6 +314,15 @@ CREATE TABLE MIN_HISTORY_LEVEL (
CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+CREATE TABLE RUNTIME_STATS (
+ RS_ID bigint primary key,
+ CREATE_TIME bigint NOT NULL,
+ WEIGHT bigint NOT NULL,
+ PAYLOAD bytea
+);
+
+CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0';
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 12e4e40..c56a4f9 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1517,6 +1517,15 @@ struct GetSerdeRequest {
1: string serdeName
}
+struct RuntimeStat {
+ 1: optional i32 createTime,
+ 2: required i32 weight,
+ 3: required binary payload
+}
+
+struct GetRuntimeStatsRequest {
+}
+
// Exceptions.
exception MetaException {
@@ -2171,6 +2180,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
LockResponse get_lock_materialization_rebuild(1: string dbName, 2: string tableName, 3: i64 txnId)
bool heartbeat_lock_materialization_rebuild(1: string dbName, 2: string tableName, 3: i64 txnId)
+
+ void add_runtime_stats(1: RuntimeStat stat) throws(1:MetaException o1)
+ list<RuntimeStat> get_runtime_stats(1: GetRuntimeStatsRequest rqst) throws(1:MetaException o1)
}
// * Note about the DDL_TIME: When creating or altering a table or a partition,
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 304f567..defc68f 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -280,6 +281,7 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
}
+ @Override
public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
return objectStore.getTables(catName, dbName, pattern);
}
@@ -1092,6 +1094,7 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
return null;
}
+ @Override
public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
NoSuchObjectException {
objectStore.createISchema(schema);
@@ -1161,4 +1164,19 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
objectStore.addSerde(serde);
}
+
+ @Override
+ public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+ objectStore.addRuntimeStat(stat);
+ }
+
+ @Override
+ public List<RuntimeStat> getRuntimeStats() throws MetaException {
+ return objectStore.getRuntimeStats();
+ }
+
+ @Override
+ public int deleteRuntimeStats(int maxRetained, int maxRetainSecs) throws MetaException {
+ return objectStore.deleteRuntimeStats(maxRetained, maxRetainSecs);
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 85c6727..20c5d8a 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger;
import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -276,6 +277,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
throws MetaException {
}
+ @Override
public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
return Collections.emptyList();
}
@@ -1080,6 +1082,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
return null;
}
+ @Override
public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException {
}
@@ -1148,4 +1151,18 @@ public class DummyRawStoreForJdoConnection implements RawStore {
public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
}
+
+ @Override
+ public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+ }
+
+ @Override
+ public List<RuntimeStat> getRuntimeStats() throws MetaException {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public int deleteRuntimeStats(int maxRetained, int maxRetainSecs) throws MetaException {
+ return 0;
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index cb51763..bf87cfc 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -3352,4 +3352,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws TException {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public void addRuntimeStat(RuntimeStat stat) throws TException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<RuntimeStat> getRuntimeStats() throws TException {
+ throw new UnsupportedOperationException();
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b3e2d8a0/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestRuntimeStats.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestRuntimeStats.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestRuntimeStats.java
new file mode 100644
index 0000000..2db7a8b
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestRuntimeStats.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.client;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import java.util.List;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+
+@RunWith(Parameterized.class)
+@Category(MetastoreUnitTest.class)
+public class TestRuntimeStats extends MetaStoreClientTest {
+ private final AbstractMetaStoreService metaStore;
+ private IMetaStoreClient client;
+
+ public TestRuntimeStats(String name, AbstractMetaStoreService metaStore) throws Exception {
+ this.metaStore = metaStore;
+ this.metaStore.start();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ client = metaStore.getClient();
+
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ client.close();
+ client = null;
+ }
+
+ @Test
+ public void testRuntimeStatHandling() throws Exception {
+ List<RuntimeStat> rs0 = client.getRuntimeStats();
+ assertNotNull(rs0);
+ assertEquals(0, rs0.size());
+
+ RuntimeStat stat = createStat(1);
+ client.addRuntimeStat(stat);
+
+ List<RuntimeStat> rs1 = client.getRuntimeStats();
+ assertNotNull(rs1);
+ assertEquals(1, rs1.size());
+ assertArrayEquals(stat.getPayload(), rs1.get(0).getPayload());
+ assertEquals(stat.getWeight(), rs1.get(0).getWeight());
+ // server sets createtime
+ assertNotEquals(stat.getCreateTime(), rs1.get(0).getCreateTime());
+
+ client.addRuntimeStat(createStat(2));
+ client.addRuntimeStat(createStat(3));
+ client.addRuntimeStat(createStat(4));
+
+ List<RuntimeStat> rs2 = client.getRuntimeStats();
+ assertEquals(4, rs2.size());
+
+ }
+
+ private RuntimeStat createStat(int w) {
+
+ byte[] payload = new byte[w];
+ for (int i = 0; i < payload.length; i++) {
+ payload[i] = 'x';
+ }
+
+ RuntimeStat stat = new RuntimeStat();
+ stat.setWeight(w);
+ stat.setPayload(payload);
+ return stat;
+ }
+
+}