You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by se...@apache.org on 2018/01/20 00:11:54 UTC
[8/8] hive git commit: HIVE-18231 : validate resource plan - part 2 -
validate action and trigger expressions (Harish Jaiprakash,
reviewed by Sergey Shelukhin)
HIVE-18231 : validate resource plan - part 2 - validate action and trigger expressions (Harish Jaiprakash, reviewed by Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/90d236af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/90d236af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/90d236af
Branch: refs/heads/master
Commit: 90d236affcb19b52ca66029e6646c5d751dc5f02
Parents: 6d890fa
Author: sergey <se...@apache.org>
Authored: Fri Jan 19 16:11:38 2018 -0800
Committer: sergey <se...@apache.org>
Committed: Fri Jan 19 16:11:38 2018 -0800
----------------------------------------------------------------------
.../listener/DummyRawStoreFailEvent.java | 3 +-
.../org/apache/hadoop/hive/ql/exec/DDLTask.java | 5 +-
.../apache/hadoop/hive/ql/metadata/Hive.java | 3 +-
.../formatting/JsonMetaDataFormatter.java | 19 +-
.../formatting/MetaDataFormatUtils.java | 3 +
.../metadata/formatting/MetaDataFormatter.java | 4 +-
.../formatting/TextMetaDataFormatter.java | 13 +-
.../org/apache/hadoop/hive/ql/parse/HiveLexer.g | 15 +
.../hadoop/hive/ql/parse/ParseDriver.java | 46 +-
.../hadoop/hive/ql/parse/ResourcePlanParser.g | 46 +-
.../org/apache/hadoop/hive/ql/wm/Action.java | 46 +-
.../hadoop/hive/ql/wm/ExpressionFactory.java | 33 +-
.../formatting/TestJsonRPFormatter.java | 10 +-
.../hive/ql/wm/TestExpressionFactory.java | 115 +
.../apache/hadoop/hive/ql/wm/TestTrigger.java | 46 +-
.../test/queries/clientpositive/resourceplan.q | 45 +-
.../clientpositive/llap/resourceplan.q.out | 260 +-
.../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp | 2326 ++++++++--------
.../gen/thrift/gen-cpp/hive_metastore_types.cpp | 524 ++--
.../gen/thrift/gen-cpp/hive_metastore_types.h | 10 +-
.../hive/metastore/api/ThriftHiveMetastore.java | 2536 +++++++++---------
.../WMGetTriggersForResourePlanResponse.java | 36 +-
.../api/WMValidateResourcePlanResponse.java | 183 +-
.../gen-php/metastore/ThriftHiveMetastore.php | 1428 +++++-----
.../src/gen/thrift/gen-php/metastore/Types.php | 73 +-
.../hive_metastore/ThriftHiveMetastore.py | 962 +++----
.../gen/thrift/gen-py/hive_metastore/ttypes.py | 41 +-
.../gen/thrift/gen-rb/hive_metastore_types.rb | 4 +-
.../hadoop/hive/metastore/HiveMetaStore.java | 5 +-
.../hive/metastore/HiveMetaStoreClient.java | 4 +-
.../hadoop/hive/metastore/IMetaStoreClient.java | 3 +-
.../hadoop/hive/metastore/ObjectStore.java | 74 +-
.../apache/hadoop/hive/metastore/RawStore.java | 4 +-
.../hive/metastore/cache/CachedStore.java | 3 +-
.../src/main/thrift/hive_metastore.thrift | 1 +
.../DummyRawStoreControlledCommit.java | 3 +-
.../DummyRawStoreForJdoConnection.java | 5 +-
37 files changed, 4763 insertions(+), 4174 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 9ec41e3..78b2637 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.Role;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
@@ -1011,7 +1012,7 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
}
@Override
- public List<String> validateResourcePlan(String name)
+ public WMValidateResourcePlanResponse validateResourcePlan(String name)
throws NoSuchObjectException, InvalidObjectException, MetaException {
return objectStore.validateResourcePlan(name);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 96fc330..51ef390 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -109,6 +109,7 @@ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.CompilationOpContext;
@@ -717,9 +718,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
if (desc.shouldValidate()) {
- List<String> errors = db.validateResourcePlan(desc.getResourcePlanName());
+ WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());
try (DataOutputStream out = getOutputStream(desc.getResFile())) {
- formatter.showErrors(out, errors);
+ formatter.showErrors(out, result);
} catch (IOException e) {
throw new HiveException(e);
};
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index f1610eb..23983d8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMPool;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMTrigger;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -4877,7 +4878,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
}
}
- public List<String> validateResourcePlan(String rpName) throws HiveException {
+ public WMValidateResourcePlanResponse validateResourcePlan(String rpName) throws HiveException {
try {
return getMSC().validateResourcePlan(rpName);
} catch (Exception e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index 3142901..77e5678 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -50,7 +51,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.PrimaryKeyInfo;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.metadata.UniqueConstraint;
-import org.codehaus.jackson.JsonGenerationException;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.map.ObjectMapper;
@@ -578,15 +578,26 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
}
@Override
- public void showErrors(DataOutputStream out, List<String> errors) throws HiveException {
+ public void showErrors(DataOutputStream out, WMValidateResourcePlanResponse response)
+ throws HiveException {
JsonGenerator generator = null;
try {
generator = new ObjectMapper().getJsonFactory().createJsonGenerator(out);
- generator.writeStartArray();
- for (String error : errors) {
+ generator.writeStartObject();
+
+ generator.writeArrayFieldStart("errors");
+ for (String error : response.getErrors()) {
+ generator.writeString(error);
+ }
+ generator.writeEndArray();
+
+ generator.writeArrayFieldStart("warnings");
+ for (String error : response.getWarnings()) {
generator.writeString(error);
}
generator.writeEndArray();
+
+ generator.writeEndObject();
} catch (IOException e) {
throw new HiveException(e);
} finally {
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
index f4cbbd4..3b87824 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java
@@ -782,6 +782,7 @@ public final class MetaDataFormatUtils {
rpFormatter.endTriggers();
rpFormatter.startMappings();
for (Map.Entry<String, List<String>> mappingsOfType : mappings.entrySet()) {
+ mappingsOfType.getValue().sort(String::compareTo);
rpFormatter.formatMappingType(mappingsOfType.getKey(), mappingsOfType.getValue());
}
if (isDefault) {
@@ -807,6 +808,8 @@ public final class MetaDataFormatUtils {
for (PoolTreeNode child : children) {
child.sortChildren();
}
+ triggers.sort((WMTrigger t1, WMTrigger t2)
+ -> t1.getTriggerName().compareTo(t2.getTriggerName()));
}
static PoolTreeNode makePoolTree(WMFullResourcePlan fullRp) {
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index 50d2f57..88d5554 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -128,6 +129,7 @@ public interface MetaDataFormatter {
void showFullResourcePlan(DataOutputStream out, WMFullResourcePlan resourcePlan)
throws HiveException;
- void showErrors(DataOutputStream out, List<String> errors) throws HiveException;
+ void showErrors(DataOutputStream out, WMValidateResourcePlanResponse errors)
+ throws HiveException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 74c5998..b743df0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo;
import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -724,10 +725,16 @@ class TextMetaDataFormatter implements MetaDataFormatter {
out.write(str(val));
}
- public void showErrors(DataOutputStream out, List<String> errors) throws HiveException {
+ public void showErrors(DataOutputStream out, WMValidateResourcePlanResponse response)
+ throws HiveException {
try {
- for (String error : errors) {
- out.write(error.getBytes("UTF-8"));
+ for (String error : response.getErrors()) {
+ write(out, error);
+ out.write(terminator);
+ }
+ for (String warning : response.getWarnings()) {
+ write(out, "warn: ");
+ write(out, warning);
out.write(terminator);
}
} catch (IOException e) {
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index ef2aa34..78cbf25 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -469,6 +469,21 @@ ByteLengthLiteral
(Digit)+ ('b' | 'B' | 'k' | 'K' | 'm' | 'M' | 'g' | 'G')
;
+TimeFullLiteral
+ :
+ (Digit)+ ('NS' | 'NSEC' | 'NSECS' | 'NANOSECOND' | 'NANOSECONDS' |
+ 'US' | 'USEC' | 'USECS' | 'MICROSECOND' | 'MICROSECONDS' |
+ 'MS' | 'MSEC' | 'MSECS' | 'MILLISECOND' | 'MILLISECONDS' |
+ 'SEC' | 'SECS' | 'SECOND' | 'SECONDS' |
+ 'MIN' | 'MINS' | 'MINUTE' | 'MINUTES' |
+ 'HOUR' | 'HOURS' | 'DAY' | 'DAYS')
+ ;
+
+ByteLengthFullLiteral
+ :
+ (Digit)+ ('KB' | 'MB' | 'GB' | 'TB' | 'PB')
+ ;
+
Number
:
(Digit)+ ( DOT (Digit)* (Exponent)? | Exponent)?
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
index e6f0b22..bda3c21 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
@@ -32,8 +32,6 @@ import org.antlr.runtime.tree.TreeAdaptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.sun.tools.extcheck.Main;
-
import org.apache.hadoop.hive.ql.Context;
/**
@@ -294,7 +292,7 @@ public class ParseDriver {
throw new ParseException(parser.errors);
}
- return (ASTNode) r.getTree();
+ return r.getTree();
}
public ASTNode parseExpression(String command) throws ParseException {
LOG.info("Parsing expression: " + command);
@@ -321,4 +319,46 @@ public class ParseDriver {
return (ASTNode) r.getTree();
}
+
+ public ASTNode parseTriggerExpression(String command) throws ParseException {
+ HiveLexerX lexer = new HiveLexerX(new ANTLRNoCaseStringStream(command));
+ TokenRewriteStream tokens = new TokenRewriteStream(lexer);
+ HiveParser parser = new HiveParser(tokens);
+ parser.setTreeAdaptor(adaptor);
+ HiveParser_ResourcePlanParser.triggerExpressionStandalone_return r = null;
+ try {
+ r = parser.gResourcePlanParser.triggerExpressionStandalone();
+ } catch (RecognitionException e) {
+ e.printStackTrace();
+ throw new ParseException(parser.errors);
+ }
+ if (lexer.getErrors().size() != 0) {
+ throw new ParseException(lexer.getErrors());
+ } else if (parser.errors.size() != 0) {
+ throw new ParseException(parser.errors);
+ }
+
+ return r.getTree();
+ }
+
+ public ASTNode parseTriggerActionExpression(String command) throws ParseException {
+ HiveLexerX lexer = new HiveLexerX(new ANTLRNoCaseStringStream(command));
+ TokenRewriteStream tokens = new TokenRewriteStream(lexer);
+ HiveParser parser = new HiveParser(tokens);
+ parser.setTreeAdaptor(adaptor);
+ HiveParser_ResourcePlanParser.triggerActionExpressionStandalone_return r = null;
+ try {
+ r = parser.gResourcePlanParser.triggerActionExpressionStandalone();
+ } catch (RecognitionException e) {
+ e.printStackTrace();
+ throw new ParseException(parser.errors);
+ }
+ if (lexer.getErrors().size() != 0) {
+ throw new ParseException(lexer.getErrors());
+ } else if (parser.errors.size() != 0) {
+ throw new ParseException(parser.errors);
+ }
+
+ return r.getTree();
+ }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
index e3ea1f2..21f2d45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ResourcePlanParser.g
@@ -16,14 +16,33 @@
*/
parser grammar ResourcePlanParser;
-options
-{
+options {
output=AST;
ASTLabelType=ASTNode;
backtrack=false;
k=3;
}
+@members {
+ @Override
+ public Object recoverFromMismatchedSet(IntStream input,
+ RecognitionException re, BitSet follow) throws RecognitionException {
+ return gParent.recoverFromMismatchedSet(input, re, follow);
+ }
+
+ @Override
+ public void displayRecognitionError(String[] tokenNames,
+ RecognitionException e) {
+ gParent.displayRecognitionError(tokenNames, e);
+ }
+}
+
+@rulecatch {
+ catch (RecognitionException e) {
+ throw e;
+ }
+}
+
resourcePlanDdlStatements
: createResourcePlanStatement
| alterResourcePlanStatement
@@ -133,9 +152,15 @@ poolPath
triggerExpression
@init { gParent.pushMsg("triggerExpression", state); }
@after { gParent.popMsg(state); }
- : triggerOrExpression -> ^(TOK_TRIGGER_EXPRESSION triggerOrExpression)
+ : triggerAtomExpression -> ^(TOK_TRIGGER_EXPRESSION triggerAtomExpression)
;
+triggerExpressionStandalone : triggerExpression EOF ;
+
+/*
+ The rules triggerOrExpression and triggerAndExpression are not being used right now.
+ Only > operator is supported, this should be changed if logic in ExpressionFactory changes.
+*/
triggerOrExpression
@init { gParent.pushMsg("triggerOrExpression", state); }
@after { gParent.popMsg(state); }
@@ -151,22 +176,21 @@ triggerAndExpression
triggerAtomExpression
@init { gParent.pushMsg("triggerAtomExpression", state); }
@after { gParent.popMsg(state); }
- : (identifier comparisionOperator triggerLiteral)
- | (LPAREN triggerOrExpression RPAREN)
+ : identifier comparisionOperator triggerLiteral
;
triggerLiteral
@init { gParent.pushMsg("triggerLiteral", state); }
@after { gParent.popMsg(state); }
- : (Number (KW_HOUR|KW_MINUTE|KW_SECOND)?)
- | ByteLengthLiteral
- | StringLiteral
+ : Number
+ | TimeFullLiteral
+ | ByteLengthFullLiteral
;
comparisionOperator
@init { gParent.pushMsg("comparisionOperator", state); }
@after { gParent.popMsg(state); }
- : EQUAL | LESSTHAN | LESSTHANOREQUALTO | GREATERTHAN | GREATERTHANOREQUALTO
+ : GREATERTHAN
;
triggerActionExpression
@@ -176,6 +200,8 @@ triggerActionExpression
| (KW_MOVE^ KW_TO! poolPath)
;
+triggerActionExpressionStandalone : triggerActionExpression EOF ;
+
createTriggerStatement
@init { gParent.pushMsg("create trigger statement", state); }
@after { gParent.popMsg(state); }
@@ -263,7 +289,7 @@ createMappingStatement
alterMappingStatement
@init { gParent.pushMsg("alter mapping statement", state); }
@after { gParent.popMsg(state); }
- : (KW_ALTER mappingType=(KW_USER | KW_GROUP | KW_APPLICATION) KW_MAPPING
+ : (KW_ALTER mappingType=(KW_USER | KW_GROUP | KW_APPLICATION)
KW_MAPPING name=StringLiteral
KW_IN rpName=identifier ((KW_TO path=poolPath) | unmanaged)
(KW_WITH KW_ORDER order=Number)?)
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/wm/Action.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/wm/Action.java b/ql/src/java/org/apache/hadoop/hive/ql/wm/Action.java
index 921ad54..7258ad5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/wm/Action.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/wm/Action.java
@@ -17,6 +17,12 @@ package org.apache.hadoop.hive.ql.wm;
import java.util.Objects;
+import org.antlr.runtime.tree.Tree;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
+import org.apache.hadoop.hive.ql.parse.ParseException;
+
/**
* Action that gets invoked for trigger violations.
*/
@@ -46,16 +52,40 @@ public class Action {
private final String poolName;
public static Action fromMetastoreExpression(String metastoreActionExpression) {
- if (metastoreActionExpression.equalsIgnoreCase(Type.KILL_QUERY.getDisplayName())) {
+ ParseDriver driver = new ParseDriver();
+ ASTNode node = null;
+ try {
+ node = driver.parseTriggerActionExpression(metastoreActionExpression);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException(
+ "Invalid action expression: " + metastoreActionExpression, e);
+ }
+ if (node == null || node.getChildCount() != 2 ||
+ node.getChild(1).getType() != HiveParser.EOF) {
+ throw new IllegalArgumentException(
+ "Invalid action expression: " + metastoreActionExpression);
+ }
+ node = (ASTNode) node.getChild(0);
+ switch (node.getType()) {
+ case HiveParser.KW_KILL:
+ if (node.getChildCount() != 0) {
+ throw new IllegalArgumentException("Invalid KILL action");
+ }
return new Action(Type.KILL_QUERY);
- } else {
- final String poolName = metastoreActionExpression.substring(Type.MOVE_TO_POOL.getDisplayName().length()).trim();
- if (poolName.isEmpty()) {
- throw new IllegalArgumentException("Invalid move action expression (" + metastoreActionExpression + "). Pool " +
- "name is empty");
- } else {
- return new Action(Type.MOVE_TO_POOL, poolName);
+ case HiveParser.KW_MOVE: {
+ if (node.getChildCount() != 1) {
+ throw new IllegalArgumentException("Invalid move to action, expected poolPath");
+ }
+ Tree poolNode = node.getChild(0);
+ StringBuilder poolPath = new StringBuilder(poolNode.getText());
+ for (int i = 0; i < poolNode.getChildCount(); ++i) {
+ poolPath.append(poolNode.getChild(i).getText());
}
+ return new Action(Type.MOVE_TO_POOL, poolPath.toString());
+ }
+ default:
+ throw new IllegalArgumentException("Unhandled action expression, type: " + node.getType() +
+ ": " + metastoreActionExpression);
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/java/org/apache/hadoop/hive/ql/wm/ExpressionFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/wm/ExpressionFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/wm/ExpressionFactory.java
index 953faa8..2299a1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/wm/ExpressionFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/wm/ExpressionFactory.java
@@ -19,6 +19,10 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.Validator;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.ParseDriver;
+import org.apache.hadoop.hive.ql.parse.ParseException;
/**
* Factory to create expressions
@@ -30,16 +34,33 @@ public class ExpressionFactory {
return null;
}
- // TODO: Only ">" predicate is supported right now, this has to be extended to support expression tree when
- // multiple conditions are required. HIVE-17622
+ ParseDriver driver = new ParseDriver();
+ ASTNode node = null;
+ try {
+ node = driver.parseTriggerExpression(expression);
+ } catch (ParseException e) {
+ throw new IllegalArgumentException("Invalid expression: " + expression, e);
+ }
+ if (node.getChildCount() == 2 && node.getChild(1).getType() == HiveParser.EOF) {
+ node = (ASTNode) node.getChild(0);
+ }
+ if (node.getType() != HiveParser.TOK_TRIGGER_EXPRESSION) {
+ throw new IllegalArgumentException(
+ "Expected trigger expression, got: " + node.toStringTree());
+ }
+
+ if (node.getChildCount() != 3) {
+ throw new IllegalArgumentException("Only single > condition supported: " + expression);
+ }
- String[] tokens = expression.split(Expression.Predicate.GREATER_THAN.getSymbol());
- if (tokens.length != 2) {
+ // Only ">" predicate is supported right now, this has to be extended to support
+ // expression tree when multiple conditions are required. HIVE-17622
+ if (node.getChild(1).getType() != HiveParser.GREATERTHAN) {
throw new IllegalArgumentException("Invalid predicate in expression");
}
- final String counterName = tokens[0].trim();
- final String counterValueStr = tokens[1].trim();
+ final String counterName = node.getChild(0).getText();
+ final String counterValueStr = node.getChild(2).getText().toLowerCase();
if (counterName.isEmpty()) {
throw new IllegalArgumentException("Counter name cannot be empty!");
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
index ed854e7..9e8d290 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/formatting/TestJsonRPFormatter.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.util.ArrayList;
+import java.util.HashSet;
import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
import org.apache.hadoop.hive.metastore.api.WMMapping;
@@ -152,8 +153,13 @@ public class TestJsonRPFormatter {
JsonNode type0 = pool2.get("mappings").get(0);
assertEquals("user", type0.get("type").asText());
assertTrue(type0.get("values").isArray());
- assertEquals("foo", type0.get("values").get(0).asText());
- assertEquals("bar", type0.get("values").get(1).asText());
+ assertEquals(2, type0.get("values").size());
+ HashSet<String> vals = new HashSet<>();
+ for (int i = 0; i < type0.get("values").size(); ++i) {
+ vals.add(type0.get("values").get(i).asText());
+ }
+ assertTrue(vals.contains("foo"));
+ assertTrue(vals.contains("bar"));
JsonNode pool1 = jsonTree.get("pools").get(1);
assertEquals("pool1", pool1.get("name").asText());
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/test/org/apache/hadoop/hive/ql/wm/TestExpressionFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestExpressionFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestExpressionFactory.java
new file mode 100644
index 0000000..074794c
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestExpressionFactory.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.wm;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.hadoop.hive.ql.wm.Expression.Predicate;
+import org.junit.Test;
+
+public class TestExpressionFactory {
+ @Test
+ public void testSize() {
+ Expression expr = null;
+
+ expr = ExpressionFactory.fromString("BYTES_READ > 5");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("BYTES_READ", expr.getCounterLimit().getName());
+ assertEquals(5, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("BYTES_READ > 5kb");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("BYTES_READ", expr.getCounterLimit().getName());
+ assertEquals(5 * (1 << 10), expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("BYTES_READ > 2mb");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("BYTES_READ", expr.getCounterLimit().getName());
+ assertEquals(2 * (1 << 20), expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("BYTES_READ > 3gb");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("BYTES_READ", expr.getCounterLimit().getName());
+ assertEquals(3L * (1 << 30), expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("SHUFFLE_BYTES > 7tb");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("SHUFFLE_BYTES", expr.getCounterLimit().getName());
+ assertEquals(7L * (1L << 40), expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("SHUFFLE_BYTES > 6pb");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("SHUFFLE_BYTES", expr.getCounterLimit().getName());
+ assertEquals(6L * (1L << 50), expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("BYTES_WRITTEN > 27");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("BYTES_WRITTEN", expr.getCounterLimit().getName());
+ assertEquals(27, expr.getCounterLimit().getLimit());
+ }
+
+ @Test
+ public void testTime() {
+ Expression expr = null;
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(1, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1ms");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(1, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1sec");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(1000, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1min");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(60 * 1000, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1hour");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(3600 * 1000, expr.getCounterLimit().getLimit());
+
+ expr = ExpressionFactory.fromString("ELAPSED_TIME > 1day");
+ assertNotNull(expr);
+ assertEquals(Predicate.GREATER_THAN, expr.getPredicate());
+ assertEquals("ELAPSED_TIME", expr.getCounterLimit().getName());
+ assertEquals(24 * 3600 * 1000, expr.getCounterLimit().getLimit());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
index b686783..a3e8336 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/wm/TestTrigger.java
@@ -185,25 +185,25 @@ public class TestTrigger {
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 1 gB");
+ expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 1gB");
expected = ExpressionFactory.createExpression(new FileSystemCounterLimit("",
FileSystemCounterLimit.FSCounter.SHUFFLE_BYTES, 1024 * 1024 * 1024));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 1 TB");
+ expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 1TB");
expected = ExpressionFactory.createExpression(new FileSystemCounterLimit("",
FileSystemCounterLimit.FSCounter.SHUFFLE_BYTES, 1024L * 1024 * 1024 * 1024));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 100 B");
+ expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 100");
expected = ExpressionFactory.createExpression(new FileSystemCounterLimit("",
FileSystemCounterLimit.FSCounter.SHUFFLE_BYTES, 100));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 100bytes");
+ expression = ExpressionFactory.fromString(" SHUFFLE_BYTES > 100");
expected = ExpressionFactory.createExpression(new FileSystemCounterLimit("",
FileSystemCounterLimit.FSCounter.SHUFFLE_BYTES, 100));
assertEquals(expected, expression);
@@ -213,38 +213,38 @@ public class TestTrigger {
@Test
public void testIllegalSizeCounterValue1() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid size unit");
+ thrown.expectMessage("Invalid expression: SHUFFLE_BYTES > 300GiB");
ExpressionFactory.fromString(" SHUFFLE_BYTES > 300GiB");
}
@Test
public void testIllegalSizeCounterValue2() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid size unit");
+ thrown.expectMessage("Invalid expression: SHUFFLE_BYTES > 300 foo");
ExpressionFactory.fromString(" SHUFFLE_BYTES > 300 foo");
}
@Test
public void testTimeValidationInTrigger() {
- Expression expression = ExpressionFactory.fromString(" elapsed_TIME > 300 s");
+ Expression expression = ExpressionFactory.fromString(" elapsed_TIME > 300sec");
Expression expected = ExpressionFactory.createExpression(new TimeCounterLimit(TimeCounterLimit.TimeCounter
.ELAPSED_TIME, 300000));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" elapsed_TIME > 300 seconds");
+ expression = ExpressionFactory.fromString(" elapsed_TIME > 300seconds");
expected = ExpressionFactory.createExpression(new TimeCounterLimit(TimeCounterLimit.TimeCounter
.ELAPSED_TIME, 300000));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" elapsed_TIME > 300 sec");
+ expression = ExpressionFactory.fromString(" elapsed_TIME > 300sec");
expected = ExpressionFactory.createExpression(new TimeCounterLimit(TimeCounterLimit.TimeCounter
.ELAPSED_TIME, 300000));
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" elapsed_TIME > 300s");
+ expression = ExpressionFactory.fromString(" elapsed_TIME > 300second");
expected = ExpressionFactory.createExpression(new TimeCounterLimit(TimeCounterLimit.TimeCounter
.ELAPSED_TIME, 300000));
assertEquals(expected, expression);
@@ -262,7 +262,7 @@ public class TestTrigger {
assertEquals(expected, expression);
assertEquals(expected.hashCode(), expression.hashCode());
- expression = ExpressionFactory.fromString(" elapsed_TIME > 300000000 microseconds");
+ expression = ExpressionFactory.fromString(" elapsed_TIME > 300000000microseconds");
expected = ExpressionFactory.createExpression(new TimeCounterLimit(TimeCounterLimit.TimeCounter
.ELAPSED_TIME, 300000));
assertEquals(expected, expression);
@@ -278,14 +278,14 @@ public class TestTrigger {
@Test
public void testIllegalTimeCounterValue1() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid time unit");
- ExpressionFactory.fromString(" elapsed_TIME > 300 light years");
+ thrown.expectMessage("Invalid expression: elapsed_TIME > 300lightyears");
+ ExpressionFactory.fromString(" elapsed_TIME > 300lightyears");
}
@Test
public void testIllegalTimeCounterValue2() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid time unit");
+ thrown.expectMessage("Invalid expression: elapsed_TIME > 300secTOR");
ExpressionFactory.fromString(" elapsed_TIME > 300secTOR");
}
@@ -296,7 +296,7 @@ public class TestTrigger {
assertEquals("MOVE TO etl", Action.fromMetastoreExpression("MOVE TO etl").toString());
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid move action expression (MOVE TO ). Pool name is empty");
+ thrown.expectMessage("Invalid action expression: MOVE TO ");
assertEquals(Action.Type.MOVE_TO_POOL, Action.fromMetastoreExpression("MOVE TO ").getType());
}
@@ -327,56 +327,56 @@ public class TestTrigger {
@Test
public void testIllegalExpressionsUnsupportedPredicate() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid predicate in expression");
+ thrown.expectMessage("Invalid expression: BYTES_READ < 1024");
ExpressionFactory.fromString("BYTES_READ < 1024");
}
@Test
public void testIllegalExpressionsMissingLimit() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid predicate in expression");
+ thrown.expectMessage("Invalid expression: BYTES_READ >");
ExpressionFactory.fromString("BYTES_READ >");
}
@Test
public void testIllegalExpressionsMissingCounter() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Counter name cannot be empty!");
+ thrown.expectMessage("Invalid expression: > 1024");
ExpressionFactory.fromString("> 1024");
}
@Test
public void testIllegalExpressionsMultipleLimit() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid predicate in expression");
+ thrown.expectMessage("Invalid expression: BYTES_READ > 1024 > 1025");
ExpressionFactory.fromString("BYTES_READ > 1024 > 1025");
}
@Test
public void testIllegalExpressionsMultipleCounters() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid predicate in expression");
+ thrown.expectMessage("Invalid expression: BYTES_READ > BYTES_READ > 1025");
ExpressionFactory.fromString("BYTES_READ > BYTES_READ > 1025");
}
@Test
public void testIllegalExpressionsInvalidLimitPost() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid size unit");
+ thrown.expectMessage("Invalid expression: BYTES_READ > 1024aaaa");
ExpressionFactory.fromString("BYTES_READ > 1024aaaa");
}
@Test
public void testIllegalExpressionsInvalidLimitPre() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Invalid counter value");
+ thrown.expectMessage("Invalid expression: BYTES_READ > foo1024");
ExpressionFactory.fromString("BYTES_READ > foo1024");
}
@Test
public void testIllegalExpressionsInvalidNegativeLimit() {
thrown.expect(IllegalArgumentException.class);
- thrown.expectMessage("Illegal value for counter limit. Expected a positive long value.");
+ thrown.expectMessage("Invalid expression: BYTES_READ > -1024");
ExpressionFactory.fromString("BYTES_READ > -1024");
}
}
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/test/queries/clientpositive/resourceplan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/resourceplan.q b/ql/src/test/queries/clientpositive/resourceplan.q
index d2aec73..7314585 100644
--- a/ql/src/test/queries/clientpositive/resourceplan.q
+++ b/ql/src/test/queries/clientpositive/resourceplan.q
@@ -27,7 +27,8 @@ SHOW RESOURCE PLAN plan_1;
SELECT * FROM SYS.WM_RESOURCEPLANS;
-- Create and show plan_2.
-CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=4;
+CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5;
+ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10;
SHOW RESOURCE PLANS;
SHOW RESOURCE PLAN plan_2;
SELECT * FROM SYS.WM_RESOURCEPLANS;
@@ -147,30 +148,38 @@ SELECT * FROM SYS.WM_RESOURCEPLANS;
CREATE RESOURCE PLAN plan_1;
-CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10k AND BYTES_READ <= 1M OR ELAPSED_TIME > 30 SECOND AND ELAPSED_TIME < 1 MINUTE DO KILL;
+CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10kb DO KILL;
SELECT * FROM SYS.WM_TRIGGERS;
-- Duplicate should fail.
-CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ = 10G DO KILL;
+CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL;
-CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 100 DO MOVE TO slow_pool;
+-- Invalid triggers should fail.
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > 30sec AND BYTES_READ > 10 DO MOVE TO slow_pool;
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > 30second OR BYTES_READ > 10 DO MOVE TO slow_pool;
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME >= 30seconds DO MOVE TO slow_pool;
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME < 30hour DO MOVE TO slow_pool;
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME <= 30min DO MOVE TO slow_pool;
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME = 0day DO MOVE TO slow_pool;
+
+CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > 30hour DO MOVE TO slow_pool;
SELECT * FROM SYS.WM_TRIGGERS;
-ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ = 1000 DO KILL;
+ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 1min DO KILL;
SELECT * FROM SYS.WM_TRIGGERS;
DROP TRIGGER plan_1.trigger_1;
SELECT * FROM SYS.WM_TRIGGERS;
-- No edit on active resource plan.
-CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0m DO MOVE TO null_pool;
+CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 100mb DO MOVE TO null_pool;
-- Add trigger with reserved keywords.
-CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`;
-CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`;
-CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`;
+CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100KB DO MOVE TO `table`;
+CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100MB DO MOVE TO `default`;
+CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1GB DO MOVE TO `default`;
CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL;
-CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL;
+CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > 1hour DO KILL;
SELECT * FROM SYS.WM_TRIGGERS;
DROP TRIGGER `table`.`database`;
SELECT * FROM SYS.WM_TRIGGERS;
@@ -179,17 +188,17 @@ SELECT * FROM SYS.WM_TRIGGERS;
ALTER RESOURCE PLAN plan_1 ENABLE;
SELECT * FROM SYS.WM_RESOURCEPLANS;
DROP TRIGGER plan_1.trigger_2;
-ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000g DO KILL;
+ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 1000gb DO KILL;
-- Cannot drop/change trigger from active plan.
ALTER RESOURCE PLAN plan_1 ACTIVATE;
SELECT * FROM SYS.WM_RESOURCEPLANS;
DROP TRIGGER plan_1.trigger_2;
-ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000K DO KILL;
+ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 1000KB DO KILL;
-- Once disabled we should be able to change it.
ALTER RESOURCE PLAN plan_2 DISABLE;
-CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0 DO MOVE TO null_pool;
+CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool;
SELECT * FROM SYS.WM_TRIGGERS;
@@ -212,13 +221,13 @@ CREATE POOL plan_2.default.c1 WITH
ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair';
CREATE POOL plan_2.default.c2 WITH
- QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.2;
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7;
--- Cannot activate c1 + c2 = 0.5
+-- Cannot activate c1 + c2 = 1.0
ALTER RESOURCE PLAN plan_2 VALIDATE;
ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE;
-ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1;
+ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.5, QUERY_PARALLELISM = 1;
ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair';
SELECT * FROM SYS.WM_POOLS;
ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY;
@@ -371,8 +380,8 @@ SELECT * FROM SYS.WM_MAPPINGS;
CREATE RESOURCE PLAN plan_4a LIKE plan_4;
CREATE POOL plan_4a.pool1 WITH SCHEDULING_POLICY='fair', QUERY_PARALLELISM=2, ALLOC_FRACTION=0.0;
CREATE USER MAPPING "user1" IN plan_4a TO pool1;
-CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ = 10G DO KILL;
-CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ = 11G DO KILL;
+CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > 10GB DO KILL;
+CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > 11GB DO KILL;
ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2;
CREATE RESOURCE PLAN plan_4b LIKE plan_4a;
http://git-wip-us.apache.org/repos/asf/hive/blob/90d236af/ql/src/test/results/clientpositive/llap/resourceplan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index 68508e9..b23720d 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -3210,22 +3210,26 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 DISABLED NULL default
-PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=4
+PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5
PREHOOK: type: CREATE RESOURCEPLAN
-POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=4
+POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM=5
POSTHOOK: type: CREATE RESOURCEPLAN
+PREHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10
+PREHOOK: type: ALTER RESOURCEPLAN
+POSTHOOK: query: ALTER RESOURCE PLAN plan_2 SET QUERY_PARALLELISM=10
+POSTHOOK: type: ALTER RESOURCEPLAN
PREHOOK: query: SHOW RESOURCE PLANS
PREHOOK: type: SHOW RESOURCEPLAN
POSTHOOK: query: SHOW RESOURCE PLANS
POSTHOOK: type: SHOW RESOURCEPLAN
plan_1 DISABLED
-plan_2 DISABLED 4
+plan_2 DISABLED 10
PREHOOK: query: SHOW RESOURCE PLAN plan_2
PREHOOK: type: SHOW RESOURCEPLAN
POSTHOOK: query: SHOW RESOURCE PLAN plan_2
POSTHOOK: type: SHOW RESOURCEPLAN
-plan_2[status=DISABLED,parallelism=4,defaultPool=default]
- + default[allocFraction=1.0,schedulingPolicy=null,parallelism=4]
+plan_2[status=DISABLED,parallelism=10,defaultPool=default]
+ + default[allocFraction=1.0,schedulingPolicy=null,parallelism=5]
| mapped for default
PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
PREHOOK: type: QUERY
@@ -3236,7 +3240,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 DISABLED NULL default
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
FAILED: SemanticException Invalid create arguments (tok_create_rp plan_3 (tok_query_parallelism 5) (tok_default_pool all))
PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3250,7 +3254,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 DISABLED NULL default
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
@@ -3263,7 +3267,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 4
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3277,7 +3281,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED 4 default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 UNSET QUERY_PARALLELISM
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3291,7 +3295,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = default1
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3304,7 +3308,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3328,7 +3332,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3341,7 +3345,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3355,7 +3359,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 DISABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3369,7 +3373,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ENABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3383,7 +3387,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3397,7 +3401,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3410,7 +3414,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3423,7 +3427,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: DISABLE WORKLOAD MANAGEMENT
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3437,7 +3441,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ENABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3455,7 +3459,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 DISABLED 4 default
+plan_2 DISABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3469,7 +3473,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ENABLED 4 default
+plan_2 ENABLED 10 default
plan_3 ACTIVE NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3483,7 +3487,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
plan_3 ENABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3497,7 +3501,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
plan_3 ENABLED NULL default
PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3511,7 +3515,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
plan_3 DISABLED NULL default
PREHOOK: query: DROP RESOURCE PLAN plan_2
PREHOOK: type: DROP RESOURCEPLAN
@@ -3528,7 +3532,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
PREHOOK: query: CREATE RESOURCE PLAN `table`
PREHOOK: type: CREATE RESOURCEPLAN
POSTHOOK: query: CREATE RESOURCE PLAN `table`
@@ -3545,15 +3549,15 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
table DISABLED 1 default
PREHOOK: query: CREATE RESOURCE PLAN plan_1
PREHOOK: type: CREATE RESOURCEPLAN
POSTHOOK: query: CREATE RESOURCE PLAN plan_1
POSTHOOK: type: CREATE RESOURCEPLAN
-PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10k AND BYTES_READ <= 1M OR ELAPSED_TIME > 30 SECOND AND ELAPSED_TIME < 1 MINUTE DO KILL
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10kb DO KILL
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10k AND BYTES_READ <= 1M OR ELAPSED_TIME > 30 SECOND AND ELAPSED_TIME < 1 MINUTE DO KILL
+POSTHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 10kb DO KILL
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
PREHOOK: type: QUERY
@@ -3563,13 +3567,19 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_1 BYTES_READ > 10k AND BYTES_READ <= 1M OR ELAPSED_TIME > 30 SECOND AND ELAPSED_TIME < 1 MINUTE KILL
-PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN BYTES_READ = 10G DO KILL
+plan_1 trigger_1 BYTES_READ > 10kb KILL
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_1 WHEN ELAPSED_TIME > 300 DO KILL
PREHOOK: type: CREATE TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Trigger already exists, use alter: )
-PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 100 DO MOVE TO slow_pool
+FAILED: ParseException line 4:58 mismatched input 'AND' expecting DO near '30sec' in create trigger statement
+FAILED: ParseException line 2:61 mismatched input 'OR' expecting DO near '30second' in create trigger statement
+FAILED: ParseException line 2:50 mismatched input '>=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '<' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '<=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+FAILED: ParseException line 2:50 mismatched input '=' expecting > near 'ELAPSED_TIME' in comparisionOperator
+PREHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > 30hour DO MOVE TO slow_pool
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 100 DO MOVE TO slow_pool
+POSTHOOK: query: CREATE TRIGGER plan_1.trigger_2 WHEN ELAPSED_TIME > 30hour DO MOVE TO slow_pool
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
PREHOOK: type: QUERY
@@ -3579,11 +3589,11 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_1 BYTES_READ > 10k AND BYTES_READ <= 1M OR ELAPSED_TIME > 30 SECOND AND ELAPSED_TIME < 1 MINUTE KILL
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ = 1000 DO KILL
+plan_1 trigger_1 BYTES_READ > 10kb KILL
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 1min DO KILL
PREHOOK: type: ALTER TRIGGER
-POSTHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ = 1000 DO KILL
+POSTHOOK: query: ALTER TRIGGER plan_1.trigger_1 WHEN BYTES_READ > 1min DO KILL
POSTHOOK: type: ALTER TRIGGER
PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
PREHOOK: type: QUERY
@@ -3593,8 +3603,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_1 BYTES_READ = 1000 KILL
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
+plan_1 trigger_1 BYTES_READ > 1min KILL
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
PREHOOK: query: DROP TRIGGER plan_1.trigger_1
PREHOOK: type: DROP TRIGGER
POSTHOOK: query: DROP TRIGGER plan_1.trigger_1
@@ -3607,29 +3617,29 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0m DO MOVE TO null_pool
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 100mb DO MOVE TO null_pool
PREHOOK: type: CREATE TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
-PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`
+PREHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100KB DO MOVE TO `table`
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100K DO MOVE TO `table`
+POSTHOOK: query: CREATE TRIGGER `table`.`table` WHEN BYTES_WRITTEN > 100KB DO MOVE TO `table`
POSTHOOK: type: CREATE TRIGGER
-PREHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`
+PREHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100MB DO MOVE TO `default`
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100K DO MOVE TO `default`
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger` WHEN BYTES_WRITTEN > 100MB DO MOVE TO `default`
POSTHOOK: type: CREATE TRIGGER
-PREHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`
+PREHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1GB DO MOVE TO `default`
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1M DO MOVE TO `default`
+POSTHOOK: query: CREATE TRIGGER `table`.`database` WHEN BYTES_WRITTEN > 1GB DO MOVE TO `default`
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
PREHOOK: type: CREATE TRIGGER
POSTHOOK: query: CREATE TRIGGER `table`.`trigger1` WHEN ELAPSED_TIME > 10 DO KILL
POSTHOOK: type: CREATE TRIGGER
-PREHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL
+PREHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > 1hour DO KILL
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN BYTES_READ > 100 DO KILL
+POSTHOOK: query: CREATE TRIGGER `table`.`trigger2` WHEN ELAPSED_TIME > 1hour DO KILL
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
PREHOOK: type: QUERY
@@ -3639,12 +3649,12 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-table database BYTES_WRITTEN > 1M MOVE TO default
-table table BYTES_WRITTEN > 100K MOVE TO table
-table trigger BYTES_WRITTEN > 100K MOVE TO default
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+table database BYTES_WRITTEN > 1GB MOVE TO default
+table table BYTES_WRITTEN > 100KB MOVE TO table
+table trigger BYTES_WRITTEN > 100MB MOVE TO default
table trigger1 ELAPSED_TIME > 10 KILL
-table trigger2 BYTES_READ > 100 KILL
+table trigger2 ELAPSED_TIME > 1hour KILL
PREHOOK: query: DROP TRIGGER `table`.`database`
PREHOOK: type: DROP TRIGGER
POSTHOOK: query: DROP TRIGGER `table`.`database`
@@ -3657,11 +3667,11 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-table table BYTES_WRITTEN > 100K MOVE TO table
-table trigger BYTES_WRITTEN > 100K MOVE TO default
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+table table BYTES_WRITTEN > 100KB MOVE TO table
+table trigger BYTES_WRITTEN > 100MB MOVE TO default
table trigger1 ELAPSED_TIME > 10 KILL
-table trigger2 BYTES_READ > 100 KILL
+table trigger2 ELAPSED_TIME > 1hour KILL
PREHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_1 ENABLE
@@ -3675,12 +3685,12 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 ENABLED NULL default
-plan_2 ACTIVE 4 default
+plan_2 ACTIVE 10 default
table DISABLED 1 default
PREHOOK: query: DROP TRIGGER plan_1.trigger_2
PREHOOK: type: DROP TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
-PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000g DO KILL
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 1000gb DO KILL
PREHOOK: type: ALTER TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
PREHOOK: query: ALTER RESOURCE PLAN plan_1 ACTIVATE
@@ -3696,21 +3706,21 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 ACTIVE NULL default
-plan_2 ENABLED 4 default
+plan_2 ENABLED 10 default
table DISABLED 1 default
PREHOOK: query: DROP TRIGGER plan_1.trigger_2
PREHOOK: type: DROP TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
-PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ = 1000K DO KILL
+PREHOOK: query: ALTER TRIGGER plan_1.trigger_2 WHEN BYTES_READ > 1000KB DO KILL
PREHOOK: type: ALTER TRIGGER
FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
PREHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_2 DISABLE
POSTHOOK: type: ALTER RESOURCEPLAN
-PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0 DO MOVE TO null_pool
+PREHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ = 0 DO MOVE TO null_pool
+POSTHOOK: query: CREATE TRIGGER plan_2.trigger_1 WHEN BYTES_READ > 0 DO MOVE TO null_pool
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
PREHOOK: type: QUERY
@@ -3720,12 +3730,12 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-plan_2 trigger_1 BYTES_READ = 0 MOVE TO null_pool
-table table BYTES_WRITTEN > 100K MOVE TO table
-table trigger BYTES_WRITTEN > 100K MOVE TO default
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+plan_2 trigger_1 BYTES_READ > 0 MOVE TO null_pool
+table table BYTES_WRITTEN > 100KB MOVE TO table
+table trigger BYTES_WRITTEN > 100MB MOVE TO default
table trigger1 ELAPSED_TIME > 10 KILL
-table trigger2 BYTES_READ > 100 KILL
+table trigger2 ELAPSED_TIME > 1hour KILL
PREHOOK: query: CREATE POOL plan_1.default WITH
ALLOC_FRACTION=1.0, QUERY_PARALLELISM=5, SCHEDULING_POLICY='default'
PREHOOK: type: CREATE POOL
@@ -3743,7 +3753,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 default 1.0 4 NULL
+plan_2 default 1.0 5 NULL
table default 1.0 4 NULL
FAILED: SemanticException Invalid scheduling policy invalid
PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
@@ -3753,23 +3763,22 @@ POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH
ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
POSTHOOK: type: CREATE POOL
PREHOOK: query: CREATE POOL plan_2.default.c2 WITH
- QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.2
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7
PREHOOK: type: CREATE POOL
POSTHOOK: query: CREATE POOL plan_2.default.c2 WITH
- QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.2
+ QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7
POSTHOOK: type: CREATE POOL
PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
POSTHOOK: type: ALTER RESOURCEPLAN
-Sum of children pools' alloc fraction should be equal 1.0 got: 0.5 for pool: default
-Sum of children pools' query parallelism: 5 is not equal to pool parallelism: 4 for pool: default
+Sum of children pools' alloc fraction should be less than 1 got: 1.0 for pool: default
PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be equal 1.0 got: 0.5 for pool: default, Sum of children pools' query parallelism: 5 is not equal to pool parallelism: 4 for pool: default])
-PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:ResourcePlan: plan_2 is invalid: [Sum of children pools' alloc fraction should be less than 1 got: 1.0 for pool: default])
+PREHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.5, QUERY_PARALLELISM = 1
PREHOOK: type: ALTER POOL
-POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1
+POSTHOOK: query: ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.5, QUERY_PARALLELISM = 1
POSTHOOK: type: ALTER POOL
PREHOOK: query: ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair'
PREHOOK: type: ALTER POOL
@@ -3784,9 +3793,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 default 1.0 4 NULL
+plan_2 default 1.0 5 NULL
plan_2 default.c1 0.3 3 fair
-plan_2 default.c2 0.7 1 fair
+plan_2 default.c2 0.5 1 fair
table default 1.0 4 NULL
PREHOOK: query: ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY
PREHOOK: type: ALTER POOL
@@ -3801,14 +3810,15 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 default 1.0 4 NULL
+plan_2 default 1.0 5 NULL
plan_2 default.c1 0.3 3 fair
-plan_2 default.c2 0.7 1 NULL
+plan_2 default.c2 0.5 1 NULL
table default 1.0 4 NULL
PREHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_2 VALIDATE
POSTHOOK: type: ALTER RESOURCEPLAN
+warn: Sum of all pools' query parallelism: 9 is less than resource plan query parallelism: 10
PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
PREHOOK: type: ALTER RESOURCEPLAN
POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE
@@ -3834,9 +3844,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
PREHOOK: query: DROP POOL plan_2.default
PREHOOK: type: DROP POOL
@@ -3850,9 +3860,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH
QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8
@@ -3895,9 +3905,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
table table 0.0 1 fifo
table table.pool1 0.9 3 fair
@@ -3916,9 +3926,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
table table 0.0 1 fifo
table table.pool 0.9 3 fair
@@ -3936,9 +3946,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
table table 0.0 1 fifo
table table.pool 0.9 3 fair
@@ -3956,9 +3966,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table default 1.0 4 NULL
table table 0.0 1 fifo
table table.pool 0.9 3 fair
@@ -3973,7 +3983,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 ACTIVE NULL default
-plan_2 DISABLED 4 def
+plan_2 DISABLED 10 def
table DISABLED 1 default
PREHOOK: query: ALTER RESOURCE PLAN `table` SET DEFAULT POOL = `table`.pool
PREHOOK: type: ALTER RESOURCEPLAN
@@ -3992,9 +4002,9 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_pools
#### A masked pattern was here ####
plan_1 default 1.0 4 NULL
-plan_2 def 1.0 4 NULL
+plan_2 def 1.0 5 NULL
plan_2 def.c1 0.3 3 fair
-plan_2 def.c2 0.7 1 NULL
+plan_2 def.c2 0.5 1 NULL
table table 0.0 1 fifo
table table.pool 0.9 3 fair
table table.pool.child1 0.3 1 fair
@@ -4012,7 +4022,7 @@ POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_resourceplans
#### A masked pattern was here ####
plan_1 ACTIVE NULL default
-plan_2 DISABLED 4 def
+plan_2 DISABLED 10 def
table DISABLED 1 NULL
PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
PREHOOK: type: ALTER POOL
@@ -4068,18 +4078,18 @@ POSTHOOK: query: SHOW RESOURCE PLAN `table`
POSTHOOK: type: SHOW RESOURCEPLAN
table[status=DISABLED,parallelism=1,defaultPool=null]
+ table[allocFraction=0.0,schedulingPolicy=fifo,parallelism=1]
- | trigger table: if (BYTES_WRITTEN > 100K) { MOVE TO table }
+ | trigger table: if (BYTES_WRITTEN > 100KB) { MOVE TO table }
+ pool[allocFraction=0.9,schedulingPolicy=fair,parallelism=3]
+ child2[allocFraction=0.7,schedulingPolicy=fair,parallelism=3]
- | trigger trigger2: if (BYTES_READ > 100) { KILL }
| trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
+ | trigger trigger2: if (ELAPSED_TIME > 1hour) { KILL }
+ child1[allocFraction=0.3,schedulingPolicy=fair,parallelism=1]
+ | trigger table: if (BYTES_WRITTEN > 100KB) { MOVE TO table }
| trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
- | trigger table: if (BYTES_WRITTEN > 100K) { MOVE TO table }
+ <unmanaged queries>
| trigger trigger1: if (ELAPSED_TIME > 10) { KILL }
+ <unused triggers>
- | trigger trigger: if (BYTES_WRITTEN > 100K) { MOVE TO default }
+ | trigger trigger: if (BYTES_WRITTEN > 100MB) { MOVE TO default }
PREHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2
PREHOOK: type: ALTER POOL
POSTHOOK: query: ALTER TRIGGER `table`.`trigger1` DROP FROM POOL `table`.pool.child2
@@ -4179,12 +4189,12 @@ PREHOOK: query: SHOW RESOURCE PLAN plan_2
PREHOOK: type: SHOW RESOURCEPLAN
POSTHOOK: query: SHOW RESOURCE PLAN plan_2
POSTHOOK: type: SHOW RESOURCEPLAN
-plan_2[status=DISABLED,parallelism=4,defaultPool=def]
- + def[allocFraction=1.0,schedulingPolicy=null,parallelism=4]
+plan_2[status=DISABLED,parallelism=10,defaultPool=def]
+ + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5]
| mapped for users: user2
| mapped for default
- + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1]
- | trigger trigger_1: if (BYTES_READ = 0) { MOVE TO null_pool }
+ + c2[allocFraction=0.5,schedulingPolicy=null,parallelism=1]
+ | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool }
| mapped for groups: group2
+ c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3]
| mapped for groups: group1
@@ -4247,11 +4257,11 @@ PREHOOK: query: SHOW RESOURCE PLAN plan_2
PREHOOK: type: SHOW RESOURCEPLAN
POSTHOOK: query: SHOW RESOURCE PLAN plan_2
POSTHOOK: type: SHOW RESOURCEPLAN
-plan_2[status=DISABLED,parallelism=4,defaultPool=def]
- + def[allocFraction=1.0,schedulingPolicy=null,parallelism=4]
+plan_2[status=DISABLED,parallelism=10,defaultPool=def]
+ + def[allocFraction=1.0,schedulingPolicy=null,parallelism=5]
| mapped for default
- + c2[allocFraction=0.7,schedulingPolicy=null,parallelism=1]
- | trigger trigger_1: if (BYTES_READ = 0) { MOVE TO null_pool }
+ + c2[allocFraction=0.5,schedulingPolicy=null,parallelism=1]
+ | trigger trigger_1: if (BYTES_READ > 0) { MOVE TO null_pool }
+ c1[allocFraction=0.3,schedulingPolicy=fair,parallelism=3]
| mapped for groups: group1
+ <unmanaged queries>
@@ -4297,11 +4307,11 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-table table BYTES_WRITTEN > 100K MOVE TO table
-table trigger BYTES_WRITTEN > 100K MOVE TO default
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+table table BYTES_WRITTEN > 100KB MOVE TO table
+table trigger BYTES_WRITTEN > 100MB MOVE TO default
table trigger1 ELAPSED_TIME > 10 KILL
-table trigger2 BYTES_READ > 100 KILL
+table trigger2 ELAPSED_TIME > 1hour KILL
PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
PREHOOK: type: QUERY
PREHOOK: Input: sys@wm_pools_to_triggers
@@ -4331,13 +4341,13 @@ PREHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1
PREHOOK: type: CREATE MAPPING
POSTHOOK: query: CREATE USER MAPPING "user1" IN plan_4a TO pool1
POSTHOOK: type: CREATE MAPPING
-PREHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ = 10G DO KILL
+PREHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > 10GB DO KILL
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ = 10G DO KILL
+POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_1 WHEN BYTES_READ > 10GB DO KILL
POSTHOOK: type: CREATE TRIGGER
-PREHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ = 11G DO KILL
+PREHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > 11GB DO KILL
PREHOOK: type: CREATE TRIGGER
-POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ = 11G DO KILL
+POSTHOOK: query: CREATE TRIGGER plan_4a.trigger_2 WHEN BYTES_READ > 11GB DO KILL
POSTHOOK: type: CREATE TRIGGER
PREHOOK: query: ALTER POOL plan_4a.pool1 ADD TRIGGER trigger_2
PREHOOK: type: ALTER POOL
@@ -4391,15 +4401,15 @@ POSTHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
POSTHOOK: type: QUERY
POSTHOOK: Input: sys@wm_triggers
#### A masked pattern was here ####
-plan_1 trigger_2 BYTES_READ > 100 MOVE TO slow_pool
-plan_4a trigger_1 BYTES_READ = 10G KILL
-plan_4a trigger_2 BYTES_READ = 11G KILL
-plan_4b trigger_1 BYTES_READ = 10G KILL
-plan_4b trigger_2 BYTES_READ = 11G KILL
-table table BYTES_WRITTEN > 100K MOVE TO table
-table trigger BYTES_WRITTEN > 100K MOVE TO default
+plan_1 trigger_2 ELAPSED_TIME > 30hour MOVE TO slow_pool
+plan_4a trigger_1 BYTES_READ > 10GB KILL
+plan_4a trigger_2 BYTES_READ > 11GB KILL
+plan_4b trigger_1 BYTES_READ > 10GB KILL
+plan_4b trigger_2 BYTES_READ > 11GB KILL
+table table BYTES_WRITTEN > 100KB MOVE TO table
+table trigger BYTES_WRITTEN > 100MB MOVE TO default
table trigger1 ELAPSED_TIME > 10 KILL
-table trigger2 BYTES_READ > 100 KILL
+table trigger2 ELAPSED_TIME > 1hour KILL
PREHOOK: query: SELECT * FROM SYS.WM_POOLS_TO_TRIGGERS
PREHOOK: type: QUERY
PREHOOK: Input: sys@wm_pools_to_triggers