Posted to commits@hive.apache.org by se...@apache.org on 2017/12/22 20:29:21 UTC

hive git commit: HIVE-18257 : implement scheduling policy configuration instead of hardcoding fair scheduling (Sergey Shelukhin, reviewed by Prasanth Jayachandran and Harish Jaiprakash)

Repository: hive
Updated Branches:
  refs/heads/master e2e1139db -> 0652c5e83


HIVE-18257 : implement scheduling policy configuration instead of hardcoding fair scheduling (Sergey Shelukhin, reviewed by Prasanth Jayachandran and Harish Jaiprakash)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0652c5e8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0652c5e8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0652c5e8

Branch: refs/heads/master
Commit: 0652c5e83ba22eac8f22275f640c12c64578df93
Parents: e2e1139
Author: sergey <se...@apache.org>
Authored: Fri Dec 22 12:28:09 2017 -0800
Committer: sergey <se...@apache.org>
Committed: Fri Dec 22 12:29:00 2017 -0800

----------------------------------------------------------------------
 .../hive/ql/exec/tez/WorkloadManager.java       | 52 +++++++++----
 .../hive/ql/parse/DDLSemanticAnalyzer.java      | 11 ++-
 .../hive/ql/exec/tez/TestWorkloadManager.java   | 58 +++++++++++++-
 .../test/queries/clientpositive/resourceplan.q  | 13 ++--
 .../clientpositive/llap/resourceplan.q.out      | 79 ++++++++++----------
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 10 +++
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  9 +++
 .../metastore/api/WMPoolSchedulingPolicy.java   | 45 +++++++++++
 .../src/gen/thrift/gen-php/metastore/Types.php  |  9 +++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  | 14 ++++
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  7 ++
 .../hadoop/hive/metastore/ObjectStore.java      | 15 +++-
 .../hive/metastore/utils/MetaStoreUtils.java    | 20 ++++-
 .../src/main/thrift/hive_metastore.thrift       |  5 ++
 14 files changed, 281 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index 387d078..8f26a2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
@@ -249,7 +252,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
     final long triggerValidationIntervalMs = HiveConf.getTimeVar(conf,
       HiveConf.ConfVars.HIVE_TRIGGER_VALIDATION_INTERVAL_MS, TimeUnit.MILLISECONDS);
-    TriggerActionHandler triggerActionHandler = new KillMoveTriggerActionHandler(this);
+    TriggerActionHandler<?> triggerActionHandler = new KillMoveTriggerActionHandler(this);
     triggerValidatorRunnable = new PerPoolTriggerValidatorRunnable(perPoolProviders, triggerActionHandler,
       triggerValidationIntervalMs);
     startTriggerValidator(triggerValidationIntervalMs);
@@ -980,10 +983,10 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
         }
         PoolState state = oldPools == null ? null : oldPools.remove(fullName);
         if (state == null) {
-          state = new PoolState(fullName, qp, fraction);
+          state = new PoolState(fullName, qp, fraction, pool.getSchedulingPolicy());
         } else {
           // This will also take care of the queries if query parallelism changed.
-          state.update(qp, fraction, syncWork, e);
+          state.update(qp, fraction, syncWork, e, pool.getSchedulingPolicy());
           poolsToRedistribute.add(fullName);
         }
         state.setTriggers(new LinkedList<Trigger>());
@@ -1665,10 +1668,12 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     private double finalFractionRemaining;
     private int queryParallelism = -1;
     private List<Trigger> triggers = new ArrayList<>();
+    private WMPoolSchedulingPolicy schedulingPolicy;
 
-    public PoolState(String fullName, int queryParallelism, double fraction) {
+    public PoolState(String fullName, int queryParallelism, double fraction,
+        String schedulingPolicy) {
       this.fullName = fullName;
-      update(queryParallelism, fraction, null, null);
+      update(queryParallelism, fraction, null, null, schedulingPolicy);
     }
 
     public int getTotalActiveSessions() {
@@ -1676,9 +1681,16 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
 
     public void update(int queryParallelism, double fraction,
-        WmThreadSyncWork syncWork, EventState e) {
+        WmThreadSyncWork syncWork, EventState e, String schedulingPolicy) {
       this.finalFraction = this.finalFractionRemaining = fraction;
       this.queryParallelism = queryParallelism;
+      try {
+        this.schedulingPolicy = MetaStoreUtils.parseSchedulingPolicy(schedulingPolicy);
+      } catch (IllegalArgumentException ex) {
+        // This should be validated at change time; let's fall back to a default here.
+        LOG.error("Unknown scheduling policy " + schedulingPolicy + "; using FAIR");
+        this.schedulingPolicy = WMPoolSchedulingPolicy.FAIR;
+      }
       // TODO: two possible improvements
       //       1) Right now we kill all the queries here; we could just kill -qpDelta.
       //       2) After the queries are killed queued queries would take their place.
@@ -1706,14 +1718,28 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
 
     public double updateAllocationPercentages() {
-      // TODO: real implementation involving in-the-pool policy interface, etc.
-      double allocation = finalFractionRemaining / (sessions.size() + initializingSessions.size());
-      for (WmTezSession session : sessions) {
-        session.setClusterFraction(allocation);
+      switch (schedulingPolicy) {
+      case FAIR:
+        int totalSessions = sessions.size() + initializingSessions.size();
+        if (totalSessions == 0) return 0;
+        double allocation = finalFractionRemaining / totalSessions;
+        for (WmTezSession session : sessions) {
+          session.setClusterFraction(allocation);
+        }
+        // Do not give out the capacity of the initializing sessions to the running ones;
+        // we expect init to be fast.
+        return finalFractionRemaining - allocation * initializingSessions.size();
+      case FIFO:
+        if (sessions.isEmpty()) return 0;
+        boolean isFirst = true;
+        for (WmTezSession session : sessions) {
+          session.setClusterFraction(isFirst ? finalFractionRemaining : 0);
+          isFirst = false;
+        }
+        return finalFractionRemaining;
+      default:
+        throw new AssertionError("Unexpected enum value " + schedulingPolicy);
       }
-      // Do not give out the capacity of the initializing sessions to the running ones;
-      // we expect init to be fast.
-      return finalFractionRemaining - allocation * initializingSessions.size();
     }
 
     public LinkedList<WmTezSession> getSessions() {

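For illustration, a standalone sketch of the allocation math added to updateAllocationPercentages() above: FAIR splits the pool's remaining fraction evenly across running and initializing sessions but only hands it to running ones, while FIFO gives the whole fraction to the first session in the pool. SessionStub and computeFractions are hypothetical names used only in this sketch, not Hive classes.

    // Sketch only: mirrors the switch above, outside of WorkloadManager.
    import java.util.Arrays;
    import java.util.List;

    public class PoolAllocationSketch {
      enum Policy { FAIR, FIFO }

      static final class SessionStub {
        double clusterFraction;
      }

      // Returns the fraction actually handed out to running sessions.
      static double computeFractions(Policy policy, double poolFraction,
          List<SessionStub> running, int initializing) {
        switch (policy) {
        case FAIR: {
          int total = running.size() + initializing;
          if (total == 0) return 0;
          double share = poolFraction / total;
          for (SessionStub s : running) {
            s.clusterFraction = share;
          }
          // Capacity reserved for initializing sessions is not lent out to
          // running ones; init is expected to be fast.
          return poolFraction - share * initializing;
        }
        case FIFO: {
          if (running.isEmpty()) return 0;
          boolean first = true;
          for (SessionStub s : running) {
            s.clusterFraction = first ? poolFraction : 0;
            first = false;
          }
          return poolFraction;
        }
        default:
          throw new AssertionError(policy);
        }
      }

      public static void main(String[] args) {
        List<SessionStub> sessions = Arrays.asList(new SessionStub(), new SessionStub());
        // FAIR: two running sessions in a pool with fraction 1.0 get 0.5 each.
        System.out.println(computeFractions(Policy.FAIR, 1.0, sessions, 0)); // 1.0 handed out
        // FIFO: the first session gets the whole fraction, the second gets 0.
        System.out.println(computeFractions(Policy.FIFO, 1.0, sessions, 0)); // 1.0
      }
    }
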
http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index bce9aa1..fde50d7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -18,9 +18,10 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import org.apache.hadoop.hive.ql.exec.tez.WorkloadManager;
+
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
-
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
@@ -173,7 +174,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.util.StringUtils;
-
 import java.io.FileNotFoundException;
 import java.io.Serializable;
 import java.lang.reflect.Constructor;
@@ -191,7 +191,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-
 import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASELOCATION;
 import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DATABASEPROPERTIES;
 
@@ -1159,7 +1158,11 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         pool.setQueryParallelism(Integer.parseInt(param));
         break;
       case HiveParser.TOK_SCHEDULING_POLICY:
-        pool.setSchedulingPolicy(PlanUtils.stripQuotes(param));
+        String schedulingPolicyStr = PlanUtils.stripQuotes(param);
+        if (!MetaStoreUtils.isValidSchedulingPolicy(schedulingPolicyStr)) {
+          throw new SemanticException("Invalid scheduling policy " + schedulingPolicyStr);
+        }
+        pool.setSchedulingPolicy(schedulingPolicyStr);
         break;
       case HiveParser.TOK_PATH:
         throw new SemanticException("Invalid parameter path in create pool");

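For illustration, the parse-time guard on SCHEDULING_POLICY follows this pattern; validatePolicy is a hypothetical helper for the sketch, whereas the analyzer itself throws SemanticException.

    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    // Sketch of the new guard: reject unknown policies before they reach the metastore.
    public class PolicyValidationSketch {
      static String validatePolicy(String raw) {
        if (!MetaStoreUtils.isValidSchedulingPolicy(raw)) {
          throw new IllegalArgumentException("Invalid scheduling policy " + raw);
        }
        return raw;
      }

      public static void main(String[] args) {
        System.out.println(validatePolicy("fifo"));     // accepted
        System.out.println(validatePolicy("default"));  // accepted, treated as FAIR
        System.out.println(validatePolicy("priority")); // throws: not a supported policy
      }
    }
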
http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index 62af917..e90e227 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -127,9 +127,14 @@ public class TestWorkloadManager {
   }
 
   public static WMPool pool(String path, int qp, double alloc) {
+    return pool(path, qp, alloc, "fair");
+  }
+
+  public static WMPool pool(String path, int qp, double alloc, String policy) {
     WMPool pool = new WMPool("rp", path);
     pool.setAllocFraction(alloc);
     pool.setQueryParallelism(qp);
+    pool.setSchedulingPolicy(policy);
     return pool;
   }
 
@@ -267,8 +272,6 @@ public class TestWorkloadManager {
     assertEquals("test", session.getQueueName());
   }
 
-  // Note (unrelated to epsilon): all the fraction checks are valid with the current logic in the
-  //                              absence of policies. This will change when there are policies.
   private final static double EPSILON = 0.001;
 
   @Test(timeout = 10000)
@@ -700,6 +703,57 @@ public class TestWorkloadManager {
   }
 
 
+
+  @Test(timeout=10000)
+  public void testFifoSchedulingPolicy() throws Exception {
+    final HiveConf conf = createConf();
+    MockQam qam = new MockQam();
+    WMFullResourcePlan plan = new WMFullResourcePlan(
+        plan(), Lists.newArrayList(pool("A", 3, 1f, "fair")));
+    plan.getPlan().setDefaultPoolPath("A");
+    final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
+    wm.start();
+ 
+    // 2 running.
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(
+        null, new MappingInput("A", null), conf, null),
+        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    assertEquals(0.5f, sessionA1.getClusterFraction(), EPSILON);
+    assertEquals(0.5f, sessionA2.getClusterFraction(), EPSILON);
+
+    // Change the resource plan to use fifo policy.
+    plan = new WMFullResourcePlan(plan(), Lists.newArrayList(pool("A", 3, 1f, "fifo")));
+    plan.getPlan().setDefaultPoolPath("A");
+    wm.updateResourcePlanAsync(plan).get();
+    assertEquals(1f, sessionA1.getClusterFraction(), EPSILON);
+    assertEquals(0f, sessionA2.getClusterFraction(), EPSILON);
+    assertEquals("A", sessionA2.getPoolName());
+
+    // Add another session.
+    WmTezSession sessionA3 = (WmTezSession) wm.getSession(
+        null, new MappingInput("A", null), conf, null);
+    assertEquals(0f, sessionA3.getClusterFraction(), EPSILON);
+    assertEquals("A", sessionA3.getPoolName());
+
+    // Make sure the allocation is transferred correctly on return.
+    sessionA1.returnToSessionManager();
+    assertEquals(1f, sessionA2.getClusterFraction(), EPSILON);
+    assertEquals(0f, sessionA3.getClusterFraction(), EPSILON);
+    assertEquals("A", sessionA3.getPoolName());
+
+    // Make sure reuse changes the FIFO order of the session.
+    WmTezSession sessionA4 =  (WmTezSession) wm.getSession(
+        sessionA2, new MappingInput("A", null), conf, null);
+    assertSame(sessionA2, sessionA4);
+    assertEquals(1f, sessionA3.getClusterFraction(), EPSILON);
+    assertEquals(0f, sessionA2.getClusterFraction(), EPSILON);
+    assertEquals("A", sessionA2.getPoolName());
+
+    sessionA3.returnToSessionManager();
+    assertEquals(1f, sessionA2.getClusterFraction(), EPSILON);
+    sessionA2.returnToSessionManager();
+  }
+
   @Test(timeout=10000)
   public void testDisableEnable() throws Exception {
     final HiveConf conf = createConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/ql/src/test/queries/clientpositive/resourceplan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/resourceplan.q b/ql/src/test/queries/clientpositive/resourceplan.q
index 997292a..799b435 100644
--- a/ql/src/test/queries/clientpositive/resourceplan.q
+++ b/ql/src/test/queries/clientpositive/resourceplan.q
@@ -201,7 +201,10 @@ CREATE POOL plan_2.default WITH
 SELECT * FROM SYS.WM_POOLS;
 
 CREATE POOL plan_2.default.c1 WITH
-    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority';
+    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='invalid';
+
+CREATE POOL plan_2.default.c1 WITH
+    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair';
 
 CREATE POOL plan_2.default.c2 WITH
     QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.2;
@@ -226,16 +229,16 @@ SELECT * FROM SYS.WM_POOLS;
 
 -- Create failed no parent pool found.
 CREATE POOL plan_2.child1.child2 WITH
-    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fcfs', ALLOC_FRACTION=0.8;
+    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8;
 
 -- Create nested pools.
 CREATE POOL `table`.`table` WITH
-  SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1;
+  SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1;
 
 CREATE POOL `table`.`table`.pool1 WITH
-  SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9;
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9;
 CREATE POOL `table`.`table`.pool1.child1 WITH
-  SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3;
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3;
 CREATE POOL `table`.`table`.pool1.child2 WITH
   SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7;
 ALTER POOL `table`.`table` SET ALLOC_FRACTION=0.0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/ql/src/test/results/clientpositive/llap/resourceplan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index d9f22b8..8cb3062 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -3671,11 +3671,12 @@ POSTHOOK: Input: sys@wm_pools
 plan_1	default	1.0	4	NULL
 plan_2	default	1.0	4	NULL
 table	default	1.0	4	NULL
+FAILED: SemanticException Invalid scheduling policy invalid
 PREHOOK: query: CREATE POOL plan_2.default.c1 WITH
-    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority'
+    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
 PREHOOK: type: CREATE POOL
 POSTHOOK: query: CREATE POOL plan_2.default.c1 WITH
-    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='priority'
+    ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair'
 POSTHOOK: type: CREATE POOL
 PREHOOK: query: CREATE POOL plan_2.default.c2 WITH
     QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.2
@@ -3726,7 +3727,7 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
 PREHOOK: query: DROP POOL plan_2.default
@@ -3742,30 +3743,30 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
 PREHOOK: query: CREATE POOL plan_2.child1.child2 WITH
-    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fcfs', ALLOC_FRACTION=0.8
+    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.8
 PREHOOK: type: CREATE POOL
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Pool path is invalid, the parent does not exist)
 PREHOOK: query: CREATE POOL `table`.`table` WITH
-  SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+  SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
 PREHOOK: type: CREATE POOL
 POSTHOOK: query: CREATE POOL `table`.`table` WITH
-  SCHEDULING_POLICY='random', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
+  SCHEDULING_POLICY='fifo', ALLOC_FRACTION=0.5, QUERY_PARALLELISM=1
 POSTHOOK: type: CREATE POOL
 PREHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
-  SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
 PREHOOK: type: CREATE POOL
 POSTHOOK: query: CREATE POOL `table`.`table`.pool1 WITH
-  SCHEDULING_POLICY='priority', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.9
 POSTHOOK: type: CREATE POOL
 PREHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
-  SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
 PREHOOK: type: CREATE POOL
 POSTHOOK: query: CREATE POOL `table`.`table`.pool1.child1 WITH
-  SCHEDULING_POLICY='random', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
+  SCHEDULING_POLICY='fair', QUERY_PARALLELISM=1, ALLOC_FRACTION=0.3
 POSTHOOK: type: CREATE POOL
 PREHOOK: query: CREATE POOL `table`.`table`.pool1.child2 WITH
   SCHEDULING_POLICY='fair', QUERY_PARALLELISM=3, ALLOC_FRACTION=0.7
@@ -3787,12 +3788,12 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
-table	table	0.0	1	random
-table	table.pool1	0.9	3	priority
-table	table.pool1.child1	0.3	1	random
+table	table	0.0	1	fifo
+table	table.pool1	0.9	3	fair
+table	table.pool1.child1	0.3	1	fair
 table	table.pool1.child2	0.7	3	fair
 PREHOOK: query: ALTER POOL `table`.`table`.pool1 SET PATH = `table`.pool
 PREHOOK: type: ALTER POOL
@@ -3808,12 +3809,12 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
-table	table.pool.child1	0.3	1	random
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
+table	table.pool.child1	0.3	1	fair
 table	table.pool.child2	0.7	3	fair
 PREHOOK: query: DROP POOL `table`.`table`
 PREHOOK: type: DROP POOL
@@ -3828,12 +3829,12 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
-table	table.pool.child1	0.3	1	random
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
+table	table.pool.child1	0.3	1	fair
 table	table.pool.child2	0.7	3	fair
 PREHOOK: query: DROP POOL `table`.default
 PREHOOK: type: DROP POOL
@@ -3848,12 +3849,12 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
 table	default	1.0	4	NULL
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
-table	table.pool.child1	0.3	1	random
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
+table	table.pool.child1	0.3	1	fair
 table	table.pool.child2	0.7	3	fair
 PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 PREHOOK: type: QUERY
@@ -3884,11 +3885,11 @@ POSTHOOK: Input: sys@wm_pools
 #### A masked pattern was here ####
 plan_1	default	1.0	4	NULL
 plan_2	def	1.0	4	NULL
-plan_2	def.c1	0.3	3	priority
+plan_2	def.c1	0.3	3	fair
 plan_2	def.c2	0.7	1	fair
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
-table	table.pool.child1	0.3	1	random
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
+table	table.pool.child1	0.3	1	fair
 table	table.pool.child2	0.7	3	fair
 PREHOOK: query: ALTER POOL plan_2.def.c1 ADD TRIGGER trigger_1
 PREHOOK: type: ALTER POOL
@@ -4067,8 +4068,8 @@ POSTHOOK: Input: sys@wm_pools
 plan_1	default	1.0	4	NULL
 plan_2	default	1.0	4	NULL
 plan_4	default	1.0	4	NULL
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
 PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_triggers
@@ -4161,8 +4162,8 @@ plan_4a	pool1	0.0	2	fair
 plan_4b	default	1.0	4	NULL
 plan_4b	pool1	0.0	2	fair
 plan_4b	pool2	0.0	3	fair
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
 PREHOOK: query: SELECT * FROM SYS.WM_TRIGGERS
 PREHOOK: type: QUERY
 PREHOOK: Input: sys@wm_triggers
@@ -4235,8 +4236,8 @@ plan_4a	pool1	0.0	2	fair
 plan_4a	pool2	0.0	3	fair
 plan_4a-old-0	default	1.0	4	NULL
 plan_4a-old-0	pool1	0.0	2	fair
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair
 PREHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: REPLACE ACTIVE RESOURCE PLAN WITH plan_4a
@@ -4302,5 +4303,5 @@ plan_4-old-1	pool1	0.0	2	fair
 plan_4-old-1	pool2	0.0	3	fair
 plan_4a-old-0	default	1.0	4	NULL
 plan_4a-old-0	pool1	0.0	2	fair
-table	table	0.0	1	random
-table	table.pool	0.9	3	priority
+table	table	0.0	1	fifo
+table	table.pool	0.9	3	fair

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 9f78146..7b9cb72 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -199,6 +199,16 @@ const char* _kWMResourcePlanStatusNames[] = {
 };
 const std::map<int, const char*> _WMResourcePlanStatus_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kWMResourcePlanStatusValues, _kWMResourcePlanStatusNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
 
+int _kWMPoolSchedulingPolicyValues[] = {
+  WMPoolSchedulingPolicy::FAIR,
+  WMPoolSchedulingPolicy::FIFO
+};
+const char* _kWMPoolSchedulingPolicyNames[] = {
+  "FAIR",
+  "FIFO"
+};
+const std::map<int, const char*> _WMPoolSchedulingPolicy_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kWMPoolSchedulingPolicyValues, _kWMPoolSchedulingPolicyNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
 
 Version::~Version() throw() {
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 96e5234..d9aef9a 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -177,6 +177,15 @@ struct WMResourcePlanStatus {
 
 extern const std::map<int, const char*> _WMResourcePlanStatus_VALUES_TO_NAMES;
 
+struct WMPoolSchedulingPolicy {
+  enum type {
+    FAIR = 1,
+    FIFO = 2
+  };
+};
+
+extern const std::map<int, const char*> _WMPoolSchedulingPolicy_VALUES_TO_NAMES;
+
 class Version;
 
 class FieldSchema;

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPoolSchedulingPolicy.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPoolSchedulingPolicy.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPoolSchedulingPolicy.java
new file mode 100644
index 0000000..0ceb26b
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMPoolSchedulingPolicy.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum WMPoolSchedulingPolicy implements org.apache.thrift.TEnum {
+  FAIR(1),
+  FIFO(2);
+
+  private final int value;
+
+  private WMPoolSchedulingPolicy(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find a the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static WMPoolSchedulingPolicy findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return FAIR;
+      case 2:
+        return FIFO;
+      default:
+        return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index 3e15816..5bb2272 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -187,6 +187,15 @@ final class WMResourcePlanStatus {
   );
 }
 
+final class WMPoolSchedulingPolicy {
+  const FAIR = 1;
+  const FIFO = 2;
+  static public $__names = array(
+    1 => 'FAIR',
+    2 => 'FIFO',
+  );
+}
+
 class Version {
   static $_TSPEC;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index b5671f4..4a58081 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -281,6 +281,20 @@ class WMResourcePlanStatus:
     "DISABLED": 3,
   }
 
+class WMPoolSchedulingPolicy:
+  FAIR = 1
+  FIFO = 2
+
+  _VALUES_TO_NAMES = {
+    1: "FAIR",
+    2: "FIFO",
+  }
+
+  _NAMES_TO_VALUES = {
+    "FAIR": 1,
+    "FIFO": 2,
+  }
+
 
 class Version:
   """

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 3d8346d..9b377dd 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -133,6 +133,13 @@ module WMResourcePlanStatus
   VALID_VALUES = Set.new([ACTIVE, ENABLED, DISABLED]).freeze
 end
 
+module WMPoolSchedulingPolicy
+  FAIR = 1
+  FIFO = 2
+  VALUE_MAP = {1 => "FAIR", 2 => "FIFO"}
+  VALID_VALUES = Set.new([FAIR, FIFO]).freeze
+end
+
 class Version
   include ::Thrift::Struct, ::Thrift::Struct_Union
   VERSION = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index eb811f0..98f27d8 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -10052,7 +10052,10 @@ public class ObjectStore implements RawStore, Configurable {
         currentPoolData.queryParallelism = pool.getQueryParallelism();
         parentPoolData.totalChildrenQueryParallelism += pool.getQueryParallelism();
       }
-      // Check for valid pool.getSchedulingPolicy();
+      if (!MetaStoreUtils.isValidSchedulingPolicy(pool.getSchedulingPolicy())) {
+        errors.add("Invalid scheduling policy "
+            + pool.getSchedulingPolicy() + " for pool: " + pool.getPath());
+      }
     }
     for (Entry<String, PoolData> entry : poolInfo.entrySet()) {
       PoolData poolData = entry.getValue();
@@ -10266,8 +10269,12 @@ public class ObjectStore implements RawStore, Configurable {
       if (!poolParentExists(resourcePlan, pool.getPoolPath())) {
         throw new NoSuchObjectException("Pool path is invalid, the parent does not exist");
       }
+      String policy = pool.getSchedulingPolicy();
+      if (!MetaStoreUtils.isValidSchedulingPolicy(policy)) {
+        throw new InvalidOperationException("Invalid scheduling policy " + policy);
+      }
       MWMPool mPool = new MWMPool(resourcePlan, pool.getPoolPath(), pool.getAllocFraction(),
-          pool.getQueryParallelism(), pool.getSchedulingPolicy());
+          pool.getQueryParallelism(), policy);
       pm.makePersistent(mPool);
       commited = commitTransaction();
     } catch (Exception e) {
@@ -10294,6 +10301,10 @@ public class ObjectStore implements RawStore, Configurable {
         mPool.setQueryParallelism(pool.getQueryParallelism());
       }
       if (pool.isSetSchedulingPolicy()) {
+        String policy = pool.getSchedulingPolicy();
+        if (!MetaStoreUtils.isValidSchedulingPolicy(policy)) {
+          throw new InvalidOperationException("Invalid scheduling policy " + policy);
+        }
         mPool.setSchedulingPolicy(pool.getSchedulingPolicy());
       }
       if (pool.isSetPoolPath() && !pool.getPoolPath().equals(mPool.getPath())) {

http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index cde34bc..8bc4ce7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.metastore.utils;
 
+import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy;
+
 import com.google.common.base.Predicates;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -61,7 +63,6 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.util.MachineList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import javax.annotation.Nullable;
 import java.io.File;
 import java.io.IOException;
@@ -1560,4 +1561,21 @@ public class MetaStoreUtils {
     }
     return cols;
   }
+
+
+  public static boolean isValidSchedulingPolicy(String str) {
+    try {
+      parseSchedulingPolicy(str);
+      return true;
+    } catch (IllegalArgumentException ex) {
+    }
+    return false;
+  }
+
+  public static WMPoolSchedulingPolicy parseSchedulingPolicy(String schedulingPolicy) {
+    if (schedulingPolicy == null) return WMPoolSchedulingPolicy.FAIR;
+    schedulingPolicy = schedulingPolicy.trim().toUpperCase();
+    if ("DEFAULT".equals(schedulingPolicy)) return WMPoolSchedulingPolicy.FAIR;
+    return Enum.valueOf(WMPoolSchedulingPolicy.class, schedulingPolicy);
+  }
 }

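For illustration, how the new parseSchedulingPolicy/isValidSchedulingPolicy helpers resolve policy strings: null and "default" map to FAIR, parsing is trimmed and case-insensitive, and anything else falls through to Enum.valueOf, so unknown names surface as IllegalArgumentException.

    import org.apache.hadoop.hive.metastore.api.WMPoolSchedulingPolicy;
    import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

    // Quick illustration of the parsing behavior added above.
    public class ParsePolicySketch {
      public static void main(String[] args) {
        System.out.println(MetaStoreUtils.parseSchedulingPolicy(null));      // FAIR (unset pools)
        System.out.println(MetaStoreUtils.parseSchedulingPolicy("default")); // FAIR
        System.out.println(MetaStoreUtils.parseSchedulingPolicy(" fifo "));  // FIFO (trimmed, case-insensitive)
        System.out.println(MetaStoreUtils.isValidSchedulingPolicy("fcfs"));  // false: Enum.valueOf rejects it
      }
    }
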
http://git-wip-us.apache.org/repos/asf/hive/blob/0652c5e8/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index de07179..e5dd47d 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1039,6 +1039,11 @@ enum WMResourcePlanStatus {
   DISABLED = 3
 }
 
+enum  WMPoolSchedulingPolicy {
+  FAIR = 1,
+  FIFO = 2
+}
+
 struct WMResourcePlan {
   1: required string name;
   2: optional WMResourcePlanStatus status;