Posted to commits@hive.apache.org by se...@apache.org on 2018/10/30 20:01:18 UTC

[6/6] hive git commit: HIVE-20793 : add RP namespacing to workload management (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

HIVE-20793 : add RP namespacing to workload management (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5258c67e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5258c67e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5258c67e

Branch: refs/heads/master
Commit: 5258c67e9558bd2d98e4887d3dd8e3eb8aa5d763
Parents: b701720
Author: sergey <se...@apache.org>
Authored: Tue Oct 30 12:44:14 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Oct 30 12:57:17 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../listener/DummyRawStoreFailEvent.java        |  44 +-
 .../upgrade/hive/hive-schema-4.0.0.hive.sql     |  11 +
 .../hive/upgrade-3.1.0-to-4.0.0.hive.sql        | 137 ++++
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   5 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  75 +-
 .../hadoop/hive/ql/metadata/TestHive.java       |  68 +-
 .../test/queries/clientpositive/resourceplan.q  |   6 +-
 .../clientpositive/llap/resourceplan.q.out      | 732 ++++++++++---------
 .../api/WMAlterResourcePlanRequest.java         | 114 ++-
 ...CreateOrDropTriggerToPoolMappingRequest.java | 114 ++-
 .../hive/metastore/api/WMDropPoolRequest.java   | 114 ++-
 .../api/WMDropResourcePlanRequest.java          | 114 ++-
 .../metastore/api/WMDropTriggerRequest.java     | 114 ++-
 .../api/WMGetActiveResourcePlanRequest.java     | 112 ++-
 .../api/WMGetAllResourcePlanRequest.java        | 112 ++-
 .../metastore/api/WMGetResourcePlanRequest.java | 114 ++-
 .../api/WMGetTriggersForResourePlanRequest.java | 114 ++-
 .../hadoop/hive/metastore/api/WMMapping.java    | 114 ++-
 .../hive/metastore/api/WMNullablePool.java      | 114 ++-
 .../metastore/api/WMNullableResourcePlan.java   | 114 ++-
 .../hadoop/hive/metastore/api/WMPool.java       | 114 ++-
 .../hive/metastore/api/WMPoolTrigger.java       | 112 ++-
 .../hive/metastore/api/WMResourcePlan.java      | 114 ++-
 .../hadoop/hive/metastore/api/WMTrigger.java    | 114 ++-
 .../api/WMValidateResourcePlanRequest.java      | 114 ++-
 .../src/gen/thrift/gen-php/metastore/Types.php  | 399 +++++++++-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  | 263 ++++++-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  66 +-
 .../hive/metastore/HiveMetaStoreClient.java     |  35 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  20 +-
 .../src/main/thrift/hive_metastore.thrift       |  17 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |  26 +-
 .../hadoop/hive/metastore/ObjectStore.java      | 204 ++++--
 .../apache/hadoop/hive/metastore/RawStore.java  |  28 +-
 .../hive/metastore/cache/CachedStore.java       |  46 +-
 .../hive/metastore/model/MWMResourcePlan.java   |   9 +
 .../src/main/resources/package.jdo              |   4 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   4 +-
 .../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql  |   7 +
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   3 +-
 .../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql  |   6 +
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   3 +-
 .../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql  |   7 +
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   3 +-
 .../oracle/upgrade-3.2.0-to-4.0.0.oracle.sql    |   6 +
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   3 +-
 .../upgrade-3.2.0-to-4.0.0.postgres.sql         |   7 +
 .../DummyRawStoreControlledCommit.java          |  44 +-
 .../DummyRawStoreForJdoConnection.java          |  22 +-
 .../HiveMetaStoreClientPreCatalog.java          |  36 +-
 51 files changed, 3603 insertions(+), 679 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 917aaeb..102e6c6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3206,6 +3206,10 @@ public class HiveConf extends Configuration {
     HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE("hive.server2.tez.interactive.queue", "",
         "A single YARN queues to use for Hive Interactive sessions. When this is specified,\n" +
         "workload management is enabled and used for these sessions."),
+    HIVE_SERVER2_WM_NAMESPACE("hive.server2.wm.namespace", "default",
+        "The WM namespace to use when one metastore is used by multiple compute clusters each \n" +
+        "with their own workload management. The special value 'default' (the default) will \n" +
+        "also include any resource plans created before the namespaces were introduced."),
     HIVE_SERVER2_WM_WORKER_THREADS("hive.server2.wm.worker.threads", 4,
         "Number of worker threads to use to perform the synchronous operations with Tez\n" +
         "sessions for workload management (e.g. opening, closing, etc.)"),

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index c3e1e8e..d9fb645 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -1105,36 +1105,36 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException {
-    return objectStore.getResourcePlan(name);
+  public WMFullResourcePlan getResourcePlan(String name, String ns) throws NoSuchObjectException, MetaException {
+    return objectStore.getResourcePlan(name, ns);
   }
 
   @Override
-  public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
-    return objectStore.getAllResourcePlans();
+  public List<WMResourcePlan> getAllResourcePlans(String ns) throws MetaException {
+    return objectStore.getAllResourcePlans(ns);
   }
 
   @Override
-  public WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+  public WMFullResourcePlan alterResourcePlan(String name, String ns, WMNullableResourcePlan resourcePlan,
       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException, MetaException {
-    return objectStore.alterResourcePlan(name, resourcePlan, canActivateDisabled, canDeactivate, isReplace);
+    return objectStore.alterResourcePlan(name, ns, resourcePlan, canActivateDisabled, canDeactivate, isReplace);
   }
 
   @Override
-  public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
-    return objectStore.getActiveResourcePlan();
+  public WMFullResourcePlan getActiveResourcePlan(String ns) throws MetaException {
+    return objectStore.getActiveResourcePlan(ns);
   }
 
   @Override
-  public WMValidateResourcePlanResponse validateResourcePlan(String name)
+  public WMValidateResourcePlanResponse validateResourcePlan(String name, String ns)
       throws NoSuchObjectException, InvalidObjectException, MetaException {
-    return objectStore.validateResourcePlan(name);
+    return objectStore.validateResourcePlan(name, ns);
   }
 
   @Override
-  public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
-    objectStore.dropResourcePlan(name);
+  public void dropResourcePlan(String name, String ns) throws NoSuchObjectException, MetaException {
+    objectStore.dropResourcePlan(name, ns);
   }
 
   @Override
@@ -1151,15 +1151,15 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void dropWMTrigger(String resourcePlanName, String triggerName)
+  public void dropWMTrigger(String resourcePlanName, String triggerName, String ns)
       throws NoSuchObjectException, InvalidOperationException, MetaException {
-    objectStore.dropWMTrigger(resourcePlanName, triggerName);
+    objectStore.dropWMTrigger(resourcePlanName, triggerName, ns);
   }
 
   @Override
-  public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+  public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName, String ns)
       throws NoSuchObjectException, MetaException {
-    return objectStore.getTriggersForResourcePlan(resourcePlanName);
+    return objectStore.getTriggersForResourcePlan(resourcePlanName, ns);
   }
 
   @Override
@@ -1175,9 +1175,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void dropWMPool(String resourcePlanName, String poolPath)
+  public void dropWMPool(String resourcePlanName, String poolPath, String ns)
       throws NoSuchObjectException, InvalidOperationException, MetaException {
-    objectStore.dropWMPool(resourcePlanName, poolPath);
+    objectStore.dropWMPool(resourcePlanName, poolPath, ns);
   }
 
   @Override
@@ -1195,15 +1195,15 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
 
   @Override
   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
-      String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+      String poolPath, String ns) throws AlreadyExistsException, NoSuchObjectException,
       InvalidOperationException, MetaException {
-    objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+    objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, ns);
   }
 
   @Override
   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
-      String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
-    objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+      String poolPath, String ns) throws NoSuchObjectException, InvalidOperationException, MetaException {
+    objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, ns);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
index a69046f..db1384b 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql
@@ -941,6 +941,7 @@ FROM `PARTITION_PARAMS` GROUP BY `PART_ID`;
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
   `NAME` string,
+  `NS` string,
   `STATUS` string,
   `QUERY_PARALLELISM` int,
   `DEFAULT_POOL_PATH` string
@@ -951,6 +952,7 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
   \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
   \"STATUS\",
   \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
   \"WM_POOL\".\"PATH\"
@@ -960,6 +962,7 @@ FROM
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` (
   `RP_NAME` string,
+  `NS` string,
   `NAME` string,
   `TRIGGER_EXPRESSION` string,
   `ACTION_EXPRESSION` string
@@ -970,6 +973,7 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
   r.\"NAME\" AS RP_NAME,
+  case when r.\"NS\" is null then 'default' else r.\"NS\" end,
   t.\"NAME\" AS NAME,
   \"TRIGGER_EXPRESSION\",
   \"ACTION_EXPRESSION\"
@@ -983,6 +987,7 @@ ON
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` (
   `RP_NAME` string,
+  `NS` string,
   `PATH` string,
   `ALLOC_FRACTION` double,
   `QUERY_PARALLELISM` int,
@@ -994,6 +999,7 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
   \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
   \"WM_POOL\".\"PATH\",
   \"WM_POOL\".\"ALLOC_FRACTION\",
   \"WM_POOL\".\"QUERY_PARALLELISM\",
@@ -1008,6 +1014,7 @@ ON
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
   `RP_NAME` string,
+  `NS` string,
   `POOL_PATH` string,
   `TRIGGER_NAME` string
 )
@@ -1017,6 +1024,7 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
   \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
   \"WM_POOL\".\"PATH\" AS POOL_PATH,
   \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
 FROM \"WM_POOL_TO_TRIGGER\"
@@ -1026,6 +1034,7 @@ FROM \"WM_POOL_TO_TRIGGER\"
 UNION
 SELECT
   \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
   '<unmanaged queries>' AS POOL_PATH,
   \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
 FROM \"WM_TRIGGER\"
@@ -1036,6 +1045,7 @@ WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't')
 
 CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` (
   `RP_NAME` string,
+  `NS` string,
   `ENTITY_TYPE` string,
   `ENTITY_NAME` string,
   `POOL_PATH` string,
@@ -1047,6 +1057,7 @@ TBLPROPERTIES (
 "hive.sql.query" =
 "SELECT
   \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
   \"ENTITY_TYPE\",
   \"ENTITY_NAME\",
   case when \"WM_POOL\".\"PATH\" is null then '<unmanaged>' else \"WM_POOL\".\"PATH\" end,

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
index 4c77020..6cb12f9 100644
--- a/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/upgrade-3.1.0-to-4.0.0.hive.sql
@@ -2,6 +2,143 @@ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
 
 USE SYS;
 
+-- HIVE-20793
+DROP TABLE IF EXISTS `WM_RESOURCEPLANS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
+  `NAME` string,
+  `NS` string,
+  `STATUS` string,
+  `QUERY_PARALLELISM` int,
+  `DEFAULT_POOL_PATH` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
+  \"STATUS\",
+  \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
+  \"WM_POOL\".\"PATH\"
+FROM
+  \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
+);
+
+DROP TABLE IF EXISTS `WM_TRIGGERS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` (
+  `RP_NAME` string,
+  `NS` string,
+  `NAME` string,
+  `TRIGGER_EXPRESSION` string,
+  `ACTION_EXPRESSION` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  r.\"NAME\" AS RP_NAME,
+  case when r.\"NS\" is null then 'default' else r.\"NS\" end,
+  t.\"NAME\" AS NAME,
+  \"TRIGGER_EXPRESSION\",
+  \"ACTION_EXPRESSION\"
+FROM
+  \"WM_TRIGGER\" t
+JOIN
+  \"WM_RESOURCEPLAN\" r
+ON
+  t.\"RP_ID\" = r.\"RP_ID\""
+);
+
+DROP TABLE IF EXISTS `WM_POOLS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` (
+  `RP_NAME` string,
+  `NS` string,
+  `PATH` string,
+  `ALLOC_FRACTION` double,
+  `QUERY_PARALLELISM` int,
+  `SCHEDULING_POLICY` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
+  \"WM_POOL\".\"PATH\",
+  \"WM_POOL\".\"ALLOC_FRACTION\",
+  \"WM_POOL\".\"QUERY_PARALLELISM\",
+  \"WM_POOL\".\"SCHEDULING_POLICY\"
+FROM
+  \"WM_POOL\"
+JOIN
+  \"WM_RESOURCEPLAN\"
+ON
+  \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\""
+);
+
+DROP TABLE IF EXISTS `WM_POOLS_TO_TRIGGERS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` (
+  `RP_NAME` string,
+  `NS` string,
+  `POOL_PATH` string,
+  `TRIGGER_NAME` string
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
+  \"WM_POOL\".\"PATH\" AS POOL_PATH,
+  \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
+FROM \"WM_POOL_TO_TRIGGER\"
+  JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\"
+  JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\"
+  JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
+UNION
+SELECT
+  \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME,
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
+  '<unmanaged queries>' AS POOL_PATH,
+  \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME
+FROM \"WM_TRIGGER\"
+  JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
+WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't')
+"
+);
+
+DROP TABLE IF EXISTS `WM_MAPPINGS`;
+CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` (
+  `RP_NAME` string,
+  `NS` string,
+  `ENTITY_TYPE` string,
+  `ENTITY_NAME` string,
+  `POOL_PATH` string,
+  `ORDERING` int
+)
+STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
+TBLPROPERTIES (
+"hive.sql.database.type" = "METASTORE",
+"hive.sql.query" =
+"SELECT
+  \"WM_RESOURCEPLAN\".\"NAME\",
+  case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS,
+  \"ENTITY_TYPE\",
+  \"ENTITY_NAME\",
+  case when \"WM_POOL\".\"PATH\" is null then '<unmanaged>' else \"WM_POOL\".\"PATH\" end,
+  \"ORDERING\"
+FROM \"WM_MAPPING\"
+JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"
+LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\"
+"
+);
+
+
+
 DROP TABLE IF EXISTS `VERSION`;
 
 CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`,

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 6790a06..a11e867 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -698,6 +698,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
+  // Note: the resource plan operations are going to be annotated with namespace based on the config
+  //       inside Hive.java. We don't want HS2 to be aware of namespaces beyond that, or to even see
+  //       that there exist other namespaces, because one HS2 always operates inside just one and we
+  //       don't want this complexity to bleed everywhere. Therefore, this code doesn't care about
+  //       namespaces - Hive.java will transparently scope everything. That's the idea anyway.
   private int alterResourcePlan(Hive db, AlterResourcePlanDesc desc) throws HiveException {
     if (desc.shouldValidate()) {
       WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());
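
To make the note above concrete, here is the call flow condensed from this commit (no new code, just the two layers side by side): DDLTask stays namespace-unaware, and Hive.java attaches the configured namespace right before calling the metastore client.

    // DDLTask.java: no namespace anywhere in the DDL layer.
    WMValidateResourcePlanResponse result = db.validateResourcePlan(desc.getResourcePlanName());

    // Hive.java: the namespace comes from hive.server2.wm.namespace and is passed
    // to the metastore client, so the scoping happens in exactly one place.
    public WMValidateResourcePlanResponse validateResourcePlan(String rpName) throws HiveException {
      try {
        String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
        return getMSC().validateResourcePlan(rpName, ns);
      } catch (Exception e) {
        throw new HiveException(e);
      }
    }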

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 012a670..11d8f0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -5637,9 +5637,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
-
   public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName, boolean ifNotExists)
       throws HiveException {
+    String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+    if (resourcePlan.isSetNs() && !ns.equals(resourcePlan.getNs())) {
+      throw new HiveException("Cannot create a plan in a different NS; was "
+          + resourcePlan.getNs() + ", configured " + ns);
+    }
+    resourcePlan.setNs(ns);
+
     try {
       getMSC().createResourcePlan(resourcePlan, copyFromName);
     } catch (AlreadyExistsException e) {
@@ -5653,7 +5659,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public WMFullResourcePlan getResourcePlan(String rpName) throws HiveException {
     try {
-      return getMSC().getResourcePlan(rpName);
+      return getMSC().getResourcePlan(rpName, conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE));
     } catch (NoSuchObjectException e) {
       return null;
     } catch (Exception e) {
@@ -5663,7 +5669,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public List<WMResourcePlan> getAllResourcePlans() throws HiveException {
     try {
-      return getMSC().getAllResourcePlans();
+      return getMSC().getAllResourcePlans(conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE));
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -5671,7 +5677,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void dropResourcePlan(String rpName, boolean ifExists) throws HiveException {
     try {
-      getMSC().dropResourcePlan(rpName);
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      getMSC().dropResourcePlan(rpName, ns);
     } catch (NoSuchObjectException e) {
       if (!ifExists) {
         throw new HiveException(e, ErrorMsg.RESOURCE_PLAN_NOT_EXISTS, rpName);
@@ -5684,7 +5691,13 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public WMFullResourcePlan alterResourcePlan(String rpName, WMNullableResourcePlan resourcePlan,
       boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) throws HiveException {
     try {
-      return getMSC().alterResourcePlan(rpName, resourcePlan, canActivateDisabled,
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (resourcePlan.isSetNs() && !ns.equals(resourcePlan.getNs())) {
+        throw new HiveException("Cannot modify a plan in a different NS; was "
+            + resourcePlan.getNs() + ", configured " + ns);
+      }
+      resourcePlan.setNs(ns);
+      return getMSC().alterResourcePlan(rpName, ns, resourcePlan, canActivateDisabled,
           isForceDeactivate, isReplace);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5693,7 +5706,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public WMFullResourcePlan getActiveResourcePlan() throws HiveException {
     try {
-      return getMSC().getActiveResourcePlan();
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      return getMSC().getActiveResourcePlan(ns);
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -5701,7 +5715,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public WMValidateResourcePlanResponse validateResourcePlan(String rpName) throws HiveException {
     try {
-      return getMSC().validateResourcePlan(rpName);
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      return getMSC().validateResourcePlan(rpName, ns);
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -5709,6 +5724,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void createWMTrigger(WMTrigger trigger) throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (trigger.isSetNs() && !ns.equals(trigger.getNs())) {
+        throw new HiveException("Cannot create a trigger in a different NS; was "
+            + trigger.getNs() + ", configured " + ns);
+      }
+      trigger.setNs(ns);
       getMSC().createWMTrigger(trigger);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5717,6 +5738,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void alterWMTrigger(WMTrigger trigger) throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (trigger.isSetNs() && !ns.equals(trigger.getNs())) {
+        throw new HiveException("Cannot modify a trigger in a different NS; was "
+            + trigger.getNs() + ", configured " + ns);
+      }
+      trigger.setNs(ns);
       getMSC().alterWMTrigger(trigger);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5725,7 +5752,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void dropWMTrigger(String rpName, String triggerName) throws HiveException {
     try {
-      getMSC().dropWMTrigger(rpName, triggerName);
+      getMSC().dropWMTrigger(rpName, triggerName, conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE));
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -5733,6 +5760,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void createWMPool(WMPool pool) throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (pool.isSetNs() && !ns.equals(pool.getNs())) {
+        throw new HiveException("Cannot create a pool in a different NS; was "
+            + pool.getNs() + ", configured " + ns);
+      }
+      pool.setNs(ns);
       getMSC().createWMPool(pool);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5741,6 +5774,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void alterWMPool(WMNullablePool pool, String poolPath) throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (pool.isSetNs() && !ns.equals(pool.getNs())) {
+        throw new HiveException("Cannot modify a pool in a different NS; was "
+            + pool.getNs() + ", configured " + ns);
+      }
+      pool.setNs(ns);
       getMSC().alterWMPool(pool, poolPath);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5749,7 +5788,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void dropWMPool(String resourcePlanName, String poolPath) throws HiveException {
     try {
-      getMSC().dropWMPool(resourcePlanName, poolPath);
+      getMSC().dropWMPool(resourcePlanName, poolPath,
+          conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE));
     } catch (Exception e) {
       throw new HiveException(e);
     }
@@ -5758,6 +5798,12 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate)
       throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (mapping.isSetNs() && !ns.equals(mapping.getNs())) {
+        throw new HiveException("Cannot create a mapping in a different NS; was "
+            + mapping.getNs() + ", configured " + ns);
+      }
+      mapping.setNs(ns);
       getMSC().createOrUpdateWMMapping(mapping, isUpdate);
     } catch (Exception e) {
       throw new HiveException(e);
@@ -5766,17 +5812,24 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public void dropWMMapping(WMMapping mapping) throws HiveException {
     try {
+      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
+      if (mapping.isSetNs() && !ns.equals(mapping.getNs())) {
+        throw new HiveException("Cannot modify a mapping in a different NS; was "
+            + mapping.getNs() + ", configured " + ns);
+      }
+      mapping.setNs(ns);
       getMSC().dropWMMapping(mapping);
     } catch (Exception e) {
       throw new HiveException(e);
     }
   }
 
-
+  // TODO: eh
   public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
       String poolPath, boolean shouldDrop) throws HiveException {
     try {
-      getMSC().createOrDropTriggerToPoolMapping(resourcePlanName, triggerName, poolPath, shouldDrop);
+      getMSC().createOrDropTriggerToPoolMapping(resourcePlanName, triggerName, poolPath,
+          shouldDrop, conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE));
     } catch (Exception e) {
       throw new HiveException(e);
     }
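
The check-and-stamp pattern above (reject an object whose ns is already set to a different value, then stamp it with the configured namespace) repeats for plans, triggers, pools and mappings. A hypothetical helper capturing the same idea is sketched below; it is not part of this patch, and the generated thrift WM classes share no common ns interface, which is presumably why the patch inlines the check at each call site.

    // Hypothetical helper (assumed, not in the patch); mirrors the inlined checks.
    private void checkAndStampNs(boolean isSetNs, String objectNs,
        java.util.function.Consumer<String> setNs, String what) throws HiveException {
      String ns = conf.getVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE);
      if (isSetNs && !ns.equals(objectNs)) {
        throw new HiveException("Cannot " + what + " in a different NS; was "
            + objectNs + ", configured " + ns);
      }
      setNs.accept(ns);
    }

    // Example use, e.g. in createWMTrigger:
    //   checkAndStampNs(trigger.isSetNs(), trigger.getNs(), trigger::setNs, "create a trigger");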

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index e57db93..8d55fec 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -26,6 +26,7 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -37,7 +38,13 @@ import org.apache.hadoop.hive.metastore.PartitionDropOptions;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMPool;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -59,8 +66,10 @@ import org.apache.logging.log4j.core.config.Configuration;
 import org.apache.logging.log4j.core.config.LoggerConfig;
 import org.apache.thrift.protocol.TBinaryProtocol;
 import org.junit.Assert;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
 
 import junit.framework.TestCase;
 
@@ -76,20 +85,21 @@ public class TestHive extends TestCase {
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
-    hiveConf
-    .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
+    hm = setUpImpl(hiveConf);
+  }
+
+  private static Hive setUpImpl(HiveConf hiveConf) throws Exception {
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
         "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
     // enable trash so it can be tested
     hiveConf.setFloat("fs.trash.checkpoint.interval", 30);  // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
     hiveConf.setFloat("fs.trash.interval", 30);             // FS_TRASH_INTERVAL_KEY (hadoop-2)
     SessionState.start(hiveConf);
     try {
-      hm = Hive.get(hiveConf);
+      return Hive.get(hiveConf);
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
-      System.err
-          .println("Unable to initialize Hive Metastore using configuration: \n "
-          + hiveConf);
+      System.err.println("Unable to initialize Hive Metastore using configuration: \n" + hiveConf);
       throw e;
     }
   }
@@ -422,6 +432,52 @@ public class TestHive extends TestCase {
     }
   }
 
+  public void testWmNamespaceHandling() throws Throwable {
+    HiveConf hiveConf = new HiveConf(this.getClass());
+    Hive hm = setUpImpl(hiveConf);
+    // TODO: threadlocals... Why is all this Hive client stuff like that?!!
+    final AtomicReference<Hive> hm2r = new AtomicReference<>();
+    Thread pointlessThread = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        HiveConf hiveConf2 = new HiveConf(this.getClass());
+        hiveConf2.setVar(ConfVars.HIVE_SERVER2_WM_NAMESPACE, "hm2");
+        try {
+          hm2r.set(setUpImpl(hiveConf2));
+        } catch (Exception e) {
+          System.err.println(StringUtils.stringifyException(e));
+        }
+      }
+    });
+    pointlessThread.start();
+    pointlessThread.join();
+    Hive hm2 = hm2r.get();
+    assertNotNull(hm2);
+
+    hm.createResourcePlan(new WMResourcePlan("hm"), null, false);
+    assertEquals(1, hm.getAllResourcePlans().size());
+    assertEquals(0, hm2.getAllResourcePlans().size());
+    hm2.createResourcePlan(new WMResourcePlan("hm"), null, false);
+    WMNullableResourcePlan changes = new WMNullableResourcePlan();
+    changes.setStatus(WMResourcePlanStatus.ACTIVE);
+    hm.alterResourcePlan("hm", changes, true, false, false);
+    // We should not be able to modify the active plan.
+    WMPool pool = new WMPool("hm", "foo");
+    pool.setAllocFraction(0);
+    pool.setQueryParallelism(1);
+    try {
+      hm.createWMPool(pool);
+      fail("Expected exception");
+    } catch (HiveException e) {
+    }
+    // But we should still be able to modify the other plan.
+    pool.unsetNs(); // The call to create sets the namespace.
+    hm2.createWMPool(pool);
+    // Make the 2nd plan active in a different namespace.
+    changes.unsetNs();
+    hm2.alterResourcePlan("hm", changes, true, false, false);
+  }
+
   public void testDropTableTrash() throws Throwable {
     if (!ShimLoader.getHadoopShims().supportTrashFeature()) {
       return; // it's hadoop-1

http://git-wip-us.apache.org/repos/asf/hive/blob/5258c67e/ql/src/test/queries/clientpositive/resourceplan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/resourceplan.q b/ql/src/test/queries/clientpositive/resourceplan.q
index fae9701..46aae72 100644
--- a/ql/src/test/queries/clientpositive/resourceplan.q
+++ b/ql/src/test/queries/clientpositive/resourceplan.q
@@ -10,7 +10,7 @@ set hive.cbo.enable=false;
 show grant user hive_test_user;
 
 -- Initialize the hive schema.
-source ../../metastore/scripts/upgrade/hive/hive-schema-3.1.0.hive.sql;
+source ../../metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql;
 
 -- SORT_QUERY_RESULTS
 
@@ -240,13 +240,13 @@ CREATE POOL plan_2.default.c1 WITH
     ALLOC_FRACTION=0.3, QUERY_PARALLELISM=3, SCHEDULING_POLICY='fair';
 
 CREATE POOL plan_2.default.c2 WITH
-    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.7;
+    QUERY_PARALLELISM=2, SCHEDULING_POLICY='fair', ALLOC_FRACTION=0.75;
 
 -- Cannot activate c1 + c2 = 1.0
 ALTER RESOURCE PLAN plan_2 VALIDATE;
 ALTER RESOURCE PLAN plan_2 ENABLE ACTIVATE;
 
-ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.5, QUERY_PARALLELISM = 1;
+ALTER POOL plan_2.default.c2 SET ALLOC_FRACTION = 0.7, QUERY_PARALLELISM = 1;
 ALTER POOL plan_2.default.c2 SET SCHEDULING_POLICY='fair';
 SELECT * FROM SYS.WM_POOLS;
 ALTER POOL plan_2.default.c2 UNSET SCHEDULING_POLICY;
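
A note on the fraction changes above (inferred from the diff, not stated in the commit message): raising c2 to 0.75 makes c1 + c2 = 0.3 + 0.75 = 1.05, so the ENABLE ACTIVATE that follows is still expected to fail, while the later ALTER POOL sets c2 to 0.7 so that the children of plan_2.default sum to exactly 1.0.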