Posted to commits@hive.apache.org by se...@apache.org on 2017/11/08 22:36:14 UTC

[1/2] hive git commit: HIVE-17902 : add notions of default pool and start adding unmanaged mapping (Sergey Shelukhin, reviewed by Prasanth Jayachandran, Harish Jaiprakash)

Repository: hive
Updated Branches:
  refs/heads/master b62d36510 -> a42314deb


http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 92a2788..c0ad739 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -20792,6 +20792,11 @@ void WMResourcePlan::__set_queryParallelism(const int32_t val) {
 __isset.queryParallelism = true;
 }
 
+void WMResourcePlan::__set_defaultPoolPath(const std::string& val) {
+  this->defaultPoolPath = val;
+__isset.defaultPoolPath = true;
+}
+
 uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -20840,6 +20845,14 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->defaultPoolPath);
+          this->__isset.defaultPoolPath = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -20873,6 +20886,11 @@ uint32_t WMResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) con
     xfer += oprot->writeI32(this->queryParallelism);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.defaultPoolPath) {
+    xfer += oprot->writeFieldBegin("defaultPoolPath", ::apache::thrift::protocol::T_STRING, 4);
+    xfer += oprot->writeString(this->defaultPoolPath);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -20883,6 +20901,7 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) {
   swap(a.name, b.name);
   swap(a.status, b.status);
   swap(a.queryParallelism, b.queryParallelism);
+  swap(a.defaultPoolPath, b.defaultPoolPath);
   swap(a.__isset, b.__isset);
 }
 
@@ -20890,12 +20909,14 @@ WMResourcePlan::WMResourcePlan(const WMResourcePlan& other864) {
   name = other864.name;
   status = other864.status;
   queryParallelism = other864.queryParallelism;
+  defaultPoolPath = other864.defaultPoolPath;
   __isset = other864.__isset;
 }
 WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other865) {
   name = other865.name;
   status = other865.status;
   queryParallelism = other865.queryParallelism;
+  defaultPoolPath = other865.defaultPoolPath;
   __isset = other865.__isset;
   return *this;
 }
@@ -20905,6 +20926,7 @@ void WMResourcePlan::printTo(std::ostream& out) const {
   out << "name=" << to_string(name);
   out << ", " << "status="; (__isset.status ? (out << to_string(status)) : (out << "<null>"));
   out << ", " << "queryParallelism="; (__isset.queryParallelism ? (out << to_string(queryParallelism)) : (out << "<null>"));
+  out << ", " << "defaultPoolPath="; (__isset.defaultPoolPath ? (out << to_string(defaultPoolPath)) : (out << "<null>"));
   out << ")";
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 63d3762..cdf0570 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -8509,9 +8509,10 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj)
 }
 
 typedef struct _WMResourcePlan__isset {
-  _WMResourcePlan__isset() : status(false), queryParallelism(false) {}
+  _WMResourcePlan__isset() : status(false), queryParallelism(false), defaultPoolPath(false) {}
   bool status :1;
   bool queryParallelism :1;
+  bool defaultPoolPath :1;
 } _WMResourcePlan__isset;
 
 class WMResourcePlan {
@@ -8519,13 +8520,14 @@ class WMResourcePlan {
 
   WMResourcePlan(const WMResourcePlan&);
   WMResourcePlan& operator=(const WMResourcePlan&);
-  WMResourcePlan() : name(), status((WMResourcePlanStatus::type)0), queryParallelism(0) {
+  WMResourcePlan() : name(), status((WMResourcePlanStatus::type)0), queryParallelism(0), defaultPoolPath() {
   }
 
   virtual ~WMResourcePlan() throw();
   std::string name;
   WMResourcePlanStatus::type status;
   int32_t queryParallelism;
+  std::string defaultPoolPath;
 
   _WMResourcePlan__isset __isset;
 
@@ -8535,6 +8537,8 @@ class WMResourcePlan {
 
   void __set_queryParallelism(const int32_t val);
 
+  void __set_defaultPoolPath(const std::string& val);
+
   bool operator == (const WMResourcePlan & rhs) const
   {
     if (!(name == rhs.name))
@@ -8547,6 +8551,10 @@ class WMResourcePlan {
       return false;
     else if (__isset.queryParallelism && !(queryParallelism == rhs.queryParallelism))
       return false;
+    if (__isset.defaultPoolPath != rhs.__isset.defaultPoolPath)
+      return false;
+    else if (__isset.defaultPoolPath && !(defaultPoolPath == rhs.defaultPoolPath))
+      return false;
     return true;
   }
   bool operator != (const WMResourcePlan &rhs) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
index 8b0d71c..8fc8311 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Autogenerated by Thrift Compiler (0.9.3)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -7,13 +7,17 @@
 package org.apache.hadoop.hive.metastore.api;
 
 
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
 public enum ClientCapability implements org.apache.thrift.TEnum {
   TEST_CAPABILITY(1),
   INSERT_ONLY_TABLES(2);
 
   private final int value;
 
-  ClientCapability(int value) {
+  private ClientCapability(int value) {
     this.value = value;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java
index 795ece6..d9f8c02 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMResourcePlan.java
@@ -41,6 +41,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32, (short)2);
   private static final org.apache.thrift.protocol.TField QUERY_PARALLELISM_FIELD_DESC = new org.apache.thrift.protocol.TField("queryParallelism", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField DEFAULT_POOL_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPoolPath", org.apache.thrift.protocol.TType.STRING, (short)4);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -51,6 +52,7 @@ import org.slf4j.LoggerFactory;
   private String name; // required
   private WMResourcePlanStatus status; // optional
   private int queryParallelism; // optional
+  private String defaultPoolPath; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -60,7 +62,8 @@ import org.slf4j.LoggerFactory;
      * @see WMResourcePlanStatus
      */
     STATUS((short)2, "status"),
-    QUERY_PARALLELISM((short)3, "queryParallelism");
+    QUERY_PARALLELISM((short)3, "queryParallelism"),
+    DEFAULT_POOL_PATH((short)4, "defaultPoolPath");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -81,6 +84,8 @@ import org.slf4j.LoggerFactory;
           return STATUS;
         case 3: // QUERY_PARALLELISM
           return QUERY_PARALLELISM;
+        case 4: // DEFAULT_POOL_PATH
+          return DEFAULT_POOL_PATH;
         default:
           return null;
       }
@@ -123,7 +128,7 @@ import org.slf4j.LoggerFactory;
   // isset id assignments
   private static final int __QUERYPARALLELISM_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.STATUS,_Fields.QUERY_PARALLELISM};
+  private static final _Fields optionals[] = {_Fields.STATUS,_Fields.QUERY_PARALLELISM,_Fields.DEFAULT_POOL_PATH};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -133,6 +138,8 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, WMResourcePlanStatus.class)));
     tmpMap.put(_Fields.QUERY_PARALLELISM, new org.apache.thrift.meta_data.FieldMetaData("queryParallelism", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.DEFAULT_POOL_PATH, new org.apache.thrift.meta_data.FieldMetaData("defaultPoolPath", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(WMResourcePlan.class, metaDataMap);
   }
@@ -159,6 +166,9 @@ import org.slf4j.LoggerFactory;
       this.status = other.status;
     }
     this.queryParallelism = other.queryParallelism;
+    if (other.isSetDefaultPoolPath()) {
+      this.defaultPoolPath = other.defaultPoolPath;
+    }
   }
 
   public WMResourcePlan deepCopy() {
@@ -171,6 +181,7 @@ import org.slf4j.LoggerFactory;
     this.status = null;
     setQueryParallelismIsSet(false);
     this.queryParallelism = 0;
+    this.defaultPoolPath = null;
   }
 
   public String getName() {
@@ -249,6 +260,29 @@ import org.slf4j.LoggerFactory;
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERYPARALLELISM_ISSET_ID, value);
   }
 
+  public String getDefaultPoolPath() {
+    return this.defaultPoolPath;
+  }
+
+  public void setDefaultPoolPath(String defaultPoolPath) {
+    this.defaultPoolPath = defaultPoolPath;
+  }
+
+  public void unsetDefaultPoolPath() {
+    this.defaultPoolPath = null;
+  }
+
+  /** Returns true if field defaultPoolPath is set (has been assigned a value) and false otherwise */
+  public boolean isSetDefaultPoolPath() {
+    return this.defaultPoolPath != null;
+  }
+
+  public void setDefaultPoolPathIsSet(boolean value) {
+    if (!value) {
+      this.defaultPoolPath = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case NAME:
@@ -275,6 +309,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case DEFAULT_POOL_PATH:
+      if (value == null) {
+        unsetDefaultPoolPath();
+      } else {
+        setDefaultPoolPath((String)value);
+      }
+      break;
+
     }
   }
 
@@ -289,6 +331,9 @@ import org.slf4j.LoggerFactory;
     case QUERY_PARALLELISM:
       return getQueryParallelism();
 
+    case DEFAULT_POOL_PATH:
+      return getDefaultPoolPath();
+
     }
     throw new IllegalStateException();
   }
@@ -306,6 +351,8 @@ import org.slf4j.LoggerFactory;
       return isSetStatus();
     case QUERY_PARALLELISM:
       return isSetQueryParallelism();
+    case DEFAULT_POOL_PATH:
+      return isSetDefaultPoolPath();
     }
     throw new IllegalStateException();
   }
@@ -350,6 +397,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_defaultPoolPath = true && this.isSetDefaultPoolPath();
+    boolean that_present_defaultPoolPath = true && that.isSetDefaultPoolPath();
+    if (this_present_defaultPoolPath || that_present_defaultPoolPath) {
+      if (!(this_present_defaultPoolPath && that_present_defaultPoolPath))
+        return false;
+      if (!this.defaultPoolPath.equals(that.defaultPoolPath))
+        return false;
+    }
+
     return true;
   }
 
@@ -372,6 +428,11 @@ import org.slf4j.LoggerFactory;
     if (present_queryParallelism)
       list.add(queryParallelism);
 
+    boolean present_defaultPoolPath = true && (isSetDefaultPoolPath());
+    list.add(present_defaultPoolPath);
+    if (present_defaultPoolPath)
+      list.add(defaultPoolPath);
+
     return list.hashCode();
   }
 
@@ -413,6 +474,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetDefaultPoolPath()).compareTo(other.isSetDefaultPoolPath());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDefaultPoolPath()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultPoolPath, other.defaultPoolPath);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -456,6 +527,16 @@ import org.slf4j.LoggerFactory;
       sb.append(this.queryParallelism);
       first = false;
     }
+    if (isSetDefaultPoolPath()) {
+      if (!first) sb.append(", ");
+      sb.append("defaultPoolPath:");
+      if (this.defaultPoolPath == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.defaultPoolPath);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -529,6 +610,14 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 4: // DEFAULT_POOL_PATH
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.defaultPoolPath = iprot.readString();
+              struct.setDefaultPoolPathIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -559,6 +648,13 @@ import org.slf4j.LoggerFactory;
         oprot.writeI32(struct.queryParallelism);
         oprot.writeFieldEnd();
       }
+      if (struct.defaultPoolPath != null) {
+        if (struct.isSetDefaultPoolPath()) {
+          oprot.writeFieldBegin(DEFAULT_POOL_PATH_FIELD_DESC);
+          oprot.writeString(struct.defaultPoolPath);
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -584,13 +680,19 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetQueryParallelism()) {
         optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetDefaultPoolPath()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetStatus()) {
         oprot.writeI32(struct.status.getValue());
       }
       if (struct.isSetQueryParallelism()) {
         oprot.writeI32(struct.queryParallelism);
       }
+      if (struct.isSetDefaultPoolPath()) {
+        oprot.writeString(struct.defaultPoolPath);
+      }
     }
 
     @Override
@@ -598,7 +700,7 @@ import org.slf4j.LoggerFactory;
       TTupleProtocol iprot = (TTupleProtocol) prot;
       struct.name = iprot.readString();
       struct.setNameIsSet(true);
-      BitSet incoming = iprot.readBitSet(2);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.status = org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus.findByValue(iprot.readI32());
         struct.setStatusIsSet(true);
@@ -607,6 +709,10 @@ import org.slf4j.LoggerFactory;
         struct.queryParallelism = iprot.readI32();
         struct.setQueryParallelismIsSet(true);
       }
+      if (incoming.get(2)) {
+        struct.defaultPoolPath = iprot.readString();
+        struct.setDefaultPoolPathIsSet(true);
+      }
     }
   }
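
The generated bean follows the usual Thrift convention for optional fields: the field counts as set only while it holds a non-null value, and only set fields are written to the wire. A minimal usage sketch (illustrative only, not part of this commit):

  WMResourcePlan plan = new WMResourcePlan("rp1");
  plan.isSetDefaultPoolPath();           // false: optional fields start unset
  plan.setDefaultPoolPath("default");
  plan.isSetDefaultPoolPath();           // true once a non-null value is assigned
  plan.unsetDefaultPoolPath();           // nulls the field, so write() skips field 4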
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index 716638c..8dc556b 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -20458,6 +20458,10 @@ class WMResourcePlan {
    * @var int
    */
   public $queryParallelism = null;
+  /**
+   * @var string
+   */
+  public $defaultPoolPath = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -20474,6 +20478,10 @@ class WMResourcePlan {
           'var' => 'queryParallelism',
           'type' => TType::I32,
           ),
+        4 => array(
+          'var' => 'defaultPoolPath',
+          'type' => TType::STRING,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -20486,6 +20494,9 @@ class WMResourcePlan {
       if (isset($vals['queryParallelism'])) {
         $this->queryParallelism = $vals['queryParallelism'];
       }
+      if (isset($vals['defaultPoolPath'])) {
+        $this->defaultPoolPath = $vals['defaultPoolPath'];
+      }
     }
   }
 
@@ -20529,6 +20540,13 @@ class WMResourcePlan {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 4:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->defaultPoolPath);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -20557,6 +20575,11 @@ class WMResourcePlan {
       $xfer += $output->writeI32($this->queryParallelism);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->defaultPoolPath !== null) {
+      $xfer += $output->writeFieldBegin('defaultPoolPath', TType::STRING, 4);
+      $xfer += $output->writeString($this->defaultPoolPath);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index efb5708..7d7d28d 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -14428,6 +14428,7 @@ class WMResourcePlan:
    - name
    - status
    - queryParallelism
+   - defaultPoolPath
   """
 
   thrift_spec = (
@@ -14435,12 +14436,14 @@ class WMResourcePlan:
     (1, TType.STRING, 'name', None, None, ), # 1
     (2, TType.I32, 'status', None, None, ), # 2
     (3, TType.I32, 'queryParallelism', None, None, ), # 3
+    (4, TType.STRING, 'defaultPoolPath', None, None, ), # 4
   )
 
-  def __init__(self, name=None, status=None, queryParallelism=None,):
+  def __init__(self, name=None, status=None, queryParallelism=None, defaultPoolPath=None,):
     self.name = name
     self.status = status
     self.queryParallelism = queryParallelism
+    self.defaultPoolPath = defaultPoolPath
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -14466,6 +14469,11 @@ class WMResourcePlan:
           self.queryParallelism = iprot.readI32()
         else:
           iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.defaultPoolPath = iprot.readString()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -14488,6 +14496,10 @@ class WMResourcePlan:
       oprot.writeFieldBegin('queryParallelism', TType.I32, 3)
       oprot.writeI32(self.queryParallelism)
       oprot.writeFieldEnd()
+    if self.defaultPoolPath is not None:
+      oprot.writeFieldBegin('defaultPoolPath', TType.STRING, 4)
+      oprot.writeString(self.defaultPoolPath)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -14502,6 +14514,7 @@ class WMResourcePlan:
     value = (value * 31) ^ hash(self.name)
     value = (value * 31) ^ hash(self.status)
     value = (value * 31) ^ hash(self.queryParallelism)
+    value = (value * 31) ^ hash(self.defaultPoolPath)
     return value
 
   def __repr__(self):

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 1b94db2..192f881 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3258,11 +3258,13 @@ class WMResourcePlan
   NAME = 1
   STATUS = 2
   QUERYPARALLELISM = 3
+  DEFAULTPOOLPATH = 4
 
   FIELDS = {
     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
     STATUS => {:type => ::Thrift::Types::I32, :name => 'status', :optional => true, :enum_class => ::WMResourcePlanStatus},
-    QUERYPARALLELISM => {:type => ::Thrift::Types::I32, :name => 'queryParallelism', :optional => true}
+    QUERYPARALLELISM => {:type => ::Thrift::Types::I32, :name => 'queryParallelism', :optional => true},
+    DEFAULTPOOLPATH => {:type => ::Thrift::Types::STRING, :name => 'defaultPoolPath', :optional => true}
   }
 
   def struct_fields; FIELDS; end

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index eff62aa..188135c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -21,16 +21,12 @@ package org.apache.hadoop.hive.metastore;
 import static org.apache.commons.lang.StringUtils.join;
 import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
 
+import com.google.common.collect.Sets;
 import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
-
 import org.apache.hadoop.hive.metastore.api.WMMapping;
-
 import org.apache.hadoop.hive.metastore.model.MWMMapping;
-
 import org.apache.hadoop.hive.metastore.api.WMPool;
-
 import org.apache.hadoop.hive.metastore.model.MWMPool;
-
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 
 import java.io.IOException;
@@ -9503,17 +9499,25 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan)
+  public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
       throws AlreadyExistsException, MetaException {
     boolean commited = false;
     String rpName = normalizeIdentifier(resourcePlan.getName());
     Integer queryParallelism = resourcePlan.isSetQueryParallelism() ?
         resourcePlan.getQueryParallelism() : null;
-    MWMResourcePlan rp = new MWMResourcePlan(rpName, queryParallelism,
-        MWMResourcePlan.Status.DISABLED);
+    MWMResourcePlan rp = new MWMResourcePlan(
+        rpName, queryParallelism, MWMResourcePlan.Status.DISABLED);
     try {
       openTransaction();
       pm.makePersistent(rp);
+      // TODO: ideally, this should be moved outside to HiveMetaStore to be shared between
+      //       all the RawStore-s. Right now there's no method to create a pool.
+      if (defaultPoolSize > 0) {
+        MWMPool defaultPool = new MWMPool(rp, "default", null, 1.0, defaultPoolSize, null);
+        pm.makePersistent(defaultPool);
+        rp.setPools(Sets.newHashSet(defaultPool));
+        rp.setDefaultPool(defaultPool);
+      }
       commited = commitTransaction();
     } catch (Exception e) {
       checkForConstraintException(e, "Resource plan already exists: ");
@@ -9535,6 +9539,9 @@ public class ObjectStore implements RawStore, Configurable {
     if (mplan.getQueryParallelism() != null) {
       rp.setQueryParallelism(mplan.getQueryParallelism());
     }
+    if (mplan.getDefaultPool() != null) {
+      rp.setDefaultPoolPath(mplan.getDefaultPool().getPath());
+    }
     return rp;
   }
 
@@ -9663,6 +9670,14 @@ public class ObjectStore implements RawStore, Configurable {
         result = switchStatus(
             name, mResourcePlan, resourcePlan.getStatus().name(), canActivateDisabled);
       }
+      if (resourcePlan.isSetDefaultPoolPath()) {
+        MWMPool pool = getPoolByPath(resourcePlan, resourcePlan.getDefaultPoolPath());
+        if (pool == null) {
+          throw new NoSuchObjectException(
+              "Cannot find pool: " + resourcePlan.getDefaultPoolPath());
+        }
+        mResourcePlan.setDefaultPool(pool);
+      }
       commited = commitTransaction();
       return result;
     } catch (Exception e) {
@@ -9673,6 +9688,25 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
+
+  private MWMPool getPoolByPath(WMResourcePlan parent, String path) {
+    // Note: this doesn't do recursion because we will do that on create/alter.
+    boolean commited = false;
+    Query query = null;
+    try {
+      openTransaction();
+      query = pm.newQuery(MWMPool.class, "path == pname and resourcePlan == rp");
+      query.declareParameters("java.lang.String pname, MWMResourcePlan rp");
+      query.setUnique(true);
+      MWMPool pool = (MWMPool) query.execute(path, parent);
+      pm.retrieve(pool);
+      commited = commitTransaction();
+      return pool;
+    } finally {
+      rollbackAndCleanup(commited, query);
+    }
+  }
+
   @Override
   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
     boolean commited = false;
@@ -9683,17 +9717,16 @@ public class ObjectStore implements RawStore, Configurable {
       query.declareParameters("java.lang.String activeStatus");
       query.setUnique(true);
       MWMResourcePlan mResourcePlan = (MWMResourcePlan) query.execute(Status.ACTIVE.toString());
+      commited = commitTransaction();
       if (mResourcePlan == null) {
         return null; // No active plan.
       }
-      commited = commitTransaction();
       return fullFromMResourcePlan(mResourcePlan);
     } finally {
       rollbackAndCleanup(commited, query);
     }
   }
 
-
   private WMFullResourcePlan switchStatus(String name, MWMResourcePlan mResourcePlan,
       String status, boolean canActivateDisabled) throws InvalidOperationException {
     Status currentStatus = mResourcePlan.getStatus();
@@ -9801,11 +9834,23 @@ public class ObjectStore implements RawStore, Configurable {
     Query query = null;
     try {
       openTransaction();
-      query = pm.newQuery(MWMResourcePlan.class, "name == resourcePlanName && status != \"ACTIVE\"");
-      query.declareParameters("java.lang.String resourcePlanName");
-      if (query.deletePersistentAll(name) == 0) {
-        throw new NoSuchObjectException("Cannot find resourcePlan: " + name + " or its active");
-      }
+      query = pm.newQuery(MWMResourcePlan.class, "name == rpname");
+      query.declareParameters("java.lang.String rpname");
+      query.setUnique(true);
+      MWMResourcePlan resourcePlan = (MWMResourcePlan) query.execute(name);
+      pm.retrieve(resourcePlan);
+      if (resourcePlan == null) {
+        throw new NoSuchObjectException("There is no resource plan named: " + name);
+      }
+      if (resourcePlan.getStatus() == Status.ACTIVE) {
+        throw new MetaException("Cannot drop an active resource plan");
+      }
+      // First, drop all the dependencies.
+      resourcePlan.setDefaultPool(null);
+      pm.deletePersistentAll(resourcePlan.getTriggers());
+      pm.deletePersistentAll(resourcePlan.getMappings());
+      pm.deletePersistentAll(resourcePlan.getPools());
+      pm.deletePersistent(resourcePlan);
       commited = commitTransaction();
     } finally {
       rollbackAndCleanup(commited, query);
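
With the new signature, a caller sketch might look like the following (hypothetical test-style code; createResourcePlan throws AlreadyExistsException/MetaException):

  // A positive defaultPoolSize makes the new plan start with a "default" pool at
  // allocFraction 1.0, which is also recorded as the plan's default pool; zero or
  // a negative size skips pool creation entirely.
  objectStore.createResourcePlan(new WMResourcePlan("rp1"), 4);
  WMResourcePlan saved = objectStore.getResourcePlan("rp1");
  String path = saved.getDefaultPoolPath(); // "default", filled in by fromMResourcePlan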

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 4fe70a3..db148a1 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -751,7 +751,7 @@ public interface RawStore extends Configurable {
    */
   String getMetastoreDbUuid() throws MetaException;
 
-  void createResourcePlan(WMResourcePlan resourcePlan)
+  void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
       throws AlreadyExistsException, MetaException;
 
   WMResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 43d72e4..c61f27b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -2242,9 +2242,9 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan)
+  public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
       throws AlreadyExistsException, MetaException {
-    rawStore.createResourcePlan(resourcePlan);
+    rawStore.createResourcePlan(resourcePlan, defaultPoolSize);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index f8ebc12..8e35d44 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -740,6 +740,10 @@ public class MetastoreConf {
     WRITE_SET_REAPER_INTERVAL("metastore.writeset.reaper.interval",
         "hive.writeset.reaper.interval", 60, TimeUnit.SECONDS,
         "Frequency of WriteSet reaper runs"),
+    WM_DEFAULT_POOL_SIZE("metastore.wm.default.pool.size",
+        "hive.metastore.wm.default.pool.size", 4,
+        "The size of a default pool to create when creating an empty resource plan;\n" +
+        "If not positive, no default pool will be created."),
 
     // Hive values we have copied and use as is
     // These two are used to indicate that we are running tests
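
A minimal sketch of reading the new setting through MetastoreConf's typed accessors (assumed usage; either the metastore.* key or the legacy hive.metastore.* key resolves to the same variable):

  Configuration conf = MetastoreConf.newMetastoreConf();
  int defaultPoolSize = MetastoreConf.getIntVar(conf, ConfVars.WM_DEFAULT_POOL_SIZE);
  // defaultPoolSize is 4 unless overridden; values <= 0 disable default pool creation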

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
index 0c1e786..e00a020 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMPool.java
@@ -23,11 +23,11 @@ import java.util.Set;
 public class MWMPool {
   private MWMResourcePlan resourcePlan;
   private String path;
-  private MWMPool parentPool;
   private Double allocFraction;
   private Integer queryParallelism;
   private Set<MWMTrigger> triggers;
   private String schedulingPolicy;
+  private MWMPool parentPool;
 
   public MWMPool() {}
 
@@ -35,7 +35,6 @@ public class MWMPool {
       Double allocFraction, Integer queryParallelism, String schedulingPolicy) {
     this.resourcePlan = resourcePlan;
     this.path = path;
-    this.parentPool = parentPool;
     this.allocFraction = allocFraction;
     this.queryParallelism = queryParallelism;
     this.schedulingPolicy = schedulingPolicy;
@@ -57,14 +56,6 @@ public class MWMPool {
     this.path = path;
   }
 
-  public MWMPool getParentPool() {
-    return parentPool;
-  }
-
-  public void setParentPool(MWMPool parentPool) {
-    this.parentPool = parentPool;
-  }
-
   public Double getAllocFraction() {
     return allocFraction;
   }
@@ -96,4 +87,12 @@ public class MWMPool {
   public void setSchedulingPolicy(String schedulingPolicy) {
     this.schedulingPolicy = schedulingPolicy;
   }
+
+  public MWMPool getParentPool() {
+    return parentPool;
+  }
+
+  public void setParentPool(MWMPool parentPool) {
+    this.parentPool = parentPool;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMResourcePlan.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMResourcePlan.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMResourcePlan.java
index 27a1bd8..40110ce 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMResourcePlan.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MWMResourcePlan.java
@@ -30,6 +30,8 @@ public class MWMResourcePlan {
   private Set<MWMPool> pools;
   private Set<MWMTrigger> triggers;
   private Set<MWMMapping> mappings;
+  private MWMPool defaultPool;
+
 
   public enum Status {
     ACTIVE,
@@ -77,6 +79,14 @@ public class MWMResourcePlan {
     this.pools = pools;
   }
 
+  public MWMPool getDefaultPool() {
+    return defaultPool;
+  }
+
+  public void setDefaultPool(MWMPool defaultPool) {
+    this.defaultPool = defaultPool;
+  }
+
   public Set<MWMTrigger> getTriggers() {
     return triggers;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index b271e02..3242630 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -1095,6 +1095,9 @@
       <field name="status">
         <column name="STATUS" jdbc-type="string" allows-null="false"/>
       </field>
+      <field name="defaultPool">
+        <column name="DEFAULT_POOL_ID" jdbc-type="integer" allows-null="true"/>
+      </field>
       <index name="UniqueResourcePlan" unique="true">
         <column name="NAME"/>
       </index>

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 9f92bf6..4832a6f 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -1043,6 +1043,7 @@ struct WMResourcePlan {
   1: required string name;
   2: optional WMResourcePlanStatus status;
   3: optional i32 queryParallelism;
+  4: optional string defaultPoolPath;
 }
 
 struct WMPool {
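
Because field 4 is optional, the change is wire-compatible in both directions: readers that do not know the field skip it (the generated "default: skip" branches above), and writers omit it unless it is set. A round-trip sketch using the generated Java bean and the standard Thrift (de)serializers (illustrative test code, not part of this commit):

  WMResourcePlan in = new WMResourcePlan("rp1");
  in.setDefaultPoolPath("default");
  byte[] bytes = new TSerializer(new TBinaryProtocol.Factory()).serialize(in);
  WMResourcePlan out = new WMResourcePlan();
  new TDeserializer(new TBinaryProtocol.Factory()).deserialize(out, bytes);
  // out.getDefaultPoolPath() is "default"; an unset field would simply be absent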


[2/2] hive git commit: HIVE-17902 : add notions of default pool and start adding unmanaged mapping (Sergey Shelukhin, reviewed by Prasanth Jayachandran, Harish Jaiprakash)

Posted by se...@apache.org.
HIVE-17902 : add notions of default pool and start adding unmanaged mapping (Sergey Shelukhin, reviewed by Prasanth Jayachandran, Harish Jaiprakash)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a42314de
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a42314de
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a42314de

Branch: refs/heads/master
Commit: a42314deb07a1c8d9d4daeaa799ad1c1ebb0c6c9
Parents: b62d365
Author: sergey <se...@apache.org>
Authored: Wed Nov 8 14:32:36 2017 -0800
Committer: sergey <se...@apache.org>
Committed: Wed Nov 8 14:35:52 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  10 +-
 .../listener/DummyRawStoreFailEvent.java        |   4 +-
 .../hive/jdbc/TestTriggersWorkloadManager.java  |   4 +-
 .../upgrade/derby/046-HIVE-17566.derby.sql      |   5 +-
 .../upgrade/derby/hive-schema-3.0.0.derby.sql   |  12 +-
 .../upgrade/hive/hive-schema-3.0.0.hive.sql     |  10 +-
 .../upgrade/mssql/031-HIVE-17566.mssql.sql      |   7 +-
 .../upgrade/mssql/hive-schema-3.0.0.mssql.sql   |   7 +-
 .../upgrade/mysql/046-HIVE-17566.mysql.sql      |   5 +-
 .../upgrade/mysql/hive-schema-3.0.0.mysql.sql   |   5 +-
 .../upgrade/oracle/046-HIVE-17566.oracle.sql    |   6 +-
 .../upgrade/oracle/hive-schema-3.0.0.oracle.sql |   7 +-
 .../postgres/045-HIVE-17566.postgres.sql        |   7 +-
 .../postgres/hive-schema-3.0.0.postgres.sql     |   8 +-
 metastore/src/gen/thrift/gen-py/__init__.py     |   0
 .../hadoop/hive/metastore/HiveMetaStore.java    |   8 +-
 .../DummyRawStoreControlledCommit.java          |   4 +-
 .../DummyRawStoreForJdoConnection.java          |   3 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   4 +
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |  15 +--
 .../hive/ql/exec/tez/UserPoolMapping.java       |  56 +++++----
 .../hive/ql/exec/tez/WorkloadManager.java       |  38 +++++--
 .../ql/exec/tez/WorkloadManagerFederation.java  |  57 ++++++++++
 .../formatting/JsonMetaDataFormatter.java       |   3 +
 .../formatting/TextMetaDataFormatter.java       |   6 +
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  41 ++++---
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   2 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  22 +++-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |   2 +
 .../hive/ql/plan/AlterResourcePlanDesc.java     |  26 +++--
 .../hive/ql/exec/tez/TestWorkloadManager.java   |  88 +++++++-------
 .../test/queries/clientpositive/resourceplan.q  |   4 +
 .../clientpositive/llap/resourceplan.q.out      | 107 +++++++++--------
 .../apache/hive/service/server/HiveServer2.java |   4 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  22 ++++
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  12 +-
 .../hive/metastore/api/ClientCapability.java    |   8 +-
 .../hive/metastore/api/WMResourcePlan.java      | 114 ++++++++++++++++++-
 .../src/gen/thrift/gen-php/metastore/Types.php  |  23 ++++
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  15 ++-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   4 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  75 +++++++++---
 .../apache/hadoop/hive/metastore/RawStore.java  |   2 +-
 .../hive/metastore/cache/CachedStore.java       |   4 +-
 .../hive/metastore/conf/MetastoreConf.java      |   4 +
 .../hadoop/hive/metastore/model/MWMPool.java    |  19 ++--
 .../hive/metastore/model/MWMResourcePlan.java   |  10 ++
 .../src/main/resources/package.jdo              |   3 +
 .../src/main/thrift/hive_metastore.thrift       |   1 +
 49 files changed, 662 insertions(+), 241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index a26ea21..bd25bc7 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -288,7 +288,8 @@ public class HiveConf extends Configuration {
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL,
       HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
       HiveConf.ConfVars.METASTORE_FASTPATH,
-      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
+      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
+      HiveConf.ConfVars.METASTORE_WM_DEFAULT_POOL_SIZE
       };
 
   /**
@@ -669,7 +670,12 @@ public class HiveConf extends Configuration {
     METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
         "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
 
-    METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
+    METASTORE_WM_DEFAULT_POOL_SIZE("hive.metastore.wm.default.pool.size", 4,
+        "The size of a default pool to create when creating an empty resource plan;\n" +
+        "If not positive, no default pool will be created."),
+
+
+   METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
         "_INTERMEDIATE_ORIGINAL",
         "Intermediate dir suffixes used for archiving. Not important what they\n" +
         "are, as long as collisions are avoided"),

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 996c005..7196756 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -981,9 +981,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan)
+  public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
       throws AlreadyExistsException, MetaException {
-    objectStore.createResourcePlan(resourcePlan);
+    objectStore.createResourcePlan(resourcePlan, defaultPoolSize);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
index 012361a..79ba1f4 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestTriggersWorkloadManager.java
@@ -84,11 +84,9 @@ public class TestTriggersWorkloadManager extends TestTriggersTezSessionPoolManag
     WMPool pool = new WMPool("rp", "llap");
     pool.setAllocFraction(1.0f);
     pool.setQueryParallelism(1);
-    WMMapping mapping = new WMMapping("rp", "DEFAULT", "");
-    mapping.setPoolName("llap");
     WMFullResourcePlan rp = new WMFullResourcePlan(
         new WMResourcePlan("rp"), Lists.newArrayList(pool));
-    rp.addToMappings(mapping);
+    rp.getPlan().setDefaultPoolPath("llap");
     for (Trigger trigger : triggers) {
       rp.addToTriggers(wmTriggerFromTrigger(trigger));
       rp.addToPoolTriggers(new WMPoolTrigger("llap", trigger.getName()));

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql b/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
index f9b0765..9f12153 100644
--- a/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
+++ b/metastore/scripts/upgrade/derby/046-HIVE-17566.derby.sql
@@ -1,4 +1,4 @@
-CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL);
+CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
 CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
 ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
 
@@ -7,6 +7,7 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
 ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
 ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 
 CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
 CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
@@ -18,7 +19,7 @@ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_PK" PR
 ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 
-CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT NOT NULL, ORDERING INTEGER);
+CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
 CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
 ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
 ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
index 054978e..1d21fa2 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-3.0.0.derby.sql
@@ -110,15 +110,15 @@ CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX"
 
 CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
 
-CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL);
+CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
 
-CREATE TABLE "APP"."WM_POOL" (POOL_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID INTEGER, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
 
-CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
+CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
 
-CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID INTEGER NOT NULL, TRIGGER_ID INTEGER NOT NULL);
+CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
 
-CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID INTEGER NOT NULL, ORDERING INTEGER);
+CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
 
 -- ----------------------------------------------
 -- DML Statements
@@ -358,6 +358,8 @@ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") R
 
 ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 
+ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
 ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
 
 ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
index c1578fc..68d8d37 100644
--- a/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
+++ b/metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql
@@ -950,18 +950,20 @@ FROM `PARTITION_PARAMS` GROUP BY `PART_ID`;
 CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
   `NAME` string,
   `STATUS` string,
-  `QUERY_PARALLELISM` int
+  `QUERY_PARALLELISM` int,
+  `DEFAULT_POOL_PATH` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
 "hive.sql.database.type" = "METASTORE",
 "hive.sql.query" =
 "SELECT
-  \"NAME\",
+  \"WM_RESOURCEPLAN\".\"NAME\",
   \"STATUS\",
-  \"QUERY_PARALLELISM\"
+  \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
+  \"WM_POOL\".\"PATH\"
 FROM
-  \"WM_RESOURCEPLAN\""
+  \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
 );
 
 CREATE TABLE IF NOT EXISTS `WM_TRIGGERS` (

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql b/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
index a13d976..06d82e0 100644
--- a/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/031-HIVE-17566.mssql.sql
@@ -3,7 +3,8 @@ CREATE TABLE WM_RESOURCEPLAN
     RP_ID bigint NOT NULL,
     "NAME" nvarchar(128) NOT NULL,
     QUERY_PARALLELISM int,
-    STATUS nvarchar(20) NOT NULL
+    STATUS nvarchar(20) NOT NULL,
+    DEFAULT_POOL_ID bigint
 );
 
 ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
@@ -27,7 +28,7 @@ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
 CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME");
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
-
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
 
 CREATE TABLE WM_TRIGGER
 (
@@ -64,7 +65,7 @@ CREATE TABLE WM_MAPPING
     RP_ID bigint NOT NULL,
     ENTITY_TYPE nvarchar(10) NOT NULL,
     ENTITY_NAME nvarchar(128) NOT NULL,
-    POOL_ID bigint NOT NULL,
+    POOL_ID bigint,
     ORDERING int
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
index 1ceb723..b189c31 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-3.0.0.mssql.sql
@@ -602,7 +602,8 @@ CREATE TABLE WM_RESOURCEPLAN
     RP_ID bigint NOT NULL,
     "NAME" nvarchar(128) NOT NULL,
     QUERY_PARALLELISM int,
-    STATUS nvarchar(20) NOT NULL
+    STATUS nvarchar(20) NOT NULL,
+    DEFAULT_POOL_ID bigint
 );
 
 ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
@@ -645,7 +646,7 @@ CREATE TABLE WM_MAPPING
     RP_ID bigint NOT NULL,
     ENTITY_TYPE nvarchar(10) NOT NULL,
     ENTITY_NAME nvarchar(128) NOT NULL,
-    POOL_ID bigint NOT NULL,
+    POOL_ID bigint,
     ORDERING int
 );
 
@@ -916,6 +917,8 @@ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
 
 CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
 
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
 
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql b/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
index 03ee4cf..cff0b85 100644
--- a/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/046-HIVE-17566.mysql.sql
@@ -3,6 +3,7 @@ CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
     `NAME` varchar(128) NOT NULL,
     `QUERY_PARALLELISM` int(11),
     `STATUS` varchar(20) NOT NULL,
+    `DEFAULT_POOL_ID` bigint(20),
     PRIMARY KEY (`RP_ID`),
     KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -22,6 +23,8 @@ CREATE TABLE IF NOT EXISTS WM_POOL
     CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
+
 CREATE TABLE IF NOT EXISTS WM_TRIGGER
 (
     `TRIGGER_ID` bigint(20) NOT NULL,
@@ -49,7 +52,7 @@ CREATE TABLE IF NOT EXISTS WM_MAPPING
     `RP_ID` bigint(20) NOT NULL,
     `ENTITY_TYPE` varchar(10) NOT NULL,
     `ENTITY_NAME` varchar(128) NOT NULL,
-    `POOL_ID` bigint(20) NOT NULL,
+    `POOL_ID` bigint(20),
     `ORDERING` int,
     PRIMARY KEY (`MAPPING_ID`),
     KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
index 0664854..702a929 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-3.0.0.mysql.sql
@@ -854,6 +854,7 @@ CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
     `NAME` varchar(128) NOT NULL,
     `QUERY_PARALLELISM` int(11),
     `STATUS` varchar(20) NOT NULL,
+    `DEFAULT_POOL_ID` bigint(20),
     PRIMARY KEY (`RP_ID`),
     KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -873,6 +874,8 @@ CREATE TABLE IF NOT EXISTS WM_POOL
     CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
+
 CREATE TABLE IF NOT EXISTS WM_TRIGGER
 (
     `TRIGGER_ID` bigint(20) NOT NULL,
@@ -900,7 +903,7 @@ CREATE TABLE IF NOT EXISTS WM_MAPPING
     `RP_ID` bigint(20) NOT NULL,
     `ENTITY_TYPE` varchar(10) NOT NULL,
     `ENTITY_NAME` varchar(128) NOT NULL,
-    `POOL_ID` bigint(20) NOT NULL,
+    `POOL_ID` bigint(20),
     `ORDERING` int,
     PRIMARY KEY (`MAPPING_ID`),
     KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql b/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
index a13d976..ceab459 100644
--- a/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/046-HIVE-17566.oracle.sql
@@ -3,7 +3,8 @@ CREATE TABLE WM_RESOURCEPLAN
     RP_ID bigint NOT NULL,
     "NAME" nvarchar(128) NOT NULL,
     QUERY_PARALLELISM int,
-    STATUS nvarchar(20) NOT NULL
+    STATUS nvarchar(20) NOT NULL,
+    DEFAULT_POOL_ID bigint
 );
 
 ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
@@ -28,6 +29,7 @@ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, "NAME");
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
 
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
 
 CREATE TABLE WM_TRIGGER
 (
@@ -64,7 +66,7 @@ CREATE TABLE WM_MAPPING
     RP_ID bigint NOT NULL,
     ENTITY_TYPE nvarchar(10) NOT NULL,
     ENTITY_NAME nvarchar(128) NOT NULL,
-    POOL_ID bigint NOT NULL,
+    POOL_ID bigint,
     ORDERING int
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
index a94985a..6e9c7ff 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-3.0.0.oracle.sql
@@ -583,7 +583,8 @@ CREATE TABLE WM_RESOURCEPLAN
     RP_ID bigint NOT NULL,
     "NAME" nvarchar(128) NOT NULL,
     QUERY_PARALLELISM int,
-    STATUS nvarchar(20) NOT NULL
+    STATUS nvarchar(20) NOT NULL,
+    DEFAULT_POOL_ID bigint
 );
 
 ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
@@ -626,7 +627,7 @@ CREATE TABLE WM_MAPPING
     RP_ID bigint NOT NULL,
     ENTITY_TYPE nvarchar(10) NOT NULL,
     ENTITY_NAME nvarchar(128) NOT NULL,
-    POOL_ID bigint NOT NULL,
+    POOL_ID bigint,
     ORDERING int
 );
 
@@ -873,6 +874,8 @@ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
 
 CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
 
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
 
 ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql b/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
index e80e612..07fb6b7 100644
--- a/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/045-HIVE-17566.postgres.sql
@@ -2,7 +2,8 @@ CREATE TABLE "WM_RESOURCEPLAN" (
     "RP_ID" bigint NOT NULL,
     "NAME" character varying(128) NOT NULL,
     "QUERY_PARALLELISM" integer,
-    "STATUS" character varying(20) NOT NULL
+    "STATUS" character varying(20) NOT NULL,
+    "DEFAULT_POOL_ID" bigint
 );
 
 ALTER TABLE ONLY "WM_RESOURCEPLAN"
@@ -33,6 +34,8 @@ ALTER TABLE ONLY "WM_POOL"
 ALTER TABLE ONLY "WM_POOL"
     ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
 
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
 
 CREATE TABLE "WM_TRIGGER" (
     "TRIGGER_ID" bigint NOT NULL,
@@ -72,7 +75,7 @@ CREATE TABLE "WM_MAPPING" (
     "RP_ID" bigint NOT NULL,
     "ENTITY_TYPE" character varying(10) NOT NULL,
     "ENTITY_NAME" character varying(128) NOT NULL,
-    "POOL_ID" bigint NOT NULL,
+    "POOL_ID" bigint,
     "ORDERING" integer
 );
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
index be1bb1e..7504604 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-3.0.0.postgres.sql
@@ -623,7 +623,8 @@ CREATE TABLE "WM_RESOURCEPLAN" (
     "RP_ID" bigint NOT NULL,
     "NAME" character varying(128) NOT NULL,
     "QUERY_PARALLELISM" integer,
-    "STATUS" character varying(20) NOT NULL
+    "STATUS" character varying(20) NOT NULL,
+    "DEFAULT_POOL_ID" bigint
 );
 
 CREATE TABLE "WM_POOL" (
@@ -654,7 +655,7 @@ CREATE TABLE "WM_MAPPING" (
     "RP_ID" bigint NOT NULL,
     "ENTITY_TYPE" character varying(10) NOT NULL,
     "ENTITY_NAME" character varying(128) NOT NULL,
-    "POOL_ID" bigint NOT NULL,
+    "POOL_ID" bigint,
     "ORDERING" integer
 );
 
@@ -1544,6 +1545,9 @@ ALTER TABLE ONLY "FUNC_RU"
 
 -- Resource plan FK constraints.
 
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
 ALTER TABLE ONLY "WM_POOL"
     ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/src/gen/thrift/gen-py/__init__.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/__init__.py b/metastore/src/gen/thrift/gen-py/__init__.py
deleted file mode 100644
index e69de29..0000000

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index ed58b41..a54a82b 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -22,6 +22,9 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMEN
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName;
 
+import com.google.common.collect.Sets;
+import org.apache.hadoop.hive.metastore.model.MWMPool;
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
@@ -7419,8 +7422,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public WMCreateResourcePlanResponse create_resource_plan(WMCreateResourcePlanRequest request)
         throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+      int defaultPoolSize = MetastoreConf.getIntVar(
+          hiveConf, MetastoreConf.ConfVars.WM_DEFAULT_POOL_SIZE);
+
       try {
-        getMS().createResourcePlan(request.getResourcePlan());
+        getMS().createResourcePlan(request.getResourcePlan(), defaultPoolSize);
         return new WMCreateResourcePlanResponse();
       } catch (MetaException e) {
         LOG.error("Exception while trying to persist resource plan", e);

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 4df7c97..95aeb25 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -941,9 +941,9 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan)
+  public void createResourcePlan(WMResourcePlan resourcePlan, int defaultPoolSize)
       throws AlreadyExistsException, MetaException {
-    objectStore.createResourcePlan(resourcePlan);
+    objectStore.createResourcePlan(resourcePlan, defaultPoolSize);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index a41e5a0..e8400be 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -953,7 +953,8 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public void createResourcePlan(WMResourcePlan resourcePlan) throws MetaException {
+  public void createResourcePlan(
+      WMResourcePlan resourcePlan, int defaultPoolSize) throws MetaException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 826ae56..0a34633 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -705,6 +705,10 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       resourcePlan.setQueryParallelism(desc.getQueryParallelism());
     }
 
+    if (desc.getDefaultPoolPath() != null) {
+      resourcePlan.setDefaultPoolPath(desc.getDefaultPoolPath());
+    }
+
     boolean isActivate = false, isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
     WorkloadManager wm = null;
     if (desc.getStatus() != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index a1b7cfb..3bb4f58 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -157,17 +159,8 @@ public class TezTask extends Task<TezWork> {
         LOG.warn("The session: " + session + " has not been opened");
       }
       Set<String> desiredCounters = new HashSet<>();
-      if (WorkloadManager.isInUse(ss.getConf())) {
-        WorkloadManager wm = WorkloadManager.getInstance();
-        // TODO: in future, we may also pass getUserIpAddress.
-        // Note: for now this will just block to wait for a session based on parallelism.
-        session = wm.getSession(session, ss.getUserName(), conf);
-        desiredCounters.addAll(wm.getTriggerCounterNames());
-      } else {
-        TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
-        session = pm.getSession(session, conf, false, getWork().getLlapMode());
-        desiredCounters.addAll(pm.getTriggerCounterNames());
-      }
+      session = WorkloadManagerFederation.getSession(session, conf,
+          new MappingInput(ss.getUserName()), getWork().getLlapMode(), desiredCounters);
 
       TriggerContext triggerContext = ctx.getTriggerContext();
       triggerContext.setDesiredCounters(desiredCounters);

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
index 9954c24..50cf4da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
@@ -34,53 +34,63 @@ class UserPoolMapping {
       this.priority = priority;
     }
     int priority;
+    /** The destination pool; null means unmanaged. */
     String fullPoolName;
     @Override
     public String toString() {
-      return "[" + fullPoolName + ", priority=" + priority + "]";
+      return "[" + (fullPoolName == null ? "unmanaged" : fullPoolName)
+          + ", priority=" + priority + "]";
     }
   }
 
+  /** Contains all the information necessary to map a query to a pool. */
+  public static final class MappingInput {
+    public final String userName;
+    // TODO: we may add app name, group name, etc. later
+
+    public MappingInput(String userName) {
+      this.userName = userName;
+    }
+
+    @Override
+    public String toString() {
+      return userName;
+    }
+  }
+
   private final Map<String, Mapping> userMappings = new HashMap<>();
-  private final String defaultPoolName;
+  private final String defaultPoolPath;
 
-  public UserPoolMapping(List<WMMapping> mappings) {
-    String defaultPoolName = null;
-    for (WMMapping mapping : mappings) {
-      MappingType type = MappingType.valueOf(mapping.getEntityType().toUpperCase());
-      switch (type) {
-      case USER: {
+  public UserPoolMapping(List<WMMapping> mappings, String defaultPoolPath) {
+    if (mappings != null) {
+      for (WMMapping mapping : mappings) {
+        MappingType type = MappingType.valueOf(mapping.getEntityType().toUpperCase());
+        switch (type) {
+        case USER: {
           Mapping val = new Mapping(mapping.getPoolName(), mapping.getOrdering());
           Mapping oldValue = userMappings.put(mapping.getEntityName(), val);
           if (oldValue != null) {
             throw new AssertionError("Duplicate mapping for user " + mapping.getEntityName()
                 + "; " + oldValue + " and " + val);
           }
-        break;
-      }
-      case DEFAULT: {
-        String poolName = mapping.getPoolName();
-        if (defaultPoolName != null) {
-          throw new AssertionError("Duplicate default mapping; "
-              + defaultPoolName + " and " + poolName);
+          break;
+        }
+        default: throw new AssertionError("Unknown type " + type);
         }
-        defaultPoolName = poolName;
-        break;
-      }
-      default: throw new AssertionError("Unknown type " + mapping.getEntityType());
       }
     }
-    this.defaultPoolName = defaultPoolName;
+    this.defaultPoolPath = defaultPoolPath;
   }
 
-  public String mapSessionToPoolName(String userName) {
+  public String mapSessionToPoolName(MappingInput input) {
     // For now, we only have user rules, so this is very simple.
     // In future we'd also look up groups (each group the user is in initially; we may do it
     // the opposite way if the user is a member of many groups but there are not many rules),
     // whatever the user supplies in the connection string to HS2, etc.
     // If multiple rules match, we'd need to get the highest priority one.
-    Mapping userMapping = userMappings.get(userName);
+    Mapping userMapping = userMappings.get(input.userName);
     if (userMapping != null) return userMapping.fullPoolName;
-    return defaultPoolName;
+    return defaultPoolPath;
   }
 }
\ No newline at end of file
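
The net resolution order after this change: an explicit per-user rule wins, then the plan-level default pool path applies, and a null result now means "route to the unmanaged path" rather than an error. A self-contained sketch of that lookup (a simplified stand-in, not the actual class; the real code also tracks rule priorities):

import java.util.HashMap;
import java.util.Map;

final class PoolResolutionSketch {
  private final Map<String, String> userToPool = new HashMap<>();
  private final String defaultPoolPath; // null means "unmanaged"

  PoolResolutionSketch(Map<String, String> userRules, String defaultPoolPath) {
    if (userRules != null) {
      userToPool.putAll(userRules);
    }
    this.defaultPoolPath = defaultPoolPath;
  }

  /** Returns a pool path, or null to send the session down the unmanaged path. */
  String resolve(String userName) {
    String pool = userToPool.get(userName); // an explicit user rule always wins
    return pool != null ? pool : defaultPoolPath;
  }

  public static void main(String[] args) {
    Map<String, String> rules = new HashMap<>();
    rules.put("alice", "llap.hipri");
    System.out.println(new PoolResolutionSketch(rules, "llap.default").resolve("alice")); // llap.hipri
    System.out.println(new PoolResolutionSketch(rules, "llap.default").resolve("bob"));   // llap.default
    System.out.println(new PoolResolutionSketch(rules, null).resolve("bob"));             // null: unmanaged
  }
}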

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index 169991c..039881f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
 import org.apache.hadoop.hive.ql.wm.ExpressionFactory;
 
 import org.apache.hadoop.hive.ql.wm.Trigger.Action;
@@ -434,7 +435,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     //    want query level fairness, and don't want the get in queue to hold up a session.
     GetRequest req;
     while ((req = e.getRequests.pollFirst()) != null) {
-      LOG.debug("Processing a new get request from " + req.userName);
+      LOG.debug("Processing a new get request from " + req.mappingInput);
       queueGetRequestOnMasterThread(req, poolsToRedistribute, syncWork);
     }
     e.toReuse.clear();
@@ -591,7 +592,8 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     // FIXME: Add Triggers from metastore to poolstate
     // Note: we assume here that plan has been validated beforehand, so we don't verify
     //       that fractions or query parallelism add up, etc.
-    this.userPoolMapping = new UserPoolMapping(e.resourcePlanToApply.getMappings());
+    this.userPoolMapping = new UserPoolMapping(e.resourcePlanToApply.getMappings(),
+        e.resourcePlanToApply.getPlan().getDefaultPoolPath());
     HashMap<String, PoolState> oldPools = pools;
     pools = new HashMap<>();
 
@@ -681,10 +683,10 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
   private void queueGetRequestOnMasterThread(
       GetRequest req, HashSet<String> poolsToRedistribute, WmThreadSyncWork syncWork) {
-    String poolName = userPoolMapping.mapSessionToPoolName(req.userName);
+    String poolName = userPoolMapping.mapSessionToPoolName(req.mappingInput);
     if (poolName == null) {
-      req.future.setException(new HiveException(
-          "Cannot find any pool mapping for user " + req.userName));
+      req.future.setException(new NoPoolMappingException(
+          "Cannot find any pool mapping for " + req.mappingInput));
       returnSessionOnFailedReuse(req, syncWork, poolsToRedistribute);
       return;
     }
@@ -856,13 +858,14 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       }
     };
     private final long order;
-    private final String userName;
+    private final MappingInput mappingInput;
     private final SettableFuture<WmTezSession> future;
     private WmTezSession sessionToReuse;
 
-    private GetRequest(String userName, SettableFuture<WmTezSession> future,
+    private GetRequest(MappingInput mappingInput, SettableFuture<WmTezSession> future,
         WmTezSession sessionToReuse, long order) {
-      this.userName = userName;
+      assert mappingInput != null;
+      this.mappingInput = mappingInput;
       this.future = future;
       this.sessionToReuse = sessionToReuse;
       this.order = order;
@@ -870,18 +873,18 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
     @Override
     public String toString() {
-      return "[#" + order + ", " + userName + ", reuse " + sessionToReuse + "]";
+      return "[#" + order + ", " + mappingInput + ", reuse " + sessionToReuse + "]";
     }
   }
 
   public TezSessionState getSession(
-      TezSessionState session, String userName, HiveConf conf) throws Exception {
+      TezSessionState session, MappingInput input, HiveConf conf) throws Exception {
     // Note: not actually used for pool sessions; verify some things like doAs are not set.
     validateConfig(conf);
     SettableFuture<WmTezSession> future = SettableFuture.create();
     WmTezSession wmSession = checkSessionForReuse(session);
     GetRequest req = new GetRequest(
-        userName, future, wmSession, getRequestVersion.incrementAndGet());
+        input, future, wmSession, getRequestVersion.incrementAndGet());
     currentLock.lock();
     try {
       current.getRequests.add(req);
@@ -1434,8 +1437,21 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
   }
 
+  boolean isManaged(MappingInput input) {
+    // This is always replaced atomically, so we don't care about concurrency here.
+    return userPoolMapping.mapSessionToPoolName(input) != null;
+  }
+
   @VisibleForTesting
   TezSessionPool<WmTezSession> getTezAmPool() {
     return tezAmPool;
   }
+
+  public final static class NoPoolMappingException extends Exception {
+    public NoPoolMappingException(String message) {
+      super(message);
+    }
+
+    private static final long serialVersionUID = 346375346724L;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManagerFederation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManagerFederation.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManagerFederation.java
new file mode 100644
index 0000000..70adc33
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManagerFederation.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.tez;
+
+import java.util.Set;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
+
+public class WorkloadManagerFederation {
+
+  public static TezSessionState getSession(TezSessionState session, HiveConf conf,
+      MappingInput input, boolean isUnmanagedLlapMode, Set<String> desiredCounters) throws Exception {
+    // 1. If WM is not present just go to unmanaged.
+    if (!WorkloadManager.isInUse(conf)) {
+      return getUnmanagedSession(session, conf, desiredCounters, isUnmanagedLlapMode);
+    }
+    WorkloadManager wm = WorkloadManager.getInstance();
+    // We will ask WM for a preliminary mapping. This allows us to escape to the unmanaged path
+    // quickly in the common case. It's still possible that the resource plan will be updated
+    // and so our preliminary mapping won't work out. We'll handle that below.
+    if (!wm.isManaged(input)) {
+      return getUnmanagedSession(session, conf, desiredCounters, isUnmanagedLlapMode);
+    }
+
+    try {
+      // Note: this may just block to wait for a session based on parallelism.
+      TezSessionState result = wm.getSession(session, input, conf);
+      desiredCounters.addAll(wm.getTriggerCounterNames());
+      return result;
+    } catch (WorkloadManager.NoPoolMappingException ex) {
+      return getUnmanagedSession(session, conf, desiredCounters, isUnmanagedLlapMode);
+    }
+  }
+
+  private static TezSessionState getUnmanagedSession(
+      TezSessionState session, HiveConf conf, Set<String> desiredCounters, boolean isWorkLlapNode) throws Exception {
+    TezSessionPoolManager pm = TezSessionPoolManager.getInstance();
+    session = pm.getSession(session, conf, false, isWorkLlapNode);
+    desiredCounters.addAll(pm.getTriggerCounterNames());
+    return session;
+  }
+
+}
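
The contract worth noting here is the double fallback: the preliminary isManaged() check keeps the common unmanaged case away from WM entirely, but the resource plan can still be swapped before the get request is processed, so the managed path may throw NoPoolMappingException and the caller retries unmanaged. A generic, self-contained sketch of that pattern (names below are illustrative, not Hive APIs):

import java.util.concurrent.Callable;

final class FallbackSketch {
  /** Mirrors the flow above: fast unmanaged path, managed attempt, unmanaged retry. */
  static <T> T getWithFallback(boolean looksManaged, Callable<T> managed, Callable<T> unmanaged)
      throws Exception {
    if (!looksManaged) {
      return unmanaged.call();   // common case: no mapping, skip WM entirely
    }
    try {
      return managed.call();     // may block waiting on pool parallelism
    } catch (NoMappingSketchException e) {
      return unmanaged.call();   // plan changed under us; the mapping no longer holds
    }
  }

  static final class NoMappingSketchException extends Exception {
    private static final long serialVersionUID = 1L;
  }

  public static void main(String[] args) throws Exception {
    String s = getWithFallback(true,
        () -> { throw new NoMappingSketchException(); },
        () -> "unmanaged-session");
    System.out.println(s); // unmanaged-session
  }
}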

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index f1258ba..2a568a7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -432,6 +432,9 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
         if (plan.isSetQueryParallelism()) {
           generator.writeNumberField("queryParallelism", plan.getQueryParallelism());
         }
+        if (plan.isSetDefaultPoolPath()) {
+          generator.writeStringField("defaultPoolPath", plan.getDefaultPoolPath());
+        }
         generator.writeEndObject();
       }
       generator.writeEndArray();

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index 9df1b82..3fda5ab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -557,6 +557,12 @@ class TextMetaDataFormatter implements MetaDataFormatter {
         }
         out.write(separator);
         out.write(plan.getStatus().name().getBytes("UTF-8"));
+        out.write(separator);
+        if (plan.isSetDefaultPoolPath()) {
+          out.write(plan.getDefaultPoolPath().getBytes("UTF-8"));
+        } else {
+          out.writeBytes("null");
+        }
         out.write(terminator);
       }
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 29b904e..579f2df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -890,6 +890,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       Tree child = ast.getChild(i);
       switch (child.getType()) {
       case HiveParser.TOK_VALIDATE:
+        if (desc != null) throw new SemanticException("Invalid ALTER VALIDATE command");
         desc = AlterResourcePlanDesc.createValidatePlan(rpName);
         break;
       case HiveParser.TOK_ACTIVATE:
@@ -912,30 +913,40 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
         }
         break;
       case HiveParser.TOK_DISABLE:
-        if (desc != null) {
-          throw new SemanticException("Invalid ALTER DISABLE command");
-        }
+        if (desc != null) throw new SemanticException("Invalid ALTER DISABLE command");
         desc = AlterResourcePlanDesc.createChangeStatus(rpName, WMResourcePlanStatus.DISABLED);
         break;
       case HiveParser.TOK_QUERY_PARALLELISM:
-        if (ast.getChildCount() <= (i + 1)) {
-          throw new SemanticException(
-              "Expected number for query parallelism in alter resource plan statment");
+        if (child.getChildCount() != 1) {
+          throw new SemanticException("Expected one argument");
+        }
+        if (desc == null) {
+          desc = AlterResourcePlanDesc.createSet(rpName);
+        }
+        desc.setQueryParallelism(Integer.parseInt(child.getChild(0).getText()));
+        break;
+      case HiveParser.TOK_DEFAULT_POOL:
+        if (child.getChildCount() != 1) {
+          throw new SemanticException("Expected one argument");
         }
-        int queryParallelism = Integer.parseInt(ast.getChild(++i).getText());
-        desc = AlterResourcePlanDesc.createChangeParallelism(rpName, queryParallelism);
+        if (desc == null) {
+          desc = AlterResourcePlanDesc.createSet(rpName);
+        }
+        desc.setDefaultPoolPath(child.getChild(0).getText());
         break;
       case HiveParser.TOK_RENAME:
-        if (ast.getChildCount() <= (i + 1)) {
-          throw new SemanticException(
-              "Expected new name for rename in alter resource plan statment");
+        if (desc != null) throw new SemanticException("Invalid ALTER RENAME command");
+        if (ast.getChildCount() == (i + 1)) {
+          throw new SemanticException("Expected an argument");
         }
-        String name = ast.getChild(++i).getText();
-        desc = AlterResourcePlanDesc.createRenamePlan(rpName, name);
+        if (desc == null) {
+          desc = AlterResourcePlanDesc.createSet(rpName);
+        }
+        desc.setNewName(ast.getChild(++i).getText());
         break;
       default:
-        throw new SemanticException("Unexpected token in alter resource plan statement: "
-            + ast.getChild(1).getType());
+        throw new SemanticException(
+          "Unexpected token in alter resource plan statement: " + child.getType());
       }
     }
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc), conf));
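
The shape of the new analyzer is worth spelling out: standalone clauses (VALIDATE, DISABLE, RENAME) must be the whole command, while SET-style clauses lazily create one shared descriptor and each fill in a field; that sharing is what lets QUERY_PARALLELISM and DEFAULT POOL combine in a single statement. A condensed, self-contained sketch of the accumulation (token and descriptor names are simplified stand-ins, not the real classes):

import java.util.Arrays;
import java.util.List;

final class AlterRpAnalyzeSketch {
  enum Tok { VALIDATE, QUERY_PARALLELISM, DEFAULT_POOL }

  static final class Clause {
    final Tok type;
    final String arg;
    Clause(Tok type, String arg) { this.type = type; this.arg = arg; }
  }

  static final class Desc {
    boolean validate;
    Integer queryParallelism;
    String defaultPoolPath;
  }

  static Desc analyze(List<Clause> clauses) {
    Desc desc = null;
    for (Clause c : clauses) {
      switch (c.type) {
      case VALIDATE:
        if (desc != null) throw new IllegalStateException("Invalid ALTER VALIDATE command");
        desc = new Desc();
        desc.validate = true;
        break;
      case QUERY_PARALLELISM:
        if (desc == null) desc = new Desc(); // the first SET clause creates the descriptor
        desc.queryParallelism = Integer.parseInt(c.arg);
        break;
      case DEFAULT_POOL:
        if (desc == null) desc = new Desc(); // later SET clauses reuse it
        desc.defaultPoolPath = c.arg;
        break;
      }
    }
    return desc;
  }

  public static void main(String[] args) {
    Desc d = analyze(Arrays.asList(new Clause(Tok.QUERY_PARALLELISM, "30"),
        new Clause(Tok.DEFAULT_POOL, "default")));
    System.out.println(d.queryParallelism + " " + d.defaultPoolPath); // 30 default
  }
}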

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 0263df0..d61fce9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -358,6 +358,8 @@ KW_PLAN: 'PLAN';
 KW_QUERY_PARALLELISM: 'QUERY_PARALLELISM';
 KW_PLANS: 'PLANS';
 KW_ACTIVATE: 'ACTIVATE';
+KW_DEFAULT: 'DEFAULT';
+KW_POOL: 'POOL';
 KW_MOVE: 'MOVE';
 KW_DO: 'DO';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index ac95da9..0bbd9be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -413,6 +413,7 @@ TOK_VALIDATE;
 TOK_ACTIVATE;
 TOK_QUERY_PARALLELISM;
 TOK_RENAME;
+TOK_DEFAULT_POOL;
 TOK_CREATE_TRIGGER;
 TOK_ALTER_TRIGGER;
 TOK_DROP_TRIGGER;
@@ -594,6 +595,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
     xlateMap.put("KW_QUERY_PARALLELISM", "QUERY_PARALLELISM");
     xlateMap.put("KW_PLANS", "PLANS");
     xlateMap.put("KW_ACTIVATE", "ACTIVATE");
+    xlateMap.put("KW_DEFAULT", "DEFAULT");
+    xlateMap.put("KW_POOL", "POOL");
     xlateMap.put("KW_MOVE", "MOVE");
     xlateMap.put("KW_DO", "DO");
 
@@ -1002,6 +1005,22 @@ createResourcePlanStatement
     -> ^(TOK_CREATERESOURCEPLAN $name $parallelism?)
     ;
 
+alterRpSet
+@init { pushMsg("alterRpSet", state); }
+@after { popMsg(state); }
+  : (
+     (KW_QUERY_PARALLELISM EQUAL parallelism=Number -> ^(TOK_QUERY_PARALLELISM $parallelism))
+   | (KW_DEFAULT KW_POOL EQUAL poolName=StringLiteral -> ^(TOK_DEFAULT_POOL $poolName))
+    )
+  ;
+
+alterRpSetList
+@init { pushMsg("alterRpSetList", state); }
+@after { popMsg(state); }
+  :
+  alterRpSet (COMMA alterRpSet)* -> alterRpSet+
+  ;
+
 activate : KW_ACTIVATE -> ^(TOK_ACTIVATE);
 enable : KW_ENABLE -> ^(TOK_ENABLE);
 
@@ -1011,8 +1030,7 @@ alterResourcePlanStatement
     : KW_ALTER KW_RESOURCE KW_PLAN name=identifier (
           (KW_VALIDATE -> ^(TOK_ALTER_RP $name TOK_VALIDATE))
         | (KW_DISABLE -> ^(TOK_ALTER_RP $name TOK_DISABLE))
-        | (KW_SET KW_QUERY_PARALLELISM EQUAL parallelism=Number
-           -> ^(TOK_ALTER_RP $name TOK_QUERY_PARALLELISM $parallelism))
+        | (KW_SET setList=alterRpSetList -> ^(TOK_ALTER_RP $name $setList))
         | (KW_RENAME KW_TO newName=identifier
            -> ^(TOK_ALTER_RP $name TOK_RENAME $newName))
         | ((activate+ enable? | enable+ activate?) -> ^(TOK_ALTER_RP $name activate? enable?))
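
With alterRpSetList, SET now takes a comma-separated list and each element becomes its own child token under TOK_ALTER_RP. A small driver to eyeball the resulting AST; this assumes hive-exec on the classpath, and uses ParseDriver and ASTNode.dump(), Hive's existing parser test entry points:

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;

public class ParseAlterRpSketch {
  public static void main(String[] args) throws Exception {
    ParseDriver pd = new ParseDriver();
    // A single SET clause, as before this change.
    ASTNode one = pd.parse("ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30");
    // Two clauses in one statement, enabled by alterRpSetList.
    ASTNode two = pd.parse(
        "ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = 'default'");
    System.out.println(one.dump()); // expect TOK_ALTER_RP with a TOK_QUERY_PARALLELISM child
    System.out.println(two.dump()); // expect an additional TOK_DEFAULT_POOL child
  }
}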

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 69a1d63..a0eca4b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -829,6 +829,8 @@ nonReserved
     | KW_WAIT
     | KW_ZONE
     | KW_TIMESTAMPTZ
+    | KW_DEFAULT
+    | KW_POOL
 
 ;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
index a845613..b6298da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterResourcePlanDesc.java
@@ -32,35 +32,32 @@ public class AlterResourcePlanDesc extends DDLDesc implements Serializable {
   private Integer queryParallelism;
   private WMResourcePlanStatus status;
   private boolean validate;
+  private String defaultPoolPath;
   private boolean isEnableActivate;
 
   public AlterResourcePlanDesc() {}
 
   private AlterResourcePlanDesc(String rpName, String newName, Integer queryParallelism,
-      WMResourcePlanStatus status, boolean validate) {
+      WMResourcePlanStatus status, boolean validate, String defaultPoolPath) {
     this.rpName = rpName;
     this.newName = newName;
     this.queryParallelism = queryParallelism;
     this.status = status;
     this.validate = validate;
+    this.defaultPoolPath = defaultPoolPath;
   }
 
-  public static AlterResourcePlanDesc createChangeParallelism(String rpName,
-      int queryParallelism) {
-    return new AlterResourcePlanDesc(rpName, null, queryParallelism, null, false);
+  public static AlterResourcePlanDesc createSet(String rpName) {
+    return new AlterResourcePlanDesc(rpName, null, null, null, false, null);
   }
 
   public static AlterResourcePlanDesc createChangeStatus(
       String rpName, WMResourcePlanStatus status) {
-    return new AlterResourcePlanDesc(rpName, null, null, status, false);
+    return new AlterResourcePlanDesc(rpName, null, null, status, false, null);
   }
 
   public static AlterResourcePlanDesc createValidatePlan(String rpName) {
-    return new AlterResourcePlanDesc(rpName, null, null, null, true);
-  }
-
-  public static AlterResourcePlanDesc createRenamePlan(String rpName, String newName) {
-    return new AlterResourcePlanDesc(rpName, newName, null, null, false);
+    return new AlterResourcePlanDesc(rpName, null, null, null, true, null);
   }
 
   @Explain(displayName="resourcePlanName", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -81,6 +78,15 @@ public class AlterResourcePlanDesc extends DDLDesc implements Serializable {
     this.newName = newName;
   }
 
+  @Explain(displayName="Default pool", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getDefaultPoolPath() {
+    return defaultPoolPath;
+  }
+
+  public void setDefaultPoolPath(String defaultPoolPath) {
+    this.defaultPoolPath = defaultPoolPath;
+  }
+
   @Explain(displayName="queryParallelism", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
   public Integer getQueryParallelism() {
     return queryParallelism;
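
With the single-purpose factories folded into createSet, callers build one descriptor and set fields on it. A minimal usage sketch (the method names match this diff; the particular values are made up):

import org.apache.hadoop.hive.ql.plan.AlterResourcePlanDesc;

public class BuildAlterRpDescSketch {
  public static void main(String[] args) {
    // One mutable descriptor accumulates every SET-style change.
    AlterResourcePlanDesc desc = AlterResourcePlanDesc.createSet("plan_3");
    desc.setQueryParallelism(30);
    desc.setDefaultPoolPath("default");
    System.out.println(desc.getDefaultPoolPath()); // default
  }
}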

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index 0347e91..84a35cc 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -23,12 +23,10 @@ import static org.junit.Assert.*;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Mockito.*;
 
+import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
-
 import org.apache.hadoop.hive.metastore.api.WMMapping;
-
 import org.apache.hadoop.hive.metastore.api.WMPool;
-
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 
 import com.google.common.util.concurrent.SettableFuture;
@@ -77,7 +75,7 @@ public class TestWorkloadManager {
         cdl.countDown();
       }
       try {
-       session.set((WmTezSession) wm.getSession(old, userName, conf));
+       session.set((WmTezSession) wm.getSession(old, new MappingInput(userName), conf));
       } catch (Throwable e) {
         error.compareAndSet(null, e);
       }
@@ -136,11 +134,9 @@ public class TestWorkloadManager {
     }
 
     private static WMFullResourcePlan createDummyPlan(int numSessions) {
-      WMMapping mapping = new WMMapping("rp", "DEFAULT", "");
-      mapping.setPoolName("llap");
-      WMFullResourcePlan plan = new WMFullResourcePlan();
-      plan.addToPools(pool("llap", numSessions, 1.0f));
-      plan.addToMappings(mapping);
+      WMFullResourcePlan plan = new WMFullResourcePlan(new WMResourcePlan("rp"), 
+          Lists.newArrayList(pool("llap", numSessions, 1.0f)));
+      plan.getPlan().setDefaultPoolPath("llap");
       return plan;
     }
 
@@ -152,9 +148,9 @@ public class TestWorkloadManager {
 
     @Override
     public TezSessionState getSession(
-        TezSessionState session, String userName, HiveConf conf) throws Exception {
+        TezSessionState session, MappingInput input, HiveConf conf) throws Exception {
       // We want to wait for the iteration to finish and set the cluster fraction.
-      TezSessionState state = super.getSession(session, userName, conf);
+      TezSessionState state = super.getSession(session, input, conf);
       ensureWm();
       return state;
     }
@@ -193,17 +189,17 @@ public class TestWorkloadManager {
     TezSessionState nonPool = mock(TezSessionState.class);
     when(nonPool.getConf()).thenReturn(conf);
     doNothing().when(nonPool).close(anyBoolean());
-    TezSessionState session = wm.getSession(nonPool, null, conf);
+    TezSessionState session = wm.getSession(nonPool, new MappingInput("user"), conf);
     verify(nonPool).close(anyBoolean());
     assertNotSame(nonPool, session);
     session.returnToSessionManager();
     TezSessionPoolSession diffPool = mock(TezSessionPoolSession.class);
     when(diffPool.getConf()).thenReturn(conf);
     doNothing().when(diffPool).returnToSessionManager();
-    session = wm.getSession(diffPool, null, conf);
+    session = wm.getSession(diffPool, new MappingInput("user"), conf);
     verify(diffPool).returnToSessionManager();
     assertNotSame(diffPool, session);
-    TezSessionState session2 = wm.getSession(session, null, conf);
+    TezSessionState session2 = wm.getSession(session, new MappingInput("user"), conf);
     assertSame(session, session2);
   }
 
@@ -215,11 +211,11 @@ public class TestWorkloadManager {
     wm.start();
     // The queue should be ignored.
     conf.set(TezConfiguration.TEZ_QUEUE_NAME, "test2");
-    TezSessionState session = wm.getSession(null, null, conf);
+    TezSessionState session = wm.getSession(null, new MappingInput("user"), conf);
     assertEquals("test", session.getQueueName());
     assertEquals("test", conf.get(TezConfiguration.TEZ_QUEUE_NAME));
     session.setQueueName("test2");
-    session = wm.getSession(session, null, conf);
+    session = wm.getSession(session, new MappingInput("user"), conf);
     assertEquals("test", session.getQueueName());
   }
 
@@ -234,7 +230,7 @@ public class TestWorkloadManager {
     MockQam qam = new MockQam();
     WorkloadManager wm = new WorkloadManagerForTest("test", conf, 1, qam);
     wm.start();
-    WmTezSession session = (WmTezSession) wm.getSession(null, null, conf);
+    WmTezSession session = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     assertEquals(1.0, session.getClusterFraction(), EPSILON);
     qam.assertWasCalled();
     WmTezSession session2 = (WmTezSession) session.reopen(conf, null);
@@ -252,10 +248,10 @@ public class TestWorkloadManager {
     MockQam qam = new MockQam();
     WorkloadManager wm = new WorkloadManagerForTest("test", conf, 2, qam);
     wm.start();
-    WmTezSession session = (WmTezSession) wm.getSession(null, null, conf);
+    WmTezSession session = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     assertEquals(1.0, session.getClusterFraction(), EPSILON);
     qam.assertWasCalled();
-    WmTezSession session2 = (WmTezSession) wm.getSession(null, null, conf);
+    WmTezSession session2 = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     assertEquals(0.5, session.getClusterFraction(), EPSILON);
     assertEquals(0.5, session2.getClusterFraction(), EPSILON);
     qam.assertWasCalled();
@@ -266,7 +262,7 @@ public class TestWorkloadManager {
     qam.assertWasCalled();
 
     // We never lose pool session, so we should still be able to get.
-    session = (WmTezSession) wm.getSession(null, null, conf);
+    session = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     session.returnToSessionManager();
     assertEquals(1.0, session2.getClusterFraction(), EPSILON);
     assertEquals(0.0, session.getClusterFraction(), EPSILON);
@@ -286,16 +282,16 @@ public class TestWorkloadManager {
     wm.start();
     assertEquals(5, wm.getNumSessions());
     // Get all the 5 sessions; validate cluster fractions.
-    WmTezSession session05of06 = (WmTezSession) wm.getSession(null, "p1", conf);
+    WmTezSession session05of06 = (WmTezSession) wm.getSession(null, new MappingInput("p1"), conf);
     assertEquals(0.3, session05of06.getClusterFraction(), EPSILON);
-    WmTezSession session03of06 = (WmTezSession) wm.getSession(null, "p2", conf);
+    WmTezSession session03of06 = (WmTezSession) wm.getSession(null, new MappingInput("p2"), conf);
     assertEquals(0.18, session03of06.getClusterFraction(), EPSILON);
-    WmTezSession session03of06_2 = (WmTezSession) wm.getSession(null, "p2", conf);
+    WmTezSession session03of06_2 = (WmTezSession) wm.getSession(null, new MappingInput("p2"), conf);
     assertEquals(0.09, session03of06.getClusterFraction(), EPSILON);
     assertEquals(0.09, session03of06_2.getClusterFraction(), EPSILON);
-    WmTezSession session02of06 = (WmTezSession) wm.getSession(null, "r1", conf);
+    WmTezSession session02of06 = (WmTezSession) wm.getSession(null, new MappingInput("r1"), conf);
     assertEquals(0.12, session02of06.getClusterFraction(), EPSILON);
-    WmTezSession session04 = (WmTezSession) wm.getSession(null, "r2", conf);
+    WmTezSession session04 = (WmTezSession) wm.getSession(null, new MappingInput("r2"), conf);
     assertEquals(0.4, session04.getClusterFraction(), EPSILON);
     session05of06.returnToSessionManager();
     session03of06.returnToSessionManager();
@@ -313,9 +309,9 @@ public class TestWorkloadManager {
     plan.setMappings(Lists.newArrayList(mapping("A", "A"), mapping("B", "B")));
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, "A", conf),
-        sessionA2 = (WmTezSession) wm.getSession(null, "A", conf),
-        sessionB1 = (WmTezSession) wm.getSession(null, "B", conf);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf);
     final AtomicReference<WmTezSession> sessionA3 = new AtomicReference<>(),
         sessionA4 = new AtomicReference<>();
     final AtomicReference<Throwable> error = new AtomicReference<>();
@@ -329,7 +325,7 @@ public class TestWorkloadManager {
     assertNull(sessionA4.get());
     checkError(error);
     // While threads are blocked on A, we should still be able to get and return a B session.
-    WmTezSession sessionB2 = (WmTezSession) wm.getSession(null, "B", conf);
+    WmTezSession sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf);
     sessionB1.returnToSessionManager();
     sessionB2.returnToSessionManager();
     assertNull(sessionA3.get());
@@ -355,16 +351,16 @@ public class TestWorkloadManager {
     MockQam qam = new MockQam();
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, 2, qam);
     wm.start();
-    WmTezSession session1 = (WmTezSession) wm.getSession(null, null, conf);
+    WmTezSession session1 = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     // First, try to reuse from the same pool - should "just work".
-    WmTezSession session1a = (WmTezSession) wm.getSession(session1, null, conf);
+    WmTezSession session1a = (WmTezSession) wm.getSession(session1, new MappingInput("user"), conf);
     assertSame(session1, session1a);
     assertEquals(1.0, session1.getClusterFraction(), EPSILON);
     // Should still be able to get the 2nd session.
-    WmTezSession session2 = (WmTezSession) wm.getSession(null, null, conf);
+    WmTezSession session2 = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
 
     // Now try to reuse with no other sessions remaining. Should still work.
-    WmTezSession session2a = (WmTezSession) wm.getSession(session2, null, conf);
+    WmTezSession session2a = (WmTezSession) wm.getSession(session2, new MappingInput("user"), conf);
     assertSame(session2, session2a);
     assertEquals(0.5, session1.getClusterFraction(), EPSILON);
     assertEquals(0.5, session2.getClusterFraction(), EPSILON);
@@ -421,19 +417,19 @@ public class TestWorkloadManager {
     plan.setMappings(Lists.newArrayList(mapping("A", "A"), mapping("B", "B")));
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, "A", conf),
-        sessionA2 = (WmTezSession) wm.getSession(null, "A", conf);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals("A", sessionA1.getPoolName());
     assertEquals(0.3f, sessionA1.getClusterFraction(), EPSILON);
     assertEquals("A", sessionA2.getPoolName());
     assertEquals(0.3f, sessionA2.getClusterFraction(), EPSILON);
-    WmTezSession sessionB1 = (WmTezSession) wm.getSession(sessionA1, "B", conf);
+    WmTezSession sessionB1 = (WmTezSession) wm.getSession(sessionA1, new MappingInput("B"), conf);
     assertSame(sessionA1, sessionB1);
     assertEquals("B", sessionB1.getPoolName());
     assertEquals(0.4f, sessionB1.getClusterFraction(), EPSILON);
     assertEquals(0.6f, sessionA2.getClusterFraction(), EPSILON); // A1 removed from A.
     // Make sure that we can still get a session from A.
-    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, "A", conf);
+    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals("A", sessionA3.getPoolName());
     assertEquals(0.3f, sessionA3.getClusterFraction(), EPSILON);
     assertEquals(0.3f, sessionA3.getClusterFraction(), EPSILON);
@@ -453,7 +449,7 @@ public class TestWorkloadManager {
     wm.start();
  
     // One session will be running, the other will be queued in "A"
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, "U", conf);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("U"), conf);
     assertEquals("A", sessionA1.getPoolName());
     assertEquals(0.5f, sessionA1.getClusterFraction(), EPSILON);
     final AtomicReference<WmTezSession> sessionA2 = new AtomicReference<>();
@@ -478,7 +474,7 @@ public class TestWorkloadManager {
     assertEquals(0.4f, sessionA2.get().getClusterFraction(), EPSILON);
     // The new session will also go to B now.
     sessionA2.get().returnToSessionManager();
-    WmTezSession sessionB1 = (WmTezSession) wm.getSession(null, "U", conf);
+    WmTezSession sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("U"), conf);
     assertEquals("B", sessionB1.getPoolName());
     assertEquals(0.4f, sessionB1.getClusterFraction(), EPSILON);
     sessionA1.returnToSessionManager();
@@ -500,11 +496,11 @@ public class TestWorkloadManager {
  
     // A: 1/1 running, 1 queued; B: 2/2 running, C: 1/2 running, D: 1/1 running, 1 queued.
     // Total: 5/6 running.
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, "A", conf),
-        sessionB1 = (WmTezSession) wm.getSession(null, "B", conf),
-        sessionB2 = (WmTezSession) wm.getSession(null, "B", conf),
-        sessionC1 = (WmTezSession) wm.getSession(null, "C", conf),
-        sessionD1 = (WmTezSession) wm.getSession(null, "D", conf);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf),
+        sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf),
+        sessionC1 = (WmTezSession) wm.getSession(null, new MappingInput("C"), conf),
+        sessionD1 = (WmTezSession) wm.getSession(null, new MappingInput("D"), conf);
     final AtomicReference<WmTezSession> sessionA2 = new AtomicReference<>(),
         sessionD2 = new AtomicReference<>();
     final AtomicReference<Throwable> error = new AtomicReference<>();
@@ -683,7 +679,7 @@ public class TestWorkloadManager {
     failedWait.setException(new Exception("foo"));
     theOnlySession.setWaitForAmRegistryFuture(failedWait);
     try {
-      TezSessionState r = wm.getSession(null, "A", conf);
+      TezSessionState r = wm.getSession(null, new MappingInput("A"), conf);
       fail("Expected an error but got " + r);
     } catch (Exception ex) {
       // Expected.
@@ -734,7 +730,7 @@ public class TestWorkloadManager {
     assertEquals(0f, oldSession.getClusterFraction(), EPSILON);
     pool.returnSession(theOnlySession);
     // Make sure we can actually get a session still - parallelism/etc. should not be affected.
-    WmTezSession result = (WmTezSession) wm.getSession(null, "A", conf);
+    WmTezSession result = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals(sessionPoolName, result.getPoolName());
     assertEquals(1f, result.getClusterFraction(), EPSILON);
     result.returnToSessionManager();

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/test/queries/clientpositive/resourceplan.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/resourceplan.q b/ql/src/test/queries/clientpositive/resourceplan.q
index f1be695..a094712 100644
--- a/ql/src/test/queries/clientpositive/resourceplan.q
+++ b/ql/src/test/queries/clientpositive/resourceplan.q
@@ -46,6 +46,10 @@ SELECT * FROM SYS.WM_RESOURCEPLANS;
 ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 20;
 SELECT * FROM SYS.WM_RESOURCEPLANS;
 
+-- Will fail for now; there are no pools.
+ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = 'default';
+SELECT * FROM SYS.WM_RESOURCEPLANS;
+
 --
 -- Activate, enable, disable.
 --
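
The new statement exercises the plan-level default: QUERY_PARALLELISM and
DEFAULT POOL can now be set in a single ALTER RESOURCE PLAN, and per the
comment it is expected to fail for now because no pools exist yet. At the
Thrift level the same clause maps onto the new defaultPoolPath field of
WMResourcePlan; a rough sketch, assuming the generated Java setters mirror the
field names (the pool name is illustrative and must reference an existing
pool):

  import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

  WMResourcePlan plan = new WMResourcePlan("plan_3");
  plan.setQueryParallelism(30);        // SET QUERY_PARALLELISM = 30
  plan.setDefaultPoolPath("default");  // DEFAULT POOL = 'default'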

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/ql/src/test/results/clientpositive/llap/resourceplan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/resourceplan.q.out b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
index 2f314a6..5b5d8f1 100644
--- a/ql/src/test/results/clientpositive/llap/resourceplan.q.out
+++ b/ql/src/test/results/clientpositive/llap/resourceplan.q.out
@@ -2122,18 +2122,20 @@ POSTHOOK: Lineage: PARTITION_STATS_VIEW.transient_last_ddl_time EXPRESSION [(par
 PREHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
   `NAME` string,
   `STATUS` string,
-  `QUERY_PARALLELISM` int
+  `QUERY_PARALLELISM` int,
+  `DEFAULT_POOL_PATH` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
 "hive.sql.database.type" = "METASTORE",
 "hive.sql.query" =
 "SELECT
-  \"NAME\",
+  \"WM_RESOURCEPLAN\".\"NAME\",
   \"STATUS\",
-  \"QUERY_PARALLELISM\"
+  \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
+  \"WM_POOL\".\"PATH\"
 FROM
-  \"WM_RESOURCEPLAN\""
+  \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
 )
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: SYS@WM_RESOURCEPLANS
@@ -2141,18 +2143,20 @@ PREHOOK: Output: database:sys
 POSTHOOK: query: CREATE TABLE IF NOT EXISTS `WM_RESOURCEPLANS` (
   `NAME` string,
   `STATUS` string,
-  `QUERY_PARALLELISM` int
+  `QUERY_PARALLELISM` int,
+  `DEFAULT_POOL_PATH` string
 )
 STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler'
 TBLPROPERTIES (
 "hive.sql.database.type" = "METASTORE",
 "hive.sql.query" =
 "SELECT
-  \"NAME\",
+  \"WM_RESOURCEPLAN\".\"NAME\",
   \"STATUS\",
-  \"QUERY_PARALLELISM\"
+  \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\",
+  \"WM_POOL\".\"PATH\"
 FROM
-  \"WM_RESOURCEPLAN\""
+  \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\""
 )
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: SYS@WM_RESOURCEPLANS
@@ -2995,7 +2999,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_1	DISABLED	NULL
+plan_1	DISABLED	NULL	default
 PREHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10
 PREHOOK: type: CREATE RESOURCEPLAN
 POSTHOOK: query: CREATE RESOURCE PLAN plan_2 WITH QUERY_PARALLELISM 10
@@ -3019,8 +3023,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_1	DISABLED	NULL
-plan_2	DISABLED	10
+plan_1	DISABLED	NULL	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_2
 PREHOOK: type: ALTER RESOURCEPLAN
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. AlreadyExistsException(message:Resource plan name should be unique: )
@@ -3032,8 +3036,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_1	DISABLED	NULL
-plan_2	DISABLED	10
+plan_1	DISABLED	NULL	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_1 RENAME TO plan_3
@@ -3046,8 +3050,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	DISABLED	NULL
-plan_2	DISABLED	10
+plan_3	DISABLED	NULL	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 20
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 20
@@ -3060,8 +3064,21 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	DISABLED	20
-plan_2	DISABLED	10
+plan_3	DISABLED	20	default
+plan_2	DISABLED	10	default
+PREHOOK: query: ALTER RESOURCE PLAN plan_3 SET QUERY_PARALLELISM = 30, DEFAULT POOL = 'default'
+PREHOOK: type: ALTER RESOURCEPLAN
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Portion of expression could not be parsed: and resourcePlan == rp)
+PREHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+PREHOOK: type: QUERY
+PREHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
+POSTHOOK: type: QUERY
+POSTHOOK: Input: sys@wm_resourceplans
+#### A masked pattern was here ####
+plan_3	DISABLED	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan plan_3 is disabled and should be enabled before activation (or in the same command))
@@ -3073,8 +3090,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	DISABLED	20
-plan_2	DISABLED	10
+plan_3	DISABLED	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
@@ -3087,8 +3104,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	DISABLED	20
-plan_2	DISABLED	10
+plan_3	DISABLED	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
@@ -3101,8 +3118,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ENABLED	20
-plan_2	DISABLED	10
+plan_3	ENABLED	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
@@ -3115,8 +3132,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ACTIVE	20
-plan_2	DISABLED	10
+plan_3	ACTIVE	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ACTIVATE
@@ -3129,8 +3146,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ACTIVE	20
-plan_2	DISABLED	10
+plan_3	ACTIVE	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active, activate another plan first.)
@@ -3142,8 +3159,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ACTIVE	20
-plan_2	DISABLED	10
+plan_3	ACTIVE	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan plan_3 is active, activate another plan first.)
@@ -3155,8 +3172,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ACTIVE	20
-plan_2	DISABLED	10
+plan_3	ACTIVE	20	default
+plan_2	DISABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ENABLE
@@ -3169,8 +3186,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ACTIVE	20
-plan_2	ENABLED	10
+plan_3	ACTIVE	20	default
+plan_2	ENABLED	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_2 ACTIVATE
@@ -3183,8 +3200,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ENABLED	20
-plan_2	ACTIVE	10
+plan_3	ENABLED	20	default
+plan_2	ACTIVE	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 ENABLE
@@ -3197,8 +3214,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	ENABLED	20
-plan_2	ACTIVE	10
+plan_3	ENABLED	20	default
+plan_2	ACTIVE	10	default
 PREHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
 PREHOOK: type: ALTER RESOURCEPLAN
 POSTHOOK: query: ALTER RESOURCE PLAN plan_3 DISABLE
@@ -3211,11 +3228,11 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_3	DISABLED	20
-plan_2	ACTIVE	10
+plan_3	DISABLED	20	default
+plan_2	ACTIVE	10	default
 PREHOOK: query: DROP RESOURCE PLAN plan_2
 PREHOOK: type: DROP RESOURCEPLAN
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. NoSuchObjectException(message:Cannot find resourcePlan: plan_2 or its active)
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Cannot drop an active resource plan)
 PREHOOK: query: DROP RESOURCE PLAN plan_3
 PREHOOK: type: DROP RESOURCEPLAN
 POSTHOOK: query: DROP RESOURCE PLAN plan_3
@@ -3228,7 +3245,7 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_2	ACTIVE	10
+plan_2	ACTIVE	10	default
 PREHOOK: query: CREATE RESOURCE PLAN plan_1
 PREHOOK: type: CREATE RESOURCEPLAN
 POSTHOOK: query: CREATE RESOURCE PLAN plan_1
@@ -3305,8 +3322,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_2	ACTIVE	10
-plan_1	ENABLED	NULL
+plan_2	ACTIVE	10	default
+plan_1	ENABLED	NULL	default
 PREHOOK: query: DROP TRIGGER plan_1.trigger_2
 PREHOOK: type: DROP TRIGGER
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
@@ -3325,8 +3342,8 @@ POSTHOOK: query: SELECT * FROM SYS.WM_RESOURCEPLANS
 POSTHOOK: type: QUERY
 POSTHOOK: Input: sys@wm_resourceplans
 #### A masked pattern was here ####
-plan_2	ENABLED	10
-plan_1	ACTIVE	NULL
+plan_2	ENABLED	10	default
+plan_1	ACTIVE	NULL	default
 PREHOOK: query: DROP TRIGGER plan_1.trigger_2
 PREHOOK: type: DROP TRIGGER
 FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidOperationException(message:Resource plan must be disabled to edit it.)
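
With the LEFT OUTER JOIN in place, SELECT * FROM SYS.WM_RESOURCEPLANS now
returns a fourth DEFAULT_POOL_PATH column; the outer join is what lets plans
whose DEFAULT_POOL_ID is unset still appear, with a NULL path. A quick way to
inspect the new column from a client, as a hedged sketch using the standard
Hive JDBC driver (URL and credentials are placeholders):

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.ResultSet;
  import java.sql.Statement;

  public class ShowResourcePlans {
    public static void main(String[] args) throws Exception {
      // Placeholder connection URL; adjust host/port for your HS2 instance.
      try (Connection conn = DriverManager.getConnection(
              "jdbc:hive2://localhost:10000/default", "hive", "");
           Statement stmt = conn.createStatement();
           ResultSet rs = stmt.executeQuery(
               "SELECT * FROM SYS.WM_RESOURCEPLANS")) {
        while (rs.next()) {
          // DEFAULT_POOL_PATH prints as null when no default pool is set.
          System.out.println(rs.getString("NAME") + "\t"
              + rs.getString("STATUS") + "\t"
              + rs.getString("QUERY_PARALLELISM") + "\t"
              + rs.getString("DEFAULT_POOL_PATH"));
        }
      }
    }
  }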

http://git-wip-us.apache.org/repos/asf/hive/blob/a42314de/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 300ba72..602dfda 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -302,9 +302,7 @@ public class HiveServer2 extends CompositeService {
     pool.setQueryParallelism(1);
     resourcePlan = new WMFullResourcePlan(
         new WMResourcePlan("testDefault"), Lists.newArrayList(pool));
-    WMMapping mapping = new WMMapping("testDefault", "DEFAULT", "");
-    mapping.setPoolName("llap");
-    resourcePlan.addToMappings(mapping);
+    resourcePlan.getPlan().setDefaultPoolPath("testDefault");
     return resourcePlan;
   }
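
In the embedded test plan above, the explicit unmanaged WMMapping is gone:
the plan itself now names its default pool path, so sessions that match no
mapping can land in that pool. A hedged sketch of the intended lookup order
(resolveMapping() is a hypothetical stand-in for the WM mapping lookup, not a
method in this patch):

  // Sketch only: resolve the target pool for a session request.
  String poolPath = resolveMapping(mappingInput);  // hypothetical helper
  if (poolPath == null) {
    // Fall back to the plan-level default added by this patch.
    poolPath = resourcePlan.getPlan().getDefaultPoolPath();
  }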