Posted to commits@hive.apache.org by ga...@apache.org on 2017/12/18 23:25:44 UTC

[01/50] [abbrv] hive git commit: HIVE-18228: Azure credential properties should be added to the HiveConf hidden list (Andrew Sherman, via Peter Vary) [Forced Update!]

Repository: hive
Updated Branches:
  refs/heads/standalone-metastore bd212257f -> b3cb8526b (forced update)


HIVE-18228: Azure credential properties should be added to the HiveConf hidden list (Andrew Sherman, via Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e86c77af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e86c77af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e86c77af

Branch: refs/heads/standalone-metastore
Commit: e86c77af5ffa80b55f46eb3b69b0365fbf79ab5a
Parents: 095e6bf
Author: Peter Vary <pv...@cloudera.com>
Authored: Wed Dec 13 13:04:24 2017 +0100
Committer: Peter Vary <pv...@cloudera.com>
Committed: Wed Dec 13 13:04:24 2017 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 +-
 .../apache/hadoop/hive/conf/TestHiveConf.java   | 43 +++++++++++++-------
 2 files changed, 31 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e86c77af/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index dc31505..7a81612 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3617,7 +3617,9 @@ public class HiveConf extends Configuration {
         + ",fs.s3n.awsSecretAccessKey"
         + ",fs.s3a.access.key"
         + ",fs.s3a.secret.key"
-        + ",fs.s3a.proxy.password",
+        + ",fs.s3a.proxy.password"
+        + ",dfs.adls.oauth2.credential"
+        + ",fs.adl.oauth2.credential",
         "Comma separated list of configuration options which should not be read by normal user like passwords"),
     HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list",
         "hive.added.files.path,hive.added.jars.path,hive.added.archives.path",

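A minimal Java sketch (not part of the patch) of how the hidden list behaves
once the two ADLS credential keys above are registered; it uses only the
HiveConf methods exercised by the updated test below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;

public class HiddenConfDemo {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // The ADLS OAuth2 credential keys are now part of the
    // hive.conf.hidden.list default, so they are flagged as hidden.
    System.out.println(conf.isHiddenConfig("dfs.adls.oauth2.credential")); // true
    System.out.println(conf.isHiddenConfig("fs.adl.oauth2.credential"));   // true

    // Hiding is prefix-based: a key that merely starts with a hidden
    // name is hidden too (see the "postfix" assertion in the test below).
    System.out.println(conf.isHiddenConfig("fs.adl.oauth2.credentialXYZ")); // true

    // stripHiddenConfigurations blanks hidden values in a target conf,
    // e.g. before it is logged or exposed to an unprivileged reader.
    Configuration target = new Configuration(conf);
    target.set("fs.adl.oauth2.credential", "secret");
    conf.stripHiddenConfigurations(target);
    System.out.println(target.get("fs.adl.oauth2.credential")); // "" (empty string)
  }
}
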
http://git-wip-us.apache.org/repos/asf/hive/blob/e86c77af/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
index d24668f..6a67809 100644
--- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
+++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.conf;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -27,6 +28,7 @@ import org.junit.Test;
 
 import java.io.UnsupportedEncodingException;
 import java.net.URLEncoder;
+import java.util.ArrayList;
 import java.util.concurrent.TimeUnit;
 
 
@@ -130,11 +132,8 @@ public class TestHiveConf {
   @Test
   public void testHiddenConfig() throws Exception {
     HiveConf conf = new HiveConf();
-    // check password configs are hidden
-    Assert.assertTrue(conf.isHiddenConfig(HiveConf.ConfVars.METASTOREPWD.varname));
-    Assert.assertTrue(conf.isHiddenConfig(
-        HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname));
-    // check change hidden list should fail
+
+    // check that a change to the hidden list should fail
     try {
       final String name = HiveConf.ConfVars.HIVE_CONF_HIDDEN_LIST.varname;
       conf.verifyAndSet(name, "");
@@ -143,16 +142,30 @@ public class TestHiveConf {
     } catch (IllegalArgumentException e) {
       // the verifyAndSet in this case is expected to fail with the IllegalArgumentException
     }
-    // check stripHiddenConfigurations
-    Configuration conf2 = new Configuration(conf);
-    conf2.set(HiveConf.ConfVars.METASTOREPWD.varname, "password");
-    conf2.set(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname, "password");
-    conf.stripHiddenConfigurations(conf2);
-    Assert.assertTrue(conf.isHiddenConfig(HiveConf.ConfVars.METASTOREPWD.varname + "postfix"));
-    Assert.assertTrue(
-        conf.isHiddenConfig(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "postfix"));
-    Assert.assertEquals("", conf2.get(HiveConf.ConfVars.METASTOREPWD.varname));
-    Assert.assertEquals("", conf2.get(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname));
+
+    ArrayList<String> hiddenList = Lists.newArrayList(
+        HiveConf.ConfVars.METASTOREPWD.varname,
+        HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname,
+        "fs.s3.awsSecretAccessKey",
+        "fs.s3n.awsSecretAccessKey",
+        "dfs.adls.oauth2.credential",
+        "fs.adl.oauth2.credential"
+    );
+
+    for (String hiddenConfig : hiddenList) {
+      // check configs are hidden
+      Assert.assertTrue("config " + hiddenConfig + " should be hidden",
+          conf.isHiddenConfig(hiddenConfig));
+      // check stripHiddenConfigurations removes the property
+      Configuration conf2 = new Configuration(conf);
+      conf2.set(hiddenConfig, "password");
+      conf.stripHiddenConfigurations(conf2);
+      // check that a property that begins the same is also hidden
+      Assert.assertTrue(conf.isHiddenConfig(
+          hiddenConfig + "postfix"));
+      // Check the stripped property is the empty string
+      Assert.assertEquals("", conf2.get(hiddenConfig));
+    }
   }
 
   @Test


[46/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
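
Before the generated code below, a minimal C++ sketch (not from the patch)
showing how the new optional SerDeInfo fields introduced by this change might
be populated through the __set_* helpers the diff adds. The header name and
the Apache::Hadoop::Hive namespace follow the generated hive_metastore module;
the serializer/deserializer class names are hypothetical.

#include <iostream>
#include "hive_metastore_types.h"

int main() {
  using namespace Apache::Hadoop::Hive;

  SerDeInfo serde;
  serde.__set_name("avro_serde");
  serde.__set_serializationLib("org.apache.hadoop.hive.serde2.avro.AvroSerDe");

  // New in HIVE-17990: optional metadata for schema-registry-backed serdes.
  // Each __set_* call also flips the matching __isset flag, so write()
  // serializes the field (see the __isset checks in SerDeInfo::write below).
  serde.__set_description("Avro serde backed by the schema registry");
  serde.__set_serializerClass("org.example.MySerializer");     // hypothetical
  serde.__set_deserializerClass("org.example.MyDeserializer"); // hypothetical
  serde.__set_serdeType(SerdeType::SCHEMA_REGISTRY);

  serde.printTo(std::cout);  // unset optional fields print as <null>
  std::cout << std::endl;
  return 0;
}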
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 913e3cc..f026ff9 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -149,6 +149,72 @@ const char* _kEventRequestTypeNames[] = {
 };
 const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kEventRequestTypeValues, _kEventRequestTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
 
+int _kSerdeTypeValues[] = {
+  SerdeType::HIVE,
+  SerdeType::SCHEMA_REGISTRY
+};
+const char* _kSerdeTypeNames[] = {
+  "HIVE",
+  "SCHEMA_REGISTRY"
+};
+const std::map<int, const char*> _SerdeType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSerdeTypeValues, _kSerdeTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaTypeValues[] = {
+  SchemaType::HIVE,
+  SchemaType::AVRO
+};
+const char* _kSchemaTypeNames[] = {
+  "HIVE",
+  "AVRO"
+};
+const std::map<int, const char*> _SchemaType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSchemaTypeValues, _kSchemaTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaCompatibilityValues[] = {
+  SchemaCompatibility::NONE,
+  SchemaCompatibility::BACKWARD,
+  SchemaCompatibility::FORWARD,
+  SchemaCompatibility::BOTH
+};
+const char* _kSchemaCompatibilityNames[] = {
+  "NONE",
+  "BACKWARD",
+  "FORWARD",
+  "BOTH"
+};
+const std::map<int, const char*> _SchemaCompatibility_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(4, _kSchemaCompatibilityValues, _kSchemaCompatibilityNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaValidationValues[] = {
+  SchemaValidation::LATEST,
+  SchemaValidation::ALL
+};
+const char* _kSchemaValidationNames[] = {
+  "LATEST",
+  "ALL"
+};
+const std::map<int, const char*> _SchemaValidation_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSchemaValidationValues, _kSchemaValidationNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaVersionStateValues[] = {
+  SchemaVersionState::INITIATED,
+  SchemaVersionState::START_REVIEW,
+  SchemaVersionState::CHANGES_REQUIRED,
+  SchemaVersionState::REVIEWED,
+  SchemaVersionState::ENABLED,
+  SchemaVersionState::DISABLED,
+  SchemaVersionState::ARCHIVED,
+  SchemaVersionState::DELETED
+};
+const char* _kSchemaVersionStateNames[] = {
+  "INITIATED",
+  "START_REVIEW",
+  "CHANGES_REQUIRED",
+  "REVIEWED",
+  "ENABLED",
+  "DISABLED",
+  "ARCHIVED",
+  "DELETED"
+};
+const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
 int _kFunctionTypeValues[] = {
   FunctionType::JAVA
 };
@@ -3999,6 +4065,26 @@ void SerDeInfo::__set_parameters(const std::map<std::string, std::string> & val)
   this->parameters = val;
 }
 
+void SerDeInfo::__set_description(const std::string& val) {
+  this->description = val;
+__isset.description = true;
+}
+
+void SerDeInfo::__set_serializerClass(const std::string& val) {
+  this->serializerClass = val;
+__isset.serializerClass = true;
+}
+
+void SerDeInfo::__set_deserializerClass(const std::string& val) {
+  this->deserializerClass = val;
+__isset.deserializerClass = true;
+}
+
+void SerDeInfo::__set_serdeType(const SerdeType::type val) {
+  this->serdeType = val;
+__isset.serdeType = true;
+}
+
 uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -4059,6 +4145,40 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->description);
+          this->__isset.description = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->serializerClass);
+          this->__isset.serializerClass = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->deserializerClass);
+          this->__isset.deserializerClass = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          int32_t ecast144;
+          xfer += iprot->readI32(ecast144);
+          this->serdeType = (SerdeType::type)ecast144;
+          this->__isset.serdeType = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -4087,16 +4207,36 @@ uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter144;
-    for (_iter144 = this->parameters.begin(); _iter144 != this->parameters.end(); ++_iter144)
+    std::map<std::string, std::string> ::const_iterator _iter145;
+    for (_iter145 = this->parameters.begin(); _iter145 != this->parameters.end(); ++_iter145)
     {
-      xfer += oprot->writeString(_iter144->first);
-      xfer += oprot->writeString(_iter144->second);
+      xfer += oprot->writeString(_iter145->first);
+      xfer += oprot->writeString(_iter145->second);
     }
     xfer += oprot->writeMapEnd();
   }
   xfer += oprot->writeFieldEnd();
 
+  if (this->__isset.description) {
+    xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 4);
+    xfer += oprot->writeString(this->description);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.serializerClass) {
+    xfer += oprot->writeFieldBegin("serializerClass", ::apache::thrift::protocol::T_STRING, 5);
+    xfer += oprot->writeString(this->serializerClass);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.deserializerClass) {
+    xfer += oprot->writeFieldBegin("deserializerClass", ::apache::thrift::protocol::T_STRING, 6);
+    xfer += oprot->writeString(this->deserializerClass);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.serdeType) {
+    xfer += oprot->writeFieldBegin("serdeType", ::apache::thrift::protocol::T_I32, 7);
+    xfer += oprot->writeI32((int32_t)this->serdeType);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -4107,20 +4247,32 @@ void swap(SerDeInfo &a, SerDeInfo &b) {
   swap(a.name, b.name);
   swap(a.serializationLib, b.serializationLib);
   swap(a.parameters, b.parameters);
+  swap(a.description, b.description);
+  swap(a.serializerClass, b.serializerClass);
+  swap(a.deserializerClass, b.deserializerClass);
+  swap(a.serdeType, b.serdeType);
   swap(a.__isset, b.__isset);
 }
 
-SerDeInfo::SerDeInfo(const SerDeInfo& other145) {
-  name = other145.name;
-  serializationLib = other145.serializationLib;
-  parameters = other145.parameters;
-  __isset = other145.__isset;
-}
-SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other146) {
+SerDeInfo::SerDeInfo(const SerDeInfo& other146) {
   name = other146.name;
   serializationLib = other146.serializationLib;
   parameters = other146.parameters;
+  description = other146.description;
+  serializerClass = other146.serializerClass;
+  deserializerClass = other146.deserializerClass;
+  serdeType = other146.serdeType;
   __isset = other146.__isset;
+}
+SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other147) {
+  name = other147.name;
+  serializationLib = other147.serializationLib;
+  parameters = other147.parameters;
+  description = other147.description;
+  serializerClass = other147.serializerClass;
+  deserializerClass = other147.deserializerClass;
+  serdeType = other147.serdeType;
+  __isset = other147.__isset;
   return *this;
 }
 void SerDeInfo::printTo(std::ostream& out) const {
@@ -4129,6 +4281,10 @@ void SerDeInfo::printTo(std::ostream& out) const {
   out << "name=" << to_string(name);
   out << ", " << "serializationLib=" << to_string(serializationLib);
   out << ", " << "parameters=" << to_string(parameters);
+  out << ", " << "description="; (__isset.description ? (out << to_string(description)) : (out << "<null>"));
+  out << ", " << "serializerClass="; (__isset.serializerClass ? (out << to_string(serializerClass)) : (out << "<null>"));
+  out << ", " << "deserializerClass="; (__isset.deserializerClass ? (out << to_string(deserializerClass)) : (out << "<null>"));
+  out << ", " << "serdeType="; (__isset.serdeType ? (out << to_string(serdeType)) : (out << "<null>"));
   out << ")";
 }
 
@@ -4219,15 +4375,15 @@ void swap(Order &a, Order &b) {
   swap(a.__isset, b.__isset);
 }
 
-Order::Order(const Order& other147) {
-  col = other147.col;
-  order = other147.order;
-  __isset = other147.__isset;
-}
-Order& Order::operator=(const Order& other148) {
+Order::Order(const Order& other148) {
   col = other148.col;
   order = other148.order;
   __isset = other148.__isset;
+}
+Order& Order::operator=(const Order& other149) {
+  col = other149.col;
+  order = other149.order;
+  __isset = other149.__isset;
   return *this;
 }
 void Order::printTo(std::ostream& out) const {
@@ -4280,14 +4436,14 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->skewedColNames.clear();
-            uint32_t _size149;
-            ::apache::thrift::protocol::TType _etype152;
-            xfer += iprot->readListBegin(_etype152, _size149);
-            this->skewedColNames.resize(_size149);
-            uint32_t _i153;
-            for (_i153 = 0; _i153 < _size149; ++_i153)
+            uint32_t _size150;
+            ::apache::thrift::protocol::TType _etype153;
+            xfer += iprot->readListBegin(_etype153, _size150);
+            this->skewedColNames.resize(_size150);
+            uint32_t _i154;
+            for (_i154 = 0; _i154 < _size150; ++_i154)
             {
-              xfer += iprot->readString(this->skewedColNames[_i153]);
+              xfer += iprot->readString(this->skewedColNames[_i154]);
             }
             xfer += iprot->readListEnd();
           }
@@ -4300,23 +4456,23 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->skewedColValues.clear();
-            uint32_t _size154;
-            ::apache::thrift::protocol::TType _etype157;
-            xfer += iprot->readListBegin(_etype157, _size154);
-            this->skewedColValues.resize(_size154);
-            uint32_t _i158;
-            for (_i158 = 0; _i158 < _size154; ++_i158)
+            uint32_t _size155;
+            ::apache::thrift::protocol::TType _etype158;
+            xfer += iprot->readListBegin(_etype158, _size155);
+            this->skewedColValues.resize(_size155);
+            uint32_t _i159;
+            for (_i159 = 0; _i159 < _size155; ++_i159)
             {
               {
-                this->skewedColValues[_i158].clear();
-                uint32_t _size159;
-                ::apache::thrift::protocol::TType _etype162;
-                xfer += iprot->readListBegin(_etype162, _size159);
-                this->skewedColValues[_i158].resize(_size159);
-                uint32_t _i163;
-                for (_i163 = 0; _i163 < _size159; ++_i163)
+                this->skewedColValues[_i159].clear();
+                uint32_t _size160;
+                ::apache::thrift::protocol::TType _etype163;
+                xfer += iprot->readListBegin(_etype163, _size160);
+                this->skewedColValues[_i159].resize(_size160);
+                uint32_t _i164;
+                for (_i164 = 0; _i164 < _size160; ++_i164)
                 {
-                  xfer += iprot->readString(this->skewedColValues[_i158][_i163]);
+                  xfer += iprot->readString(this->skewedColValues[_i159][_i164]);
                 }
                 xfer += iprot->readListEnd();
               }
@@ -4332,29 +4488,29 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->skewedColValueLocationMaps.clear();
-            uint32_t _size164;
-            ::apache::thrift::protocol::TType _ktype165;
-            ::apache::thrift::protocol::TType _vtype166;
-            xfer += iprot->readMapBegin(_ktype165, _vtype166, _size164);
-            uint32_t _i168;
-            for (_i168 = 0; _i168 < _size164; ++_i168)
+            uint32_t _size165;
+            ::apache::thrift::protocol::TType _ktype166;
+            ::apache::thrift::protocol::TType _vtype167;
+            xfer += iprot->readMapBegin(_ktype166, _vtype167, _size165);
+            uint32_t _i169;
+            for (_i169 = 0; _i169 < _size165; ++_i169)
             {
-              std::vector<std::string>  _key169;
+              std::vector<std::string>  _key170;
               {
-                _key169.clear();
-                uint32_t _size171;
-                ::apache::thrift::protocol::TType _etype174;
-                xfer += iprot->readListBegin(_etype174, _size171);
-                _key169.resize(_size171);
-                uint32_t _i175;
-                for (_i175 = 0; _i175 < _size171; ++_i175)
+                _key170.clear();
+                uint32_t _size172;
+                ::apache::thrift::protocol::TType _etype175;
+                xfer += iprot->readListBegin(_etype175, _size172);
+                _key170.resize(_size172);
+                uint32_t _i176;
+                for (_i176 = 0; _i176 < _size172; ++_i176)
                 {
-                  xfer += iprot->readString(_key169[_i175]);
+                  xfer += iprot->readString(_key170[_i176]);
                 }
                 xfer += iprot->readListEnd();
               }
-              std::string& _val170 = this->skewedColValueLocationMaps[_key169];
-              xfer += iprot->readString(_val170);
+              std::string& _val171 = this->skewedColValueLocationMaps[_key170];
+              xfer += iprot->readString(_val171);
             }
             xfer += iprot->readMapEnd();
           }
@@ -4383,10 +4539,10 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColNames.size()));
-    std::vector<std::string> ::const_iterator _iter176;
-    for (_iter176 = this->skewedColNames.begin(); _iter176 != this->skewedColNames.end(); ++_iter176)
+    std::vector<std::string> ::const_iterator _iter177;
+    for (_iter177 = this->skewedColNames.begin(); _iter177 != this->skewedColNames.end(); ++_iter177)
     {
-      xfer += oprot->writeString((*_iter176));
+      xfer += oprot->writeString((*_iter177));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4395,15 +4551,15 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->skewedColValues.size()));
-    std::vector<std::vector<std::string> > ::const_iterator _iter177;
-    for (_iter177 = this->skewedColValues.begin(); _iter177 != this->skewedColValues.end(); ++_iter177)
+    std::vector<std::vector<std::string> > ::const_iterator _iter178;
+    for (_iter178 = this->skewedColValues.begin(); _iter178 != this->skewedColValues.end(); ++_iter178)
     {
       {
-        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter177).size()));
-        std::vector<std::string> ::const_iterator _iter178;
-        for (_iter178 = (*_iter177).begin(); _iter178 != (*_iter177).end(); ++_iter178)
+        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter178).size()));
+        std::vector<std::string> ::const_iterator _iter179;
+        for (_iter179 = (*_iter178).begin(); _iter179 != (*_iter178).end(); ++_iter179)
         {
-          xfer += oprot->writeString((*_iter178));
+          xfer += oprot->writeString((*_iter179));
         }
         xfer += oprot->writeListEnd();
       }
@@ -4415,19 +4571,19 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColValueLocationMaps.size()));
-    std::map<std::vector<std::string> , std::string> ::const_iterator _iter179;
-    for (_iter179 = this->skewedColValueLocationMaps.begin(); _iter179 != this->skewedColValueLocationMaps.end(); ++_iter179)
+    std::map<std::vector<std::string> , std::string> ::const_iterator _iter180;
+    for (_iter180 = this->skewedColValueLocationMaps.begin(); _iter180 != this->skewedColValueLocationMaps.end(); ++_iter180)
     {
       {
-        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter179->first.size()));
-        std::vector<std::string> ::const_iterator _iter180;
-        for (_iter180 = _iter179->first.begin(); _iter180 != _iter179->first.end(); ++_iter180)
+        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter180->first.size()));
+        std::vector<std::string> ::const_iterator _iter181;
+        for (_iter181 = _iter180->first.begin(); _iter181 != _iter180->first.end(); ++_iter181)
         {
-          xfer += oprot->writeString((*_iter180));
+          xfer += oprot->writeString((*_iter181));
         }
         xfer += oprot->writeListEnd();
       }
-      xfer += oprot->writeString(_iter179->second);
+      xfer += oprot->writeString(_iter180->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -4446,17 +4602,17 @@ void swap(SkewedInfo &a, SkewedInfo &b) {
   swap(a.__isset, b.__isset);
 }
 
-SkewedInfo::SkewedInfo(const SkewedInfo& other181) {
-  skewedColNames = other181.skewedColNames;
-  skewedColValues = other181.skewedColValues;
-  skewedColValueLocationMaps = other181.skewedColValueLocationMaps;
-  __isset = other181.__isset;
-}
-SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other182) {
+SkewedInfo::SkewedInfo(const SkewedInfo& other182) {
   skewedColNames = other182.skewedColNames;
   skewedColValues = other182.skewedColValues;
   skewedColValueLocationMaps = other182.skewedColValueLocationMaps;
   __isset = other182.__isset;
+}
+SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other183) {
+  skewedColNames = other183.skewedColNames;
+  skewedColValues = other183.skewedColValues;
+  skewedColValueLocationMaps = other183.skewedColValueLocationMaps;
+  __isset = other183.__isset;
   return *this;
 }
 void SkewedInfo::printTo(std::ostream& out) const {
@@ -4548,14 +4704,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->cols.clear();
-            uint32_t _size183;
-            ::apache::thrift::protocol::TType _etype186;
-            xfer += iprot->readListBegin(_etype186, _size183);
-            this->cols.resize(_size183);
-            uint32_t _i187;
-            for (_i187 = 0; _i187 < _size183; ++_i187)
+            uint32_t _size184;
+            ::apache::thrift::protocol::TType _etype187;
+            xfer += iprot->readListBegin(_etype187, _size184);
+            this->cols.resize(_size184);
+            uint32_t _i188;
+            for (_i188 = 0; _i188 < _size184; ++_i188)
             {
-              xfer += this->cols[_i187].read(iprot);
+              xfer += this->cols[_i188].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4616,14 +4772,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->bucketCols.clear();
-            uint32_t _size188;
-            ::apache::thrift::protocol::TType _etype191;
-            xfer += iprot->readListBegin(_etype191, _size188);
-            this->bucketCols.resize(_size188);
-            uint32_t _i192;
-            for (_i192 = 0; _i192 < _size188; ++_i192)
+            uint32_t _size189;
+            ::apache::thrift::protocol::TType _etype192;
+            xfer += iprot->readListBegin(_etype192, _size189);
+            this->bucketCols.resize(_size189);
+            uint32_t _i193;
+            for (_i193 = 0; _i193 < _size189; ++_i193)
             {
-              xfer += iprot->readString(this->bucketCols[_i192]);
+              xfer += iprot->readString(this->bucketCols[_i193]);
             }
             xfer += iprot->readListEnd();
           }
@@ -4636,14 +4792,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->sortCols.clear();
-            uint32_t _size193;
-            ::apache::thrift::protocol::TType _etype196;
-            xfer += iprot->readListBegin(_etype196, _size193);
-            this->sortCols.resize(_size193);
-            uint32_t _i197;
-            for (_i197 = 0; _i197 < _size193; ++_i197)
+            uint32_t _size194;
+            ::apache::thrift::protocol::TType _etype197;
+            xfer += iprot->readListBegin(_etype197, _size194);
+            this->sortCols.resize(_size194);
+            uint32_t _i198;
+            for (_i198 = 0; _i198 < _size194; ++_i198)
             {
-              xfer += this->sortCols[_i197].read(iprot);
+              xfer += this->sortCols[_i198].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4656,17 +4812,17 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size198;
-            ::apache::thrift::protocol::TType _ktype199;
-            ::apache::thrift::protocol::TType _vtype200;
-            xfer += iprot->readMapBegin(_ktype199, _vtype200, _size198);
-            uint32_t _i202;
-            for (_i202 = 0; _i202 < _size198; ++_i202)
+            uint32_t _size199;
+            ::apache::thrift::protocol::TType _ktype200;
+            ::apache::thrift::protocol::TType _vtype201;
+            xfer += iprot->readMapBegin(_ktype200, _vtype201, _size199);
+            uint32_t _i203;
+            for (_i203 = 0; _i203 < _size199; ++_i203)
             {
-              std::string _key203;
-              xfer += iprot->readString(_key203);
-              std::string& _val204 = this->parameters[_key203];
-              xfer += iprot->readString(_val204);
+              std::string _key204;
+              xfer += iprot->readString(_key204);
+              std::string& _val205 = this->parameters[_key204];
+              xfer += iprot->readString(_val205);
             }
             xfer += iprot->readMapEnd();
           }
@@ -4711,10 +4867,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size()));
-    std::vector<FieldSchema> ::const_iterator _iter205;
-    for (_iter205 = this->cols.begin(); _iter205 != this->cols.end(); ++_iter205)
+    std::vector<FieldSchema> ::const_iterator _iter206;
+    for (_iter206 = this->cols.begin(); _iter206 != this->cols.end(); ++_iter206)
     {
-      xfer += (*_iter205).write(oprot);
+      xfer += (*_iter206).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4747,10 +4903,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->bucketCols.size()));
-    std::vector<std::string> ::const_iterator _iter206;
-    for (_iter206 = this->bucketCols.begin(); _iter206 != this->bucketCols.end(); ++_iter206)
+    std::vector<std::string> ::const_iterator _iter207;
+    for (_iter207 = this->bucketCols.begin(); _iter207 != this->bucketCols.end(); ++_iter207)
     {
-      xfer += oprot->writeString((*_iter206));
+      xfer += oprot->writeString((*_iter207));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4759,10 +4915,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->sortCols.size()));
-    std::vector<Order> ::const_iterator _iter207;
-    for (_iter207 = this->sortCols.begin(); _iter207 != this->sortCols.end(); ++_iter207)
+    std::vector<Order> ::const_iterator _iter208;
+    for (_iter208 = this->sortCols.begin(); _iter208 != this->sortCols.end(); ++_iter208)
     {
-      xfer += (*_iter207).write(oprot);
+      xfer += (*_iter208).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4771,11 +4927,11 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter208;
-    for (_iter208 = this->parameters.begin(); _iter208 != this->parameters.end(); ++_iter208)
+    std::map<std::string, std::string> ::const_iterator _iter209;
+    for (_iter209 = this->parameters.begin(); _iter209 != this->parameters.end(); ++_iter209)
     {
-      xfer += oprot->writeString(_iter208->first);
-      xfer += oprot->writeString(_iter208->second);
+      xfer += oprot->writeString(_iter209->first);
+      xfer += oprot->writeString(_iter209->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -4813,22 +4969,7 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) {
   swap(a.__isset, b.__isset);
 }
 
-StorageDescriptor::StorageDescriptor(const StorageDescriptor& other209) {
-  cols = other209.cols;
-  location = other209.location;
-  inputFormat = other209.inputFormat;
-  outputFormat = other209.outputFormat;
-  compressed = other209.compressed;
-  numBuckets = other209.numBuckets;
-  serdeInfo = other209.serdeInfo;
-  bucketCols = other209.bucketCols;
-  sortCols = other209.sortCols;
-  parameters = other209.parameters;
-  skewedInfo = other209.skewedInfo;
-  storedAsSubDirectories = other209.storedAsSubDirectories;
-  __isset = other209.__isset;
-}
-StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other210) {
+StorageDescriptor::StorageDescriptor(const StorageDescriptor& other210) {
   cols = other210.cols;
   location = other210.location;
   inputFormat = other210.inputFormat;
@@ -4842,6 +4983,21 @@ StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other21
   skewedInfo = other210.skewedInfo;
   storedAsSubDirectories = other210.storedAsSubDirectories;
   __isset = other210.__isset;
+}
+StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other211) {
+  cols = other211.cols;
+  location = other211.location;
+  inputFormat = other211.inputFormat;
+  outputFormat = other211.outputFormat;
+  compressed = other211.compressed;
+  numBuckets = other211.numBuckets;
+  serdeInfo = other211.serdeInfo;
+  bucketCols = other211.bucketCols;
+  sortCols = other211.sortCols;
+  parameters = other211.parameters;
+  skewedInfo = other211.skewedInfo;
+  storedAsSubDirectories = other211.storedAsSubDirectories;
+  __isset = other211.__isset;
   return *this;
 }
 void StorageDescriptor::printTo(std::ostream& out) const {
@@ -5011,14 +5167,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partitionKeys.clear();
-            uint32_t _size211;
-            ::apache::thrift::protocol::TType _etype214;
-            xfer += iprot->readListBegin(_etype214, _size211);
-            this->partitionKeys.resize(_size211);
-            uint32_t _i215;
-            for (_i215 = 0; _i215 < _size211; ++_i215)
+            uint32_t _size212;
+            ::apache::thrift::protocol::TType _etype215;
+            xfer += iprot->readListBegin(_etype215, _size212);
+            this->partitionKeys.resize(_size212);
+            uint32_t _i216;
+            for (_i216 = 0; _i216 < _size212; ++_i216)
             {
-              xfer += this->partitionKeys[_i215].read(iprot);
+              xfer += this->partitionKeys[_i216].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5031,17 +5187,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size216;
-            ::apache::thrift::protocol::TType _ktype217;
-            ::apache::thrift::protocol::TType _vtype218;
-            xfer += iprot->readMapBegin(_ktype217, _vtype218, _size216);
-            uint32_t _i220;
-            for (_i220 = 0; _i220 < _size216; ++_i220)
+            uint32_t _size217;
+            ::apache::thrift::protocol::TType _ktype218;
+            ::apache::thrift::protocol::TType _vtype219;
+            xfer += iprot->readMapBegin(_ktype218, _vtype219, _size217);
+            uint32_t _i221;
+            for (_i221 = 0; _i221 < _size217; ++_i221)
             {
-              std::string _key221;
-              xfer += iprot->readString(_key221);
-              std::string& _val222 = this->parameters[_key221];
-              xfer += iprot->readString(_val222);
+              std::string _key222;
+              xfer += iprot->readString(_key222);
+              std::string& _val223 = this->parameters[_key222];
+              xfer += iprot->readString(_val223);
             }
             xfer += iprot->readMapEnd();
           }
@@ -5146,10 +5302,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
-    std::vector<FieldSchema> ::const_iterator _iter223;
-    for (_iter223 = this->partitionKeys.begin(); _iter223 != this->partitionKeys.end(); ++_iter223)
+    std::vector<FieldSchema> ::const_iterator _iter224;
+    for (_iter224 = this->partitionKeys.begin(); _iter224 != this->partitionKeys.end(); ++_iter224)
     {
-      xfer += (*_iter223).write(oprot);
+      xfer += (*_iter224).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5158,11 +5314,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter224;
-    for (_iter224 = this->parameters.begin(); _iter224 != this->parameters.end(); ++_iter224)
+    std::map<std::string, std::string> ::const_iterator _iter225;
+    for (_iter225 = this->parameters.begin(); _iter225 != this->parameters.end(); ++_iter225)
     {
-      xfer += oprot->writeString(_iter224->first);
-      xfer += oprot->writeString(_iter224->second);
+      xfer += oprot->writeString(_iter225->first);
+      xfer += oprot->writeString(_iter225->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -5220,25 +5376,7 @@ void swap(Table &a, Table &b) {
   swap(a.__isset, b.__isset);
 }
 
-Table::Table(const Table& other225) {
-  tableName = other225.tableName;
-  dbName = other225.dbName;
-  owner = other225.owner;
-  createTime = other225.createTime;
-  lastAccessTime = other225.lastAccessTime;
-  retention = other225.retention;
-  sd = other225.sd;
-  partitionKeys = other225.partitionKeys;
-  parameters = other225.parameters;
-  viewOriginalText = other225.viewOriginalText;
-  viewExpandedText = other225.viewExpandedText;
-  tableType = other225.tableType;
-  privileges = other225.privileges;
-  temporary = other225.temporary;
-  rewriteEnabled = other225.rewriteEnabled;
-  __isset = other225.__isset;
-}
-Table& Table::operator=(const Table& other226) {
+Table::Table(const Table& other226) {
   tableName = other226.tableName;
   dbName = other226.dbName;
   owner = other226.owner;
@@ -5255,6 +5393,24 @@ Table& Table::operator=(const Table& other226) {
   temporary = other226.temporary;
   rewriteEnabled = other226.rewriteEnabled;
   __isset = other226.__isset;
+}
+Table& Table::operator=(const Table& other227) {
+  tableName = other227.tableName;
+  dbName = other227.dbName;
+  owner = other227.owner;
+  createTime = other227.createTime;
+  lastAccessTime = other227.lastAccessTime;
+  retention = other227.retention;
+  sd = other227.sd;
+  partitionKeys = other227.partitionKeys;
+  parameters = other227.parameters;
+  viewOriginalText = other227.viewOriginalText;
+  viewExpandedText = other227.viewExpandedText;
+  tableType = other227.tableType;
+  privileges = other227.privileges;
+  temporary = other227.temporary;
+  rewriteEnabled = other227.rewriteEnabled;
+  __isset = other227.__isset;
   return *this;
 }
 void Table::printTo(std::ostream& out) const {
@@ -5341,14 +5497,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->values.clear();
-            uint32_t _size227;
-            ::apache::thrift::protocol::TType _etype230;
-            xfer += iprot->readListBegin(_etype230, _size227);
-            this->values.resize(_size227);
-            uint32_t _i231;
-            for (_i231 = 0; _i231 < _size227; ++_i231)
+            uint32_t _size228;
+            ::apache::thrift::protocol::TType _etype231;
+            xfer += iprot->readListBegin(_etype231, _size228);
+            this->values.resize(_size228);
+            uint32_t _i232;
+            for (_i232 = 0; _i232 < _size228; ++_i232)
             {
-              xfer += iprot->readString(this->values[_i231]);
+              xfer += iprot->readString(this->values[_i232]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5401,17 +5557,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size232;
-            ::apache::thrift::protocol::TType _ktype233;
-            ::apache::thrift::protocol::TType _vtype234;
-            xfer += iprot->readMapBegin(_ktype233, _vtype234, _size232);
-            uint32_t _i236;
-            for (_i236 = 0; _i236 < _size232; ++_i236)
+            uint32_t _size233;
+            ::apache::thrift::protocol::TType _ktype234;
+            ::apache::thrift::protocol::TType _vtype235;
+            xfer += iprot->readMapBegin(_ktype234, _vtype235, _size233);
+            uint32_t _i237;
+            for (_i237 = 0; _i237 < _size233; ++_i237)
             {
-              std::string _key237;
-              xfer += iprot->readString(_key237);
-              std::string& _val238 = this->parameters[_key237];
-              xfer += iprot->readString(_val238);
+              std::string _key238;
+              xfer += iprot->readString(_key238);
+              std::string& _val239 = this->parameters[_key238];
+              xfer += iprot->readString(_val239);
             }
             xfer += iprot->readMapEnd();
           }
@@ -5448,10 +5604,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
-    std::vector<std::string> ::const_iterator _iter239;
-    for (_iter239 = this->values.begin(); _iter239 != this->values.end(); ++_iter239)
+    std::vector<std::string> ::const_iterator _iter240;
+    for (_iter240 = this->values.begin(); _iter240 != this->values.end(); ++_iter240)
     {
-      xfer += oprot->writeString((*_iter239));
+      xfer += oprot->writeString((*_iter240));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5480,11 +5636,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter240;
-    for (_iter240 = this->parameters.begin(); _iter240 != this->parameters.end(); ++_iter240)
+    std::map<std::string, std::string> ::const_iterator _iter241;
+    for (_iter241 = this->parameters.begin(); _iter241 != this->parameters.end(); ++_iter241)
     {
-      xfer += oprot->writeString(_iter240->first);
-      xfer += oprot->writeString(_iter240->second);
+      xfer += oprot->writeString(_iter241->first);
+      xfer += oprot->writeString(_iter241->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -5513,18 +5669,7 @@ void swap(Partition &a, Partition &b) {
   swap(a.__isset, b.__isset);
 }
 
-Partition::Partition(const Partition& other241) {
-  values = other241.values;
-  dbName = other241.dbName;
-  tableName = other241.tableName;
-  createTime = other241.createTime;
-  lastAccessTime = other241.lastAccessTime;
-  sd = other241.sd;
-  parameters = other241.parameters;
-  privileges = other241.privileges;
-  __isset = other241.__isset;
-}
-Partition& Partition::operator=(const Partition& other242) {
+Partition::Partition(const Partition& other242) {
   values = other242.values;
   dbName = other242.dbName;
   tableName = other242.tableName;
@@ -5534,6 +5679,17 @@ Partition& Partition::operator=(const Partition& other242) {
   parameters = other242.parameters;
   privileges = other242.privileges;
   __isset = other242.__isset;
+}
+Partition& Partition::operator=(const Partition& other243) {
+  values = other243.values;
+  dbName = other243.dbName;
+  tableName = other243.tableName;
+  createTime = other243.createTime;
+  lastAccessTime = other243.lastAccessTime;
+  sd = other243.sd;
+  parameters = other243.parameters;
+  privileges = other243.privileges;
+  __isset = other243.__isset;
   return *this;
 }
 void Partition::printTo(std::ostream& out) const {
@@ -5605,14 +5761,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->values.clear();
-            uint32_t _size243;
-            ::apache::thrift::protocol::TType _etype246;
-            xfer += iprot->readListBegin(_etype246, _size243);
-            this->values.resize(_size243);
-            uint32_t _i247;
-            for (_i247 = 0; _i247 < _size243; ++_i247)
+            uint32_t _size244;
+            ::apache::thrift::protocol::TType _etype247;
+            xfer += iprot->readListBegin(_etype247, _size244);
+            this->values.resize(_size244);
+            uint32_t _i248;
+            for (_i248 = 0; _i248 < _size244; ++_i248)
             {
-              xfer += iprot->readString(this->values[_i247]);
+              xfer += iprot->readString(this->values[_i248]);
             }
             xfer += iprot->readListEnd();
           }
@@ -5649,17 +5805,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size248;
-            ::apache::thrift::protocol::TType _ktype249;
-            ::apache::thrift::protocol::TType _vtype250;
-            xfer += iprot->readMapBegin(_ktype249, _vtype250, _size248);
-            uint32_t _i252;
-            for (_i252 = 0; _i252 < _size248; ++_i252)
+            uint32_t _size249;
+            ::apache::thrift::protocol::TType _ktype250;
+            ::apache::thrift::protocol::TType _vtype251;
+            xfer += iprot->readMapBegin(_ktype250, _vtype251, _size249);
+            uint32_t _i253;
+            for (_i253 = 0; _i253 < _size249; ++_i253)
             {
-              std::string _key253;
-              xfer += iprot->readString(_key253);
-              std::string& _val254 = this->parameters[_key253];
-              xfer += iprot->readString(_val254);
+              std::string _key254;
+              xfer += iprot->readString(_key254);
+              std::string& _val255 = this->parameters[_key254];
+              xfer += iprot->readString(_val255);
             }
             xfer += iprot->readMapEnd();
           }
@@ -5696,10 +5852,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
-    std::vector<std::string> ::const_iterator _iter255;
-    for (_iter255 = this->values.begin(); _iter255 != this->values.end(); ++_iter255)
+    std::vector<std::string> ::const_iterator _iter256;
+    for (_iter256 = this->values.begin(); _iter256 != this->values.end(); ++_iter256)
     {
-      xfer += oprot->writeString((*_iter255));
+      xfer += oprot->writeString((*_iter256));
     }
     xfer += oprot->writeListEnd();
   }
@@ -5720,11 +5876,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter256;
-    for (_iter256 = this->parameters.begin(); _iter256 != this->parameters.end(); ++_iter256)
+    std::map<std::string, std::string> ::const_iterator _iter257;
+    for (_iter257 = this->parameters.begin(); _iter257 != this->parameters.end(); ++_iter257)
     {
-      xfer += oprot->writeString(_iter256->first);
-      xfer += oprot->writeString(_iter256->second);
+      xfer += oprot->writeString(_iter257->first);
+      xfer += oprot->writeString(_iter257->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -5751,16 +5907,7 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) {
   swap(a.__isset, b.__isset);
 }
 
-PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other257) {
-  values = other257.values;
-  createTime = other257.createTime;
-  lastAccessTime = other257.lastAccessTime;
-  relativePath = other257.relativePath;
-  parameters = other257.parameters;
-  privileges = other257.privileges;
-  __isset = other257.__isset;
-}
-PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other258) {
+PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other258) {
   values = other258.values;
   createTime = other258.createTime;
   lastAccessTime = other258.lastAccessTime;
@@ -5768,6 +5915,15 @@ PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& othe
   parameters = other258.parameters;
   privileges = other258.privileges;
   __isset = other258.__isset;
+}
+PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other259) {
+  values = other259.values;
+  createTime = other259.createTime;
+  lastAccessTime = other259.lastAccessTime;
+  relativePath = other259.relativePath;
+  parameters = other259.parameters;
+  privileges = other259.privileges;
+  __isset = other259.__isset;
   return *this;
 }
 void PartitionWithoutSD::printTo(std::ostream& out) const {
@@ -5820,14 +5976,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partitions.clear();
-            uint32_t _size259;
-            ::apache::thrift::protocol::TType _etype262;
-            xfer += iprot->readListBegin(_etype262, _size259);
-            this->partitions.resize(_size259);
-            uint32_t _i263;
-            for (_i263 = 0; _i263 < _size259; ++_i263)
+            uint32_t _size260;
+            ::apache::thrift::protocol::TType _etype263;
+            xfer += iprot->readListBegin(_etype263, _size260);
+            this->partitions.resize(_size260);
+            uint32_t _i264;
+            for (_i264 = 0; _i264 < _size260; ++_i264)
             {
-              xfer += this->partitions[_i263].read(iprot);
+              xfer += this->partitions[_i264].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5864,10 +6020,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
-    std::vector<PartitionWithoutSD> ::const_iterator _iter264;
-    for (_iter264 = this->partitions.begin(); _iter264 != this->partitions.end(); ++_iter264)
+    std::vector<PartitionWithoutSD> ::const_iterator _iter265;
+    for (_iter265 = this->partitions.begin(); _iter265 != this->partitions.end(); ++_iter265)
     {
-      xfer += (*_iter264).write(oprot);
+      xfer += (*_iter265).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5889,15 +6045,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) {
   swap(a.__isset, b.__isset);
 }
 
-PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other265) {
-  partitions = other265.partitions;
-  sd = other265.sd;
-  __isset = other265.__isset;
-}
-PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other266) {
+PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other266) {
   partitions = other266.partitions;
   sd = other266.sd;
   __isset = other266.__isset;
+}
+PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other267) {
+  partitions = other267.partitions;
+  sd = other267.sd;
+  __isset = other267.__isset;
   return *this;
 }
 void PartitionSpecWithSharedSD::printTo(std::ostream& out) const {
@@ -5942,14 +6098,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partitions.clear();
-            uint32_t _size267;
-            ::apache::thrift::protocol::TType _etype270;
-            xfer += iprot->readListBegin(_etype270, _size267);
-            this->partitions.resize(_size267);
-            uint32_t _i271;
-            for (_i271 = 0; _i271 < _size267; ++_i271)
+            uint32_t _size268;
+            ::apache::thrift::protocol::TType _etype271;
+            xfer += iprot->readListBegin(_etype271, _size268);
+            this->partitions.resize(_size268);
+            uint32_t _i272;
+            for (_i272 = 0; _i272 < _size268; ++_i272)
             {
-              xfer += this->partitions[_i271].read(iprot);
+              xfer += this->partitions[_i272].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5978,10 +6134,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol
   xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
-    std::vector<Partition> ::const_iterator _iter272;
-    for (_iter272 = this->partitions.begin(); _iter272 != this->partitions.end(); ++_iter272)
+    std::vector<Partition> ::const_iterator _iter273;
+    for (_iter273 = this->partitions.begin(); _iter273 != this->partitions.end(); ++_iter273)
     {
-      xfer += (*_iter272).write(oprot);
+      xfer += (*_iter273).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5998,13 +6154,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) {
   swap(a.__isset, b.__isset);
 }
 
-PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other273) {
-  partitions = other273.partitions;
-  __isset = other273.__isset;
-}
-PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other274) {
+PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other274) {
   partitions = other274.partitions;
   __isset = other274.__isset;
+}
+PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other275) {
+  partitions = other275.partitions;
+  __isset = other275.__isset;
   return *this;
 }
 void PartitionListComposingSpec::printTo(std::ostream& out) const {
@@ -6156,21 +6312,21 @@ void swap(PartitionSpec &a, PartitionSpec &b) {
   swap(a.__isset, b.__isset);
 }
 
-PartitionSpec::PartitionSpec(const PartitionSpec& other275) {
-  dbName = other275.dbName;
-  tableName = other275.tableName;
-  rootPath = other275.rootPath;
-  sharedSDPartitionSpec = other275.sharedSDPartitionSpec;
-  partitionList = other275.partitionList;
-  __isset = other275.__isset;
-}
-PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other276) {
+PartitionSpec::PartitionSpec(const PartitionSpec& other276) {
   dbName = other276.dbName;
   tableName = other276.tableName;
   rootPath = other276.rootPath;
   sharedSDPartitionSpec = other276.sharedSDPartitionSpec;
   partitionList = other276.partitionList;
   __isset = other276.__isset;
+}
+PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other277) {
+  dbName = other277.dbName;
+  tableName = other277.tableName;
+  rootPath = other277.rootPath;
+  sharedSDPartitionSpec = other277.sharedSDPartitionSpec;
+  partitionList = other277.partitionList;
+  __isset = other277.__isset;
   return *this;
 }
 void PartitionSpec::printTo(std::ostream& out) const {
@@ -6318,17 +6474,17 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size277;
-            ::apache::thrift::protocol::TType _ktype278;
-            ::apache::thrift::protocol::TType _vtype279;
-            xfer += iprot->readMapBegin(_ktype278, _vtype279, _size277);
-            uint32_t _i281;
-            for (_i281 = 0; _i281 < _size277; ++_i281)
+            uint32_t _size278;
+            ::apache::thrift::protocol::TType _ktype279;
+            ::apache::thrift::protocol::TType _vtype280;
+            xfer += iprot->readMapBegin(_ktype279, _vtype280, _size278);
+            uint32_t _i282;
+            for (_i282 = 0; _i282 < _size278; ++_i282)
             {
-              std::string _key282;
-              xfer += iprot->readString(_key282);
-              std::string& _val283 = this->parameters[_key282];
-              xfer += iprot->readString(_val283);
+              std::string _key283;
+              xfer += iprot->readString(_key283);
+              std::string& _val284 = this->parameters[_key283];
+              xfer += iprot->readString(_val284);
             }
             xfer += iprot->readMapEnd();
           }
@@ -6397,11 +6553,11 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter284;
-    for (_iter284 = this->parameters.begin(); _iter284 != this->parameters.end(); ++_iter284)
+    std::map<std::string, std::string> ::const_iterator _iter285;
+    for (_iter285 = this->parameters.begin(); _iter285 != this->parameters.end(); ++_iter285)
     {
-      xfer += oprot->writeString(_iter284->first);
-      xfer += oprot->writeString(_iter284->second);
+      xfer += oprot->writeString(_iter285->first);
+      xfer += oprot->writeString(_iter285->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -6431,20 +6587,7 @@ void swap(Index &a, Index &b) {
   swap(a.__isset, b.__isset);
 }
 
-Index::Index(const Index& other285) {
-  indexName = other285.indexName;
-  indexHandlerClass = other285.indexHandlerClass;
-  dbName = other285.dbName;
-  origTableName = other285.origTableName;
-  createTime = other285.createTime;
-  lastAccessTime = other285.lastAccessTime;
-  indexTableName = other285.indexTableName;
-  sd = other285.sd;
-  parameters = other285.parameters;
-  deferredRebuild = other285.deferredRebuild;
-  __isset = other285.__isset;
-}
-Index& Index::operator=(const Index& other286) {
+Index::Index(const Index& other286) {
   indexName = other286.indexName;
   indexHandlerClass = other286.indexHandlerClass;
   dbName = other286.dbName;
@@ -6456,6 +6599,19 @@ Index& Index::operator=(const Index& other286) {
   parameters = other286.parameters;
   deferredRebuild = other286.deferredRebuild;
   __isset = other286.__isset;
+}
+Index& Index::operator=(const Index& other287) {
+  indexName = other287.indexName;
+  indexHandlerClass = other287.indexHandlerClass;
+  dbName = other287.dbName;
+  origTableName = other287.origTableName;
+  createTime = other287.createTime;
+  lastAccessTime = other287.lastAccessTime;
+  indexTableName = other287.indexTableName;
+  sd = other287.sd;
+  parameters = other287.parameters;
+  deferredRebuild = other287.deferredRebuild;
+  __isset = other287.__isset;
   return *this;
 }
 void Index::printTo(std::ostream& out) const {
@@ -6606,19 +6762,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other287) {
-  numTrues = other287.numTrues;
-  numFalses = other287.numFalses;
-  numNulls = other287.numNulls;
-  bitVectors = other287.bitVectors;
-  __isset = other287.__isset;
-}
-BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other288) {
+BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other288) {
   numTrues = other288.numTrues;
   numFalses = other288.numFalses;
   numNulls = other288.numNulls;
   bitVectors = other288.bitVectors;
   __isset = other288.__isset;
+}
+BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other289) {
+  numTrues = other289.numTrues;
+  numFalses = other289.numFalses;
+  numNulls = other289.numNulls;
+  bitVectors = other289.bitVectors;
+  __isset = other289.__isset;
   return *this;
 }
 void BooleanColumnStatsData::printTo(std::ostream& out) const {
@@ -6781,21 +6937,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other289) {
-  lowValue = other289.lowValue;
-  highValue = other289.highValue;
-  numNulls = other289.numNulls;
-  numDVs = other289.numDVs;
-  bitVectors = other289.bitVectors;
-  __isset = other289.__isset;
-}
-DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other290) {
+DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other290) {
   lowValue = other290.lowValue;
   highValue = other290.highValue;
   numNulls = other290.numNulls;
   numDVs = other290.numDVs;
   bitVectors = other290.bitVectors;
   __isset = other290.__isset;
+}
+DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other291) {
+  lowValue = other291.lowValue;
+  highValue = other291.highValue;
+  numNulls = other291.numNulls;
+  numDVs = other291.numDVs;
+  bitVectors = other291.bitVectors;
+  __isset = other291.__isset;
   return *this;
 }
 void DoubleColumnStatsData::printTo(std::ostream& out) const {
@@ -6959,21 +7115,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other291) {
-  lowValue = other291.lowValue;
-  highValue = other291.highValue;
-  numNulls = other291.numNulls;
-  numDVs = other291.numDVs;
-  bitVectors = other291.bitVectors;
-  __isset = other291.__isset;
-}
-LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other292) {
+LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other292) {
   lowValue = other292.lowValue;
   highValue = other292.highValue;
   numNulls = other292.numNulls;
   numDVs = other292.numDVs;
   bitVectors = other292.bitVectors;
   __isset = other292.__isset;
+}
+LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other293) {
+  lowValue = other293.lowValue;
+  highValue = other293.highValue;
+  numNulls = other293.numNulls;
+  numDVs = other293.numDVs;
+  bitVectors = other293.bitVectors;
+  __isset = other293.__isset;
   return *this;
 }
 void LongColumnStatsData::printTo(std::ostream& out) const {
@@ -7139,21 +7295,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other293) {
-  maxColLen = other293.maxColLen;
-  avgColLen = other293.avgColLen;
-  numNulls = other293.numNulls;
-  numDVs = other293.numDVs;
-  bitVectors = other293.bitVectors;
-  __isset = other293.__isset;
-}
-StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other294) {
+StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other294) {
   maxColLen = other294.maxColLen;
   avgColLen = other294.avgColLen;
   numNulls = other294.numNulls;
   numDVs = other294.numDVs;
   bitVectors = other294.bitVectors;
   __isset = other294.__isset;
+}
+StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other295) {
+  maxColLen = other295.maxColLen;
+  avgColLen = other295.avgColLen;
+  numNulls = other295.numNulls;
+  numDVs = other295.numDVs;
+  bitVectors = other295.bitVectors;
+  __isset = other295.__isset;
   return *this;
 }
 void StringColumnStatsData::printTo(std::ostream& out) const {
@@ -7299,19 +7455,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other295) {
-  maxColLen = other295.maxColLen;
-  avgColLen = other295.avgColLen;
-  numNulls = other295.numNulls;
-  bitVectors = other295.bitVectors;
-  __isset = other295.__isset;
-}
-BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other296) {
+BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other296) {
   maxColLen = other296.maxColLen;
   avgColLen = other296.avgColLen;
   numNulls = other296.numNulls;
   bitVectors = other296.bitVectors;
   __isset = other296.__isset;
+}
+BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other297) {
+  maxColLen = other297.maxColLen;
+  avgColLen = other297.avgColLen;
+  numNulls = other297.numNulls;
+  bitVectors = other297.bitVectors;
+  __isset = other297.__isset;
   return *this;
 }
 void BinaryColumnStatsData::printTo(std::ostream& out) const {
@@ -7416,13 +7572,13 @@ void swap(Decimal &a, Decimal &b) {
   swap(a.scale, b.scale);
 }
 
-Decimal::Decimal(const Decimal& other297) {
-  unscaled = other297.unscaled;
-  scale = other297.scale;
-}
-Decimal& Decimal::operator=(const Decimal& other298) {
+Decimal::Decimal(const Decimal& other298) {
   unscaled = other298.unscaled;
   scale = other298.scale;
+}
+Decimal& Decimal::operator=(const Decimal& other299) {
+  unscaled = other299.unscaled;
+  scale = other299.scale;
   return *this;
 }
 void Decimal::printTo(std::ostream& out) const {
@@ -7583,21 +7739,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other299) {
-  lowValue = other299.lowValue;
-  highValue = other299.highValue;
-  numNulls = other299.numNulls;
-  numDVs = other299.numDVs;
-  bitVectors = other299.bitVectors;
-  __isset = other299.__isset;
-}
-DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other300) {
+DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other300) {
   lowValue = other300.lowValue;
   highValue = other300.highValue;
   numNulls = other300.numNulls;
   numDVs = other300.numDVs;
   bitVectors = other300.bitVectors;
   __isset = other300.__isset;
+}
+DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other301) {
+  lowValue = other301.lowValue;
+  highValue = other301.highValue;
+  numNulls = other301.numNulls;
+  numDVs = other301.numDVs;
+  bitVectors = other301.bitVectors;
+  __isset = other301.__isset;
   return *this;
 }
 void DecimalColumnStatsData::printTo(std::ostream& out) const {
@@ -7683,11 +7839,11 @@ void swap(Date &a, Date &b) {
   swap(a.daysSinceEpoch, b.daysSinceEpoch);
 }
 
-Date::Date(const Date& other301) {
-  daysSinceEpoch = other301.daysSinceEpoch;
-}
-Date& Date::operator=(const Date& other302) {
+Date::Date(const Date& other302) {
   daysSinceEpoch = other302.daysSinceEpoch;
+}
+Date& Date::operator=(const Date& other303) {
+  daysSinceEpoch = other303.daysSinceEpoch;
   return *this;
 }
 void Date::printTo(std::ostream& out) const {
@@ -7847,21 +8003,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other303) {
-  lowValue = other303.lowValue;
-  highValue = other303.highValue;
-  numNulls = other303.numNulls;
-  numDVs = other303.numDVs;
-  bitVectors = other303.bitVectors;
-  __isset = other303.__isset;
-}
-DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other304) {
+DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other304) {
   lowValue = other304.lowValue;
   highValue = other304.highValue;
   numNulls = other304.numNulls;
   numDVs = other304.numDVs;
   bitVectors = other304.bitVectors;
   __isset = other304.__isset;
+}
+DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other305) {
+  lowValue = other305.lowValue;
+  highValue = other305.highValue;
+  numNulls = other305.numNulls;
+  numDVs = other305.numDVs;
+  bitVectors = other305.bitVectors;
+  __isset = other305.__isset;
   return *this;
 }
 void DateColumnStatsData::printTo(std::ostream& out) const {
@@ -8047,17 +8203,7 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) {
   swap(a.__isset, b.__isset);
 }
 
-ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other305) {
-  booleanStats = other305.booleanStats;
-  longStats = other305.longStats;
-  doubleStats = other305.doubleStats;
-  stringStats = other305.stringStats;
-  binaryStats = other305.binaryStats;
-  decimalStats = other305.decimalStats;
-  dateStats = other305.dateStats;
-  __isset = other305.__isset;
-}
-ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other306) {
+ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other306) {
   booleanStats = other306.booleanStats;
   longStats = other306.longStats;
   doubleStats = other306.doubleStats;
@@ -8066,6 +8212,16 @@ ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData
   decimalStats = other306.decimalStats;
   dateStats = other306.dateStats;
   __isset = other306.__isset;
+}
+ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other307) {
+  booleanStats = other307.booleanStats;
+  longStats = other307.longStats;
+  doubleStats = other307.doubleStats;
+  stringStats = other307.stringStats;
+  binaryStats = other307.binaryStats;
+  decimalStats = other307.decimalStats;
+  dateStats = other307.dateStats;
+  __isset = other307.__isset;
   return *this;
 }
 void ColumnStatisticsData::printTo(std::ostream& out) const {
@@ -8193,15 +8349,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) {
   swap(a.statsData, b.statsData);
 }
 
-ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other307) {
-  colName = other307.colName;
-  colType = other307.colType;
-  statsData = other307.statsData;
-}
-ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other308) {
+ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other308) {
   colName = other308.colName;
   colType = other308.colType;
   statsData = other308.statsData;
+}
+ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other309) {
+  colName = other309.colName;
+  colType = other309.colType;
+  statsData = other309.statsData;
   return *this;
 }
 void ColumnStatisticsObj::printTo(std::ostream& out) const {
@@ -8364,21 +8520,21 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) {
   swap(a.__isset, b.__isset);
 }
 
-ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other309) {
-  isTblLevel = other309.isTblLevel;
-  dbName = other309.dbName;
-  tableName = other309.tableName;
-  partName = other309.partName;
-  lastAnalyzed = other309.lastAnalyzed;
-  __isset = other309.__isset;
-}
-ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other310) {
+ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other310) {
   isTblLevel = other310.isTblLevel;
   dbName = other310.dbName;
   tableName = other310.tableName;
   partName = other310.partName;
   lastAnalyzed = other310.lastAnalyzed;
   __isset = other310.__isset;
+}
+ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other311) {
+  isTblLevel = other311.isTblLevel;
+  dbName = other311.dbName;
+  tableName = other311.tableName;
+  partName = other311.partName;
+  lastAnalyzed = other311.lastAnalyzed;
+  __isset = other311.__isset;
   return *this;
 }
 void ColumnStatisticsDesc::printTo(std::ostream& out) const {
@@ -8440,14 +8596,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->statsObj.clear();
-            uint32_t _size311;
-            ::apache::thrift::protocol::TType _etype314;
-            xfer += iprot->readListBegin(_etype314, _size311);
-            this->statsObj.resize(_size311);
-            uint32_t _i315;
-            for (_i315 = 0; _i315 < _size311; ++_i315)
+            uint32_t _size312;
+            ::apache::thrift::protocol::TType _etype315;
+            xfer += iprot->readListBegin(_etype315, _size312);
+            this->statsObj.resize(_size312);
+            uint32_t _i316;
+            for (_i316 = 0; _i316 < _size312; ++_i316)
             {
-              xfer += this->statsObj[_i315].read(iprot);
+              xfer += this->statsObj[_i316].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8484,10 +8640,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c
   xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size()));
-    std::vector<ColumnStatisticsObj> ::const_iterator _iter316;
-    for (_iter316 = this->statsObj.begin(); _iter316 != this->statsObj.end(); ++_iter316)
+    std::vector<ColumnStatisticsObj> ::const_iterator _iter317;
+    for (_iter317 = this->statsObj.begin(); _iter317 != this->statsObj.end(); ++_iter317)
     {
-      xfer += (*_iter316).write(oprot);
+      xfer += (*_iter317).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8504,13 +8660,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) {
   swap(a.statsObj, b.statsObj);
 }
 
-ColumnStatistics::ColumnStatistics(const ColumnStatistics& other317) {
-  statsDesc = other317.statsDesc;
-  statsObj = other317.statsObj;
-}
-ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other318) {
+ColumnStatistics::ColumnStatistics(const ColumnStatistics& other318) {
   statsDesc = other318.statsDesc;
   statsObj = other318.statsObj;
+}
+ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other319) {
+  statsDesc = other319.statsDesc;
+  statsObj = other319.statsObj;
   return *this;
 }
 void ColumnStatistics::printTo(std::ostream& out) const {
@@ -8561,14 +8717,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->colStats.clear();
-            uint32_t _size319;
-            ::apache::thrift::protocol::TType _etype322;
-            xfer += iprot->readListBegin(_etype322, _size319);
-            this->colStats.resize(_size319);
-            uint32_t _i323;
-            for (_i323 = 0; _i323 < _size319; ++_i323)
+            uint32_t _size320;
+            ::apache::thrift::protocol::TType _etype323;
+            xfer += iprot->readListBegin(_etype323, _size320);
+            this->colStats.resize(_size320);
+            uint32_t _i324;
+            for (_i324 = 0; _i324 < _size320; ++_i324)
             {
-              xfer += this->colStats[_i323].read(iprot);
+              xfer += this->colStats[_i324].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8609,10 +8765,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
-    std::vector<ColumnStatisticsObj> ::const_iterator _iter324;
-    for (_iter324 = this->colStats.begin(); _iter324 != this->colStats.end(); ++_iter324)
+    std::vector<ColumnStatisticsObj> ::const_iterator _iter325;
+    for (_iter325 = this->colStats.begin(); _iter325 != this->colStats.end(); ++_iter325)
     {
-      xfer += (*_iter324).write(oprot);
+      xfer += (*_iter325).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8633,13 +8789,13 @@ void swap(AggrStats &a, AggrStats &b) {
   swap(a.partsFound, b.partsFound);
 }
 
-AggrStats::AggrStats(const AggrStats& other325) {
-  colStats = other325.colStats;
-  partsFound = other325.partsFound;
-}
-AggrStats& AggrStats::operator=(const AggrStats& other326) {
+AggrStats::AggrStats(const AggrStats& other326) {
   colStats = other326.colStats;
   partsFound = other326.partsFound;
+}
+AggrStats& AggrStats::operator=(const AggrStats& other327) {
+  colStats = other327.colStats;
+  partsFound = other327.partsFound;
   return *this;
 }
 void AggrStats::printTo(std::ostream& out) const {
@@ -8690,14 +8846,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol*
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->colStats.clear();
-            uint32_t _size327;
-            ::apache::thrift::protocol::TType _etype330;
-            xfer += iprot->readListBegin(_etype330, _size327);
-            this->colStats.resize(_size327);
-            uint32_t _i331;
-            for (_i331 = 0; _i331 < _size327; ++_i331)
+            uint32_t _size328;
+            ::apache::thrift::protocol::TType _etype331;
+            xfer += iprot->readListBegin(_etype331, _size328);
+            this->colStats.resize(_size328);
+            uint32_t _i332;
+            for (_i332 = 0; _i332 < _size328; ++_i332)
             {
-              xfer += this->colStats[_i331].read(iprot);
+              xfer += this->colStats[_i332].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8736,10 +8892,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol*
   xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
-    std::vector<ColumnStatistics> ::const_iterator _iter332;
-    for (_iter332 = this->colStats.begin(); _iter332 != this->colStats.end(); ++_iter332)
+    std::vector<ColumnStatistics> ::const_iterator _iter333;
+    for (_iter333 = this->colStats.begin(); _iter333 != this->colStats.end(); ++_iter333)
     {
-      xfer += (*_iter332).write(oprot);
+      xfer += (*_iter333).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8762,15 +8918,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other333) {
-  colStats = other333.colStats;
-  needMerge = other333.needMerge;
-  __isset = other333.__isset;
-}
-SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other334) {
+SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other334) {
   colStats = other334.colStats;
   needMerge = other334.needMerge;
   __isset = other334.__isset;
+}
+SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other335) {
+  colStats = other335.colStats;
+  needMerge = other335.needMerge;
+  __isset = other335.__isset;
   return *this;
 }
 void SetPartitionsStatsRequest::printTo(std::ostream& out) const {
@@ -8819,14 +8975,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->fieldSchemas.clear();
-            uint32_t _size335;
-            ::apache::thrift::protocol::TType _etype338;
-            xfer += iprot->readListBegin(_etype338, _size335);
-            this->fieldSchemas.resize(_size335);
-            uint32_t _i339;
-            for (_i339 = 0; _i339 < _size335; ++_i339)
+            uint32_t _size336;
+            ::apache::thrift::protocol::TType _etype339;
+            xfer += iprot->readListBegin(_etype339, _size336);
+            this->fieldSchemas.resize(_size336);
+            uint32_t _i340;
+            for (_i340 = 0; _i340 < _size336; ++_i340)
             {
-              xfer += this->fieldSchemas[_i339].read(iprot);
+              xfer += this->fieldSchemas[_i340].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8839,17 +8995,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->properties.clear();
-            uint32_t _size340;
-            ::apache::thrift::protocol::TType _ktype341;
-            ::apache::thrift::protocol::TType _vtype342;
-            xfer += iprot->readMapBegin(_ktype341, _vtype342, _size340);
-            uint32_t _i344;
-            for (_i344 = 0; _i344 < _size340; ++_i344)
+            uint32_t _size341;
+            ::apache::thrift::protocol::TType _ktype342;
+            ::apache::thrift::protocol::TType _vtype343;
+            xfer += iprot->readMapBegin(_ktype342, _vtype343, _size341);
+            uint32_t _i345;
+            for (_i345 = 0; _i345 < _size341; ++_i345)
             {
-              std::string _key345;
-              xfer += iprot->readString(_key345);
-              std::string& _val346 = this->properties[_key345];
-              xfer += iprot->readString(_val346);
+              std::string _key346;
+              xfer += iprot->readString(_key346);
+              std::string& _val347 = this->properties[_key346];
+              xfer += iprot->readString(_val347);
             }
             xfer += iprot->readMapEnd();
           }
@@ -8878,10 +9034,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size()));
-    std::vector<FieldSchema> ::const_iterator _iter347;
-    for (_iter347 = this->fieldSchemas.begin(); _iter347 != this->fieldSchemas.end(); ++_iter347)
+    std::vector<FieldSchema> ::const_iterator _iter348;
+    for (_iter348 = this->fieldSchemas.begin(); _iter348 != this->fieldSchemas.end(); ++_iter348)
     {
-      xfer += (*_iter347).write(oprot);
+      xfer += (*_iter348).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8890,11 +9046,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
-    std::map<std::string, std::string> ::const_iterator _iter348;
-    for (_iter348 = this->properties.begin(); _iter348 != this->properties.end(); ++_iter348)
+    std::map<std::string, std::string> ::const_iterator _iter349;
+    for (_iter349 = this->properties.begin(); _iter349 != this->properties.end(); ++_iter349)
     {
-      xfer += oprot->writeString(_iter348->first);
-      xfer += oprot->writeString(_iter348->second);
+      xfer += oprot->writeString(_iter349->first);
+      xfer += oprot->writeString(_iter349->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -8912,15 +9068,15 @@ void swap(Schema &a, Schema &b) {
   swap(a.__isset, b.__isset);
 }
 
-Schema::Schema(const Schema& other349) {
-  fieldSchemas = other349.fieldSchemas;
-  properties = other349.properties;
-  __isset = other349.__isset;
-}
-Schema& Schema::operator=(const Schema& other350) {
+Schema::Schema(const Schema& other350) {
   fieldSchemas = other350.fieldSchemas;
   properties = other350.properties;
   __isset = other350.__isset;
+}
+Schema& Schema::operator=(const Schema& other351) {
+  fieldSchemas = other351.fieldSchemas;
+  properties = other351.properties;
+  __isset = other351.__isset;
   return *this;
 }
 void Schema::printTo(std::ostream& out) const {
@@ -8965,17 +9121,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->properties.clear();
-            uint32_t _size351;
-            ::apache::thrift::protocol::TType _ktype352;
-            ::apache::thrift::protocol::TType _vtype353;
-            xfer += iprot->readMapBegin(_ktype352, _vtype353, _size351);
-            uint32_t _i355;
-            for (_i355 = 0; _i355 < _size351; ++_i355)
+            uint32_t _size352;
+            ::apache::thrift::protocol::TType _ktype353;
+            ::apache::thrift::protocol::TType _vtype354;
+            xfer += iprot->readMapBegin(_ktype353, _vtype354, _size352);
+            uint32_t _i356;
+            for (_i356 = 0; _i356 < _size352; ++_i356)
             {
-              std::string _key356;
-              xfer += iprot->readString(_key356);
-              std::string& _val357 = this->properties[_key356];
-              xfer += iprot->readString(_val357);
+              std::string _key357;
+              xfer += iprot->readString(_key357);
+              std::string& _val358 = this->properties[_key357];
+              xfer += iprot->readString(_val358);
             }
             xfer += iprot->readMapEnd();
           }
@@ -9004,11 +9160,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
-    std::map<std::string, std::string> ::const_iterator _iter358;
-    for (_iter358 = this->properties.begin(); _iter358 != this->properties.end(); ++_iter358)
+    std::map<std::string, std::string> ::const_iterator _iter359;
+    for (_iter359 = this->properties.begin(); _iter359 != this->properties.end(); ++_iter359)
     {
-      xfer += oprot->writeString(_iter358->first);
-      xfer += oprot->writeString(_iter358->second);
+      xfer += oprot->writeString(_iter359->first);
+      xfer += oprot->writeString(_iter359->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -9025,13 +9181,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) {
   swap(a.__isset, b.__isset);
 }
 
-EnvironmentContext::EnvironmentContext(const EnvironmentContext& other359) {
-  properties = other359.properties;
-  __isset = other359.__isset;
-}
-EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other360) {
+EnvironmentContext::EnvironmentContext(const EnvironmentContext& other360) {
   properties = other360.properties;
   __isset = other360.__isset;
+}
+EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other361) {
+  properties = other361.properties;
+  __isset = other361.__isset;
   return *this;
 }
 void EnvironmentContext::printTo(std::ostream& out) const {
@@ -9133,13 +9289,13 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) {
   swap(a.tbl_name, b.tbl_name);
 }
 
-PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other361) {
-  db_name = other361.db_name;
-  tbl_name = other361.tbl_name;
-}
-PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other362) {
+PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other362) {
   db_name = other362.db_name;
   tbl_name = other362.tbl_name;
+}
+PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other363) {
+  db_name = other363.db_name;
+  tbl_name = other363.tbl_name;
   return *this;
 }
 void PrimaryKeysRequest::printTo(std::ostream& out) const {
@@ -9185,14 +9341,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->primaryKeys.clear();
-            uint32_t _size363;
-            ::apache::thrift::protocol::TType _etype366;
-            xfer += iprot->readListBegin(_etype366, _size363);
-            this->primaryKeys.resize(_size363);
-            uint32_t _i367;
-            for (_i367 = 0; _i367 < _size363; ++_i367)
+            uint32_t _size364;
+            ::apache::thrift::protocol::TType _etype367;
+            xfer += iprot->readListBegin(_etype367, _size364);
+            this->primaryKeys.resize(_size364);
+            uint32_t _i368;
+            for (_i368 = 0; _i368 < _size364; ++_i368)
             {
-              xfer += this->primaryKeys[_i367].read(iprot);
+              xfer += this->primaryKeys[_i368].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9223,10 +9379,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter368;
-    for (_iter368 = this->primaryKeys.begin(); _iter368 != this->primaryKeys.end(); ++_iter368)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter369;
+    for (_iter369 = this->primaryKeys.begin(); _iter369 != this->primaryKeys.end(); ++_iter369)
     {
-      xfer += (*_iter368).write(oprot);
+      xfer += (*_iter369).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9242,11 +9398,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) {
   swap(a.primaryKeys, b.primaryKeys);
 }
 
-PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other369) {
-  primaryKeys = other369.primaryKeys;
-}
-PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other370) {
+PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other370) {
   primaryKeys = other370.primaryKeys;
+}
+PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other371) {
+  primaryKeys = other371.primaryKeys;
   return *this;
 }
 void PrimaryKeysResponse::printTo(std::ostream& out) const {
@@ -9377,19 +9533,19 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other371) {
-  parent_db_name = other371.parent_db_name;
-  parent_tbl_name = other371.parent_tbl_name;
-  foreign_db_name = other371.foreign_db_name;
-  foreign_tbl_name = other371.foreign_tbl_name;
-  __isset = other371.__isset;
-}
-ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other372) {
+ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other372) {
   parent_db_name = other372.parent_db_name;
   parent_tbl_name = other372.parent_tbl_name;
   foreign_db_name = other372.foreign_db_name;
   foreign_tbl_name = other372.foreign_tbl_name;
   __isset = other372.__isset;
+}
+ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other373) {
+  parent_db_name = other373.parent_db_name;
+  parent_tbl_name = other373.parent_tbl_name;
+  foreign_db_name = other373.foreign_db_name;
+  foreign_tbl_name = other373.foreign_tbl_name;
+  __isset = other373.__isset;
   return *this;
 }
 void ForeignKeysRequest::printTo(std::ostream& out) const {
@@ -9437,14 +9593,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->foreignKeys.clear();
-            uint32_t _size373;
-            ::apache::thrift::protocol::TType _etype376;
-            xfer += iprot->readListBegin(_etype376, _size373);
-            this->foreignKeys.resize(_size373);
-            uint32_t _i377;
-            for (_i377 = 0; _i377 < _size373; ++_i377)
+            uint32_t _size374;
+            ::apache::thrift::protocol::TType _etype377;
+            xfer += iprot->readListBegin(_etype377, _size374);
+            this->foreignKeys.resize(_size374);
+            uint32_t _i378;
+            for (_i378 = 0; _i378 < _size374; ++_i378)
             {
-              xfer += this->foreignKeys[_i377].read(iprot);
+              xfer += this->foreignKeys[_i378].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9475,10 +9631,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->for

<TRUNCATED>
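
The wall of one-off renames above (_iter264 -> _iter265, _size267 -> _size268, and so on) is the Thrift compiler's file-global temp-variable counter shifting after new structs were added to the IDL; the serialization logic itself is unchanged. For orientation, the same container read pattern as Thrift emits it, hand-sketched in Java against the libthrift TProtocol API (the method and the plain string list are illustrative, not part of this patch):

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

class ListReadSketch {
  // Mirrors the generated loop: read the list header, then size elements, then the end marker.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList tlist = iprot.readListBegin();
    List<String> out = new ArrayList<>(tlist.size);
    for (int i = 0; i < tlist.size; ++i) {
      out.add(iprot.readString());
    }
    iprot.readListEnd();
    return out;
  }
}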

[37/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index fc254c6..0c0e408 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2739,11 +2739,83 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
       String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException,
       InvalidObjectException, MetaException, TException {
-    WMCreateOrDropTriggerToPoolMappingRequest request = new WMCreateOrDropTriggerToPoolMappingRequest();
+    WMCreateOrDropTriggerToPoolMappingRequest request =
+        new WMCreateOrDropTriggerToPoolMappingRequest();
     request.setResourcePlanName(resourcePlanName);
     request.setTriggerName(triggerName);
     request.setPoolPath(poolPath);
     request.setDrop(shouldDrop);
     client.create_or_drop_wm_trigger_to_pool_mapping(request);
   }
+
+  public void createISchema(ISchema schema) throws TException {
+    client.create_ischema(schema);
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema) throws TException {
+    client.alter_ischema(schemaName, newSchema);
+  }
+
+  @Override
+  public ISchema getISchema(String name) throws TException {
+    return client.get_ischema(name);
+  }
+
+  @Override
+  public void dropISchema(String name) throws TException {
+    client.drop_ischema(name);
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion) throws TException {
+    client.add_schema_version(schemaVersion);
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws TException {
+    return client.get_schema_version(schemaName, version);
+  }
+
+  @Override
+  public SchemaVersion getSchemaLatestVersion(String schemaName) throws TException {
+    return client.get_schema_latest_version(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaAllVersions(String schemaName) throws TException {
+    return client.get_schema_all_versions(schemaName);
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws TException {
+    client.drop_schema_version(schemaName, version);
+  }
+
+  @Override
+  public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException {
+    return client.get_schemas_by_cols(rqst);
+  }
+
+  @Override
+  public void mapSchemaVersionToSerde(String schemaName, int version, String serdeName)
+      throws TException {
+    client.map_schema_version_to_serde(schemaName, version, serdeName);
+  }
+
+  @Override
+  public void setSchemaVersionState(String schemaName, int version, SchemaVersionState state)
+      throws TException {
+    client.set_schema_version_state(schemaName, version, state);
+  }
+
+  @Override
+  public void addSerDe(SerDeInfo serDeInfo) throws TException {
+    client.add_serde(serDeInfo);
+  }
+
+  @Override
+  public SerDeInfo getSerDe(String serDeName) throws TException {
+    return client.get_serde(serDeName);
+  }
 }
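
A minimal usage sketch of the new client surface, assuming a reachable metastore; the schema name is illustrative, and the setName()/getVersion() accessors are assumptions based on the Thrift-generated style, not shown in this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;

public class SchemaRegistrySketch {
  public static void main(String[] args) throws Exception {
    // Assumes the standalone-metastore client's Configuration constructor.
    HiveMetaStoreClient client = new HiveMetaStoreClient(new Configuration());

    ISchema schema = new ISchema();
    schema.setName("weblogs");  // setName() is the assumed Thrift-generated setter
    client.createISchema(schema);

    SchemaVersion version = new SchemaVersion();
    // populate schema name, version number, and columns per the Thrift definition
    client.addSchemaVersion(version);

    SchemaVersion latest = client.getSchemaLatestVersion("weblogs");
    System.out.println("latest = " + latest.getVersion());  // getVersion() assumed likewise
    client.close();
  }
}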

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 573ac01..6e0c1a7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -47,6 +47,8 @@ import org.apache.hadoop.hive.metastore.api.DataOperationType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
+import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
 import org.apache.hadoop.hive.metastore.api.FireEventRequest;
 import org.apache.hadoop.hive.metastore.api.FireEventResponse;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
@@ -60,6 +62,7 @@ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
 import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -93,6 +96,9 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
@@ -1822,4 +1828,156 @@ public interface IMetaStoreClient {
   void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName,
       String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException,
       InvalidObjectException, MetaException, TException;
+
+  /**
+   * Create a new schema.  This is really a schema container, as there will be specific versions
+   * of the schema that have columns, etc.
+   * @param schema schema to create
+   * @throws AlreadyExistsException if a schema of this name already exists
+   * @throws NoSuchObjectException the database referenced by this schema does not exist
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void createISchema(ISchema schema) throws TException;
+
+  /**
+   * Alter an existing schema.
+   * @param schemaName name of the schema
+   * @param newSchema altered schema object
+   * @throws NoSuchObjectException no schema with this name could be found
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void alterISchema(String schemaName, ISchema newSchema) throws TException;
+
+  /**
+   * Fetch a schema.
+   * @param name name of the schema
+   * @return the schema or null if no such schema
+   * @throws NoSuchObjectException no schema matching this name exists
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  ISchema getISchema(String name) throws TException;
+
+  /**
+   * Drop an existing schema.  If any versions of this schema exist, this call will fail.
+   * @param name name of the schema to drop
+   * @throws NoSuchObjectException no schema with this name could be found
+   * @throws InvalidOperationException attempt to drop a schema that has versions
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void dropISchema(String name) throws TException;
+
+  /**
+   * Add a new version to an existing schema.
+   * @param schemaVersion version object to add
+   * @throws AlreadyExistsException a version of this schema with the same version id already exists
+   * @throws NoSuchObjectException no schema with this name could be found
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void addSchemaVersion(SchemaVersion schemaVersion) throws TException;
+
+  /**
+   * Get a specific version of a schema.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @return the schema version or null if no such schema version
+   * @throws NoSuchObjectException no schema matching this name and version exists
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  SchemaVersion getSchemaVersion(String schemaName, int version) throws TException;
+
+  /**
+   * Get the latest version of a schema.
+   * @param schemaName name of the schema
+   * @return latest version of the schema, or null if the schema does not exist or has no
+   * versions.
+   * @throws NoSuchObjectException no versions of schema matching this name exist
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  SchemaVersion getSchemaLatestVersion(String schemaName) throws TException;
+
+  /**
+   * Get all the extant versions of a schema.
+   * @param schemaName name of the schema.
+   * @return list of all the schema versions or null if this schema does not exist or has no
+   * versions.
+   * @throws NoSuchObjectException no versions of schema matching this name exist
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  List<SchemaVersion> getSchemaAllVersions(String schemaName) throws TException;
+
+  /**
+   * Drop a version of a schema.  Given that versions are supposed to be immutable, you should
+   * think really hard before you call this method.  It should only be used for schema versions
+   * that were added in error and never referenced any data.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @throws NoSuchObjectException no matching version of the schema could be found
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void dropSchemaVersion(String schemaName, int version) throws TException;
+
+  /**
+   * Find all schema versions that have columns that match a query.
+   * @param rqst the query; it can include column names, namespaces (actually stored in the
+   *             description field in FieldSchema), and types.
+   * @return The (possibly empty) list of schema name/version pairs that match.
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException;
+
+  /**
+   * Map a schema version to a serde.  This mapping is one-to-one, so this call will replace any
+   * previous mapping for this schema version.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @param serdeName name of the serde
+   * @throws NoSuchObjectException no matching version of the schema could be found or no serde
+   * of the provided name could be found
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void mapSchemaVersionToSerde(String schemaName, int version, String serdeName) throws TException;
+
+  /**
+   * Set the state of a schema version.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @param state state to set the schema version to
+   * @throws NoSuchObjectException no matching version of the schema could be found
+   * @throws InvalidOperationException attempt to make a state change that is not valid
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void setSchemaVersionState(String schemaName, int version, SchemaVersionState state) throws TException;
+
+  /**
+   * Add a serde.  This is primarily intended for use with SchemaRegistry objects; for tables
+   * and partitions, serdes are added automatically as needed when they are created or altered.
+   * @param serDeInfo serde to add
+   * @throws AlreadyExistsException serde of this name already exists
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  void addSerDe(SerDeInfo serDeInfo) throws TException;
+
+  /**
+   * Fetch a serde.  This is primarily intended for use with SchemaRegistry objects, since serdes
+   * are automatically fetched along with other information for tables and partitions.
+   * @param serDeName name of the serde
+   * @return the serde.
+   * @throws NoSuchObjectException no serde with this name exists.
+   * @throws MetaException general metastore error
+   * @throws TException general thrift error
+   */
+  SerDeInfo getSerDe(String serDeName) throws TException;
 }
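
Putting the version-lifecycle methods above together, a hedged sketch; ENABLED is an assumed constant of the SchemaVersionState Thrift enum, getVersion() is the assumed generated getter, and the serde name is a placeholder:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.api.SchemaVersionState;

class VersionLifecycleSketch {
  static void enableLatest(IMetaStoreClient client, String schemaName) throws Exception {
    SchemaVersion latest = client.getSchemaLatestVersion(schemaName);
    int v = latest.getVersion();  // Thrift-generated getter, assumed from the IDL
    // ENABLED is an assumed enum constant; any state-machine rules apply here
    client.setSchemaVersionState(schemaName, v, SchemaVersionState.ENABLED);
    // one-to-one: this replaces any earlier serde mapping for the version
    client.mapSchemaVersionToSerde(schemaName, v, "avro_serde_v1");
  }
}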

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
index fc4f4d7..b0abb96 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
@@ -27,20 +27,26 @@ import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
@@ -212,6 +218,26 @@ public abstract class MetaStoreEventListener implements Configurable {
   public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws MetaException {
   }
 
+  public void onCreateISchema(CreateISchemaEvent createISchemaEvent) throws MetaException {
+  }
+
+  public void onAlterISchema(AlterISchemaEvent alterISchemaEvent) throws MetaException {
+  }
+
+  public void onDropISchema(DropISchemaEvent dropISchemaEvent) throws MetaException {
+  }
+
+  public void onAddSchemaVersion(AddSchemaVersionEvent addSchemaVersionEvent) throws MetaException {
+  }
+
+  public void onAlterSchemaVersion(AlterSchemaVersionEvent alterSchemaVersionEvent)
+      throws MetaException {
+  }
+
+  public void onDropSchemaVersion(DropSchemaVersionEvent dropSchemaVersionEvent)
+      throws MetaException {
+  }
+
   @Override
   public Configuration getConf() {
     return this.conf;
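
Because the new hooks default to no-ops, existing listeners keep compiling; a subclass overrides only the events it cares about. A hedged sketch (the constructor signature follows MetaStoreEventListener as shown elsewhere; the events' accessors are not shown in this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;

public class SchemaAuditListener extends MetaStoreEventListener {
  public SchemaAuditListener(Configuration conf) {
    super(conf);
  }

  @Override
  public void onCreateISchema(CreateISchemaEvent event) throws MetaException {
    // audit or log the new schema here
  }

  @Override
  public void onDropSchemaVersion(DropSchemaVersionEvent event) throws MetaException {
    // invoked when a schema version is dropped
  }
}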

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
index f6e25c6..66daaa0 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java
@@ -29,17 +29,23 @@ import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
@@ -170,6 +176,42 @@ public class MetaStoreListenerNotifier {
               listener.onAddNotNullConstraint((AddNotNullConstraintEvent)event);
             }
           })
+          .put(EventType.CREATE_ISCHEMA, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onCreateISchema((CreateISchemaEvent)event);
+            }
+          })
+          .put(EventType.ALTER_ISCHEMA, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onAlterISchema((AlterISchemaEvent)event);
+            }
+          })
+          .put(EventType.DROP_ISCHEMA, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onDropISchema((DropISchemaEvent)event);
+            }
+          })
+          .put(EventType.ADD_SCHEMA_VERSION, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onAddSchemaVersion((AddSchemaVersionEvent) event);
+            }
+          })
+          .put(EventType.ALTER_SCHEMA_VERSION, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onAlterSchemaVersion((AlterSchemaVersionEvent) event);
+            }
+          })
+          .put(EventType.DROP_SCHEMA_VERSION, new EventNotifier() {
+            @Override
+            public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException {
+              listener.onDropSchemaVersion((DropSchemaVersionEvent) event);
+            }
+          })
           .build()
   );
 

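With the map entries above in place, the new events travel the same dispatch path as the
existing ones. A sketch of a call site, assuming the notifyEvent(listeners, eventType,
event) entry point this class already exposes for the pre-existing event types (handler,
schemaVersion and listeners stand for values available in the metastore handler):

    // Dispatch an ADD_SCHEMA_VERSION event to every registered listener.
    AddSchemaVersionEvent event = new AddSchemaVersionEvent(true, handler, schemaVersion);
    MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_SCHEMA_VERSION, event);
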
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 2c92bb2..41bbfdf 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -100,6 +100,7 @@ import org.apache.hadoop.hive.metastore.api.FunctionType;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -129,7 +130,13 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
+import org.apache.hadoop.hive.metastore.api.SchemaType;
+import org.apache.hadoop.hive.metastore.api.SchemaValidation;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SerdeType;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -155,6 +162,7 @@ import org.apache.hadoop.hive.metastore.model.MDelegationToken;
 import org.apache.hadoop.hive.metastore.model.MFieldSchema;
 import org.apache.hadoop.hive.metastore.model.MFunction;
 import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
+import org.apache.hadoop.hive.metastore.model.MISchema;
 import org.apache.hadoop.hive.metastore.model.MIndex;
 import org.apache.hadoop.hive.metastore.model.MMasterKey;
 import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
@@ -169,6 +177,7 @@ import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
 import org.apache.hadoop.hive.metastore.model.MResourceUri;
 import org.apache.hadoop.hive.metastore.model.MRole;
 import org.apache.hadoop.hive.metastore.model.MRoleMap;
+import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
 import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
 import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 import org.apache.hadoop.hive.metastore.model.MStringList;
@@ -1680,15 +1689,22 @@ public class ObjectStore implements RawStore, Configurable {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
-    return new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
+    SerDeInfo serde =
+        new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
+    if (ms.getDescription() != null) serde.setDescription(ms.getDescription());
+    if (ms.getSerializerClass() != null) serde.setSerializerClass(ms.getSerializerClass());
+    if (ms.getDeserializerClass() != null) serde.setDeserializerClass(ms.getDeserializerClass());
+    if (ms.getSerdeType() > 0) serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+    return serde;
   }
 
   private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
     if (ms == null) {
       throw new MetaException("Invalid SerDeInfo object");
     }
-    return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms
-        .getParameters());
+    return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms.getParameters(),
+        ms.getDescription(), ms.getSerializerClass(), ms.getDeserializerClass(),
+        ms.getSerdeType() == null ? 0 : ms.getSerdeType().getValue());
   }
 
   /**
@@ -9464,6 +9480,392 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
+  @Override
+  public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+      NoSuchObjectException {
+    boolean committed = false;
+    MISchema mSchema = convertToMISchema(schema);
+    try {
+      openTransaction();
+      if (getMISchema(schema.getName()) != null) {
+        throw new AlreadyExistsException("Schema with name " + schema.getName() + " already exists");
+      }
+      pm.makePersistent(mSchema);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema)
+      throws NoSuchObjectException, MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      MISchema oldMSchema = getMISchema(schemaName);
+      if (oldMSchema == null) {
+        throw new NoSuchObjectException("Schema " + schemaName + " does not exist");
+      }
+
+      // Don't support changing name or type
+      oldMSchema.setCompatibility(newSchema.getCompatibility().getValue());
+      oldMSchema.setValidationLevel(newSchema.getValidationLevel().getValue());
+      oldMSchema.setCanEvolve(newSchema.isCanEvolve());
+      if (newSchema.isSetSchemaGroup()) oldMSchema.setSchemaGroup(newSchema.getSchemaGroup());
+      if (newSchema.isSetDescription()) oldMSchema.setDescription(newSchema.getDescription());
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public ISchema getISchema(String schemaName) throws MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      ISchema schema = convertToISchema(getMISchema(schemaName));
+      committed = commitTransaction();
+      return schema;
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  private MISchema getMISchema(String schemaName) {
+    Query query = null;
+    try {
+      schemaName = normalizeIdentifier(schemaName);
+      query = pm.newQuery(MISchema.class, "name == schemaName");
+      query.declareParameters("java.lang.String schemaName");
+      query.setUnique(true);
+      MISchema mSchema = (MISchema)query.execute(schemaName);
+      pm.retrieve(mSchema);
+      return mSchema;
+    } finally {
+      if (query != null) query.closeAll();
+    }
+  }
+
+  @Override
+  public void dropISchema(String schemaName) throws NoSuchObjectException, MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      MISchema mSchema = getMISchema(schemaName);
+      if (mSchema != null) {
+        pm.deletePersistentAll(mSchema);
+      } else {
+        throw new NoSuchObjectException("Schema " + schemaName + " does not exist");
+      }
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion)
+      throws AlreadyExistsException, NoSuchObjectException, MetaException {
+    boolean committed = false;
+    MSchemaVersion mSchemaVersion = convertToMSchemaVersion(schemaVersion);
+    try {
+      openTransaction();
+      // Make sure it doesn't already exist
+      if (getMSchemaVersion(schemaVersion.getSchemaName(), schemaVersion.getVersion()) != null) {
+        throw new AlreadyExistsException("Schema name " + schemaVersion.getSchemaName() +
+            " version " + schemaVersion.getVersion() + " already exists");
+      }
+      // Make sure the referenced Schema exists
+      if (getMISchema(schemaVersion.getSchemaName()) == null) {
+        throw new NoSuchObjectException("Schema " + schemaVersion.getSchemaName() + " does not exist");
+      }
+      pm.makePersistent(mSchemaVersion);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion)
+      throws NoSuchObjectException, MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      MSchemaVersion oldMSchemaVersion = getMSchemaVersion(schemaName, version);
+      if (oldMSchemaVersion == null) {
+        throw new NoSuchObjectException("No schema of name " + schemaName + " with version " +
+            version + " exists");
+      }
+
+      // We only support changing the SerDe mapping and the state.
+      if (newVersion.isSetSerDe()) oldMSchemaVersion.setSerDe(convertToMSerDeInfo(newVersion.getSerDe()));
+      if (newVersion.isSetState()) oldMSchemaVersion.setState(newVersion.getState().getValue());
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      SchemaVersion schemaVersion = convertToSchemaVersion(getMSchemaVersion(schemaName, version));
+      committed = commitTransaction();
+      return schemaVersion;
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  private MSchemaVersion getMSchemaVersion(String schemaName, int version) {
+    Query query = null;
+    try {
+      schemaName = normalizeIdentifier(schemaName);
+      query = pm.newQuery(MSchemaVersion.class, "iSchema.name == schemaName && version == schemaVersion");
+      query.declareParameters("java.lang.String schemaName, java.lang.Integer schemaVersion");
+      query.setUnique(true);
+      MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(schemaName, version);
+      pm.retrieve(mSchemaVersion);
+      if (mSchemaVersion != null) {
+        pm.retrieveAll(mSchemaVersion.getCols());
+        if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+      }
+      return mSchemaVersion;
+    } finally {
+      if (query != null) query.closeAll();
+    }
+  }
+
+  @Override
+  public SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException {
+    boolean committed = false;
+    Query query = null;
+    try {
+      openTransaction();
+      schemaName = normalizeIdentifier(schemaName);
+      query = pm.newQuery(MSchemaVersion.class, "iSchema.name == schemaName");
+      query.declareParameters("java.lang.String schemaName");
+      query.setUnique(true);
+      query.setOrdering("version descending");
+      query.setRange(0, 1);
+      MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(schemaName);
+      pm.retrieve(mSchemaVersion);
+      if (mSchemaVersion != null) {
+        pm.retrieveAll(mSchemaVersion.getCols());
+        if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+      }
+      committed = commitTransaction();
+      return mSchemaVersion == null ? null : convertToSchemaVersion(mSchemaVersion);
+    } finally {
+      rollbackAndCleanup(committed, query);
+    }
+  }
+
+  @Override
+  public List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException {
+    boolean committed = false;
+    Query query = null;
+    try {
+      openTransaction();
+      schemaName = normalizeIdentifier(schemaName);
+      query = pm.newQuery(MSchemaVersion.class, "iSchema.name == schemaName");
+      query.declareParameters("java.lang.String schemaName");
+      query.setOrdering("version descending");
+      List<MSchemaVersion> mSchemaVersions = query.setParameters(schemaName).executeList();
+      pm.retrieveAll(mSchemaVersions);
+      if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return null;
+      List<SchemaVersion> schemaVersions = new ArrayList<>(mSchemaVersions.size());
+      for (MSchemaVersion mSchemaVersion : mSchemaVersions) {
+        pm.retrieveAll(mSchemaVersion.getCols());
+        if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+        schemaVersions.add(convertToSchemaVersion(mSchemaVersion));
+      }
+      committed = commitTransaction();
+      return schemaVersions;
+    } finally {
+      rollbackAndCleanup(committed, query);
+    }
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                        String type) throws MetaException {
+    if (colName == null && colNamespace == null) {
+      // Don't allow an unfiltered query; it would return every schema version in the store.
+      throw new MetaException("You must specify column name or column namespace, else your query " +
+          "may return too many results");
+    }
+    boolean committed = false;
+    Query query = null;
+    try {
+      openTransaction();
+      if (colName != null) colName = normalizeIdentifier(colName);
+      if (type != null) type = normalizeIdentifier(type);
+      Map<String, String> parameters = new HashMap<>(3);
+      StringBuilder sql = new StringBuilder("select SCHEMA_VERSION_ID from " +
+          "SCHEMA_VERSION, COLUMNS_V2 where SCHEMA_VERSION.CD_ID = COLUMNS_V2.CD_ID ");
+      if (colName != null) {
+        sql.append("and COLUMNS_V2.COLUMN_NAME = :colName ");
+        parameters.put("colName", colName);
+      }
+      if (colNamespace != null) {
+        sql.append("and COLUMNS_V2.COMMENT = :colComment ");
+        parameters.put("colComment", colNamespace);
+      }
+      if (type != null) {
+        sql.append("and COLUMNS_V2.TYPE_NAME = :colType ");
+        parameters.put("colType", type);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("getSchemaVersionsByColumns going to execute query " + sql.toString());
+        LOG.debug("With parameters");
+        for (Map.Entry<String, String> p : parameters.entrySet()) {
+          LOG.debug(p.getKey() + " : " + p.getValue());
+        }
+      }
+      query = pm.newQuery("javax.jdo.query.SQL", sql.toString());
+      query.setClass(MSchemaVersion.class);
+      List<MSchemaVersion> mSchemaVersions = query.setNamedParameters(parameters).executeList();
+      if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return Collections.emptyList();
+      pm.retrieveAll(mSchemaVersions);
+      List<SchemaVersion> schemaVersions = new ArrayList<>(mSchemaVersions.size());
+      for (MSchemaVersion mSchemaVersion : mSchemaVersions) {
+        pm.retrieveAll(mSchemaVersion.getCols());
+        if (mSchemaVersion.getSerDe() != null) pm.retrieve(mSchemaVersion.getSerDe());
+        schemaVersions.add(convertToSchemaVersion(mSchemaVersion));
+      }
+      committed = commitTransaction();
+      return schemaVersions;
+    } finally {
+      rollbackAndCleanup(committed, query);
+    }
+
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException,
+      MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      MSchemaVersion mSchemaVersion = getMSchemaVersion(schemaName, version);
+      if (mSchemaVersion != null) {
+        pm.deletePersistentAll(mSchemaVersion);
+      } else {
+        throw new NoSuchObjectException("Schema " + schemaName + " of version " + version +
+            "does not exist");
+      }
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  @Override
+  public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      MSerDeInfo mSerDeInfo = getMSerDeInfo(serDeName);
+      if (mSerDeInfo == null) {
+        throw new NoSuchObjectException("No SerDe named " + serDeName);
+      }
+      SerDeInfo serde = convertToSerDeInfo(mSerDeInfo);
+      committed = commitTransaction();
+      return serde;
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+  }
+
+  private MSerDeInfo getMSerDeInfo(String serDeName) throws MetaException {
+    Query query = null;
+    try {
+      query = pm.newQuery(MSerDeInfo.class, "name == serDeName");
+      query.declareParameters("java.lang.String serDeName");
+      query.setUnique(true);
+      MSerDeInfo mSerDeInfo = (MSerDeInfo)query.execute(serDeName);
+      pm.retrieve(mSerDeInfo);
+      return mSerDeInfo;
+    } finally {
+      if (query != null) query.closeAll();
+    }
+  }
+
+  @Override
+  public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+    boolean committed = false;
+    try {
+      openTransaction();
+      if (getMSerDeInfo(serde.getName()) != null) {
+        throw new AlreadyExistsException("Serde with name " + serde.getName() + " already exists");
+      }
+      MSerDeInfo mSerde = convertToMSerDeInfo(serde);
+      pm.makePersistent(mSerde);
+      committed = commitTransaction();
+    } finally {
+      if (!committed) rollbackTransaction();
+    }
+
+  }
+
+  private MISchema convertToMISchema(ISchema schema) throws NoSuchObjectException {
+    return new MISchema(schema.getSchemaType().getValue(),
+                        normalizeIdentifier(schema.getName()),
+                        getMDatabase(schema.getDbName()),
+                        schema.getCompatibility().getValue(),
+                        schema.getValidationLevel().getValue(),
+                        schema.isCanEvolve(),
+                        schema.isSetSchemaGroup() ? schema.getSchemaGroup() : null,
+                        schema.isSetDescription() ? schema.getDescription() : null);
+  }
+
+  private ISchema convertToISchema(MISchema mSchema) {
+    if (mSchema == null) return null;
+    ISchema schema = new ISchema(SchemaType.findByValue(mSchema.getSchemaType()),
+                                 mSchema.getName(),
+                                 mSchema.getDb().getName(),
+                                 SchemaCompatibility.findByValue(mSchema.getCompatibility()),
+                                 SchemaValidation.findByValue(mSchema.getValidationLevel()),
+                                 mSchema.getCanEvolve());
+    if (mSchema.getDescription() != null) schema.setDescription(mSchema.getDescription());
+    if (mSchema.getSchemaGroup() != null) schema.setSchemaGroup(mSchema.getSchemaGroup());
+    return schema;
+  }
+
+  private MSchemaVersion convertToMSchemaVersion(SchemaVersion schemaVersion) throws MetaException {
+    return new MSchemaVersion(getMISchema(normalizeIdentifier(schemaVersion.getSchemaName())),
+                              schemaVersion.getVersion(),
+                              schemaVersion.getCreatedAt(),
+                              createNewMColumnDescriptor(convertToMFieldSchemas(schemaVersion.getCols())),
+                              schemaVersion.isSetState() ? schemaVersion.getState().getValue() : 0,
+                              schemaVersion.isSetDescription() ? schemaVersion.getDescription() : null,
+                              schemaVersion.isSetSchemaText() ? schemaVersion.getSchemaText() : null,
+                              schemaVersion.isSetFingerprint() ? schemaVersion.getFingerprint() : null,
+                              schemaVersion.isSetName() ? schemaVersion.getName() : null,
+                              schemaVersion.isSetSerDe() ? convertToMSerDeInfo(schemaVersion.getSerDe()) : null);
+  }
+
+  private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) throws MetaException {
+    if (mSchemaVersion == null) return null;
+    SchemaVersion schemaVersion = new SchemaVersion(mSchemaVersion.getiSchema().getName(),
+                                                    mSchemaVersion.getVersion(),
+                                                    mSchemaVersion.getCreatedAt(),
+                                                    convertToFieldSchemas(mSchemaVersion.getCols().getCols()));
+    if (mSchemaVersion.getState() > 0) schemaVersion.setState(SchemaVersionState.findByValue(mSchemaVersion.getState()));
+    if (mSchemaVersion.getDescription() != null) schemaVersion.setDescription(mSchemaVersion.getDescription());
+    if (mSchemaVersion.getSchemaText() != null) schemaVersion.setSchemaText(mSchemaVersion.getSchemaText());
+    if (mSchemaVersion.getFingerprint() != null) schemaVersion.setFingerprint(mSchemaVersion.getFingerprint());
+    if (mSchemaVersion.getName() != null) schemaVersion.setName(mSchemaVersion.getName());
+    if (mSchemaVersion.getSerDe() != null) schemaVersion.setSerDe(convertToSerDeInfo(mSchemaVersion.getSerDe()));
+    return schemaVersion;
+  }
+
   /**
    * This is a cleanup method which is used to rollback a active transaction
    * if the success flag is false and close the associated Query object. This method is used

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 75fbfa2..4715534 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -66,6 +67,8 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -807,4 +810,136 @@ public interface RawStore extends Configurable {
 
   void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
       throws NoSuchObjectException, InvalidOperationException, MetaException;
+
+  /**
+   * Create a new ISchema.
+   * @param schema schema to create
+   * @throws AlreadyExistsException there's already a schema with this name
+   * @throws MetaException general database exception
+   * @throws NoSuchObjectException the database this schema is in does not exist
+   */
+  void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+      NoSuchObjectException;
+
+  /**
+   * Alter an existing ISchema.  This assumes the caller has already checked that such a schema
+   * exists.
+   * @param schemaName name of the schema
+   * @param newSchema new schema object
+   * @throws NoSuchObjectException no schema with this name exists
+   * @throws MetaException general database exception
+   */
+  void alterISchema(String schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException;
+
+  /**
+   * Get an ISchema by name.
+   * @param schemaName name of the schema
+   * @return ISchema
+   * @throws MetaException general database exception
+   */
+  ISchema getISchema(String schemaName) throws MetaException;
+
+  /**
+   * Drop an ISchema.  This does not check whether there are valid versions of the schema in
+   * existence; it assumes the caller has already done that.
+   * @param schemaName name of the schema to drop
+   * @throws NoSuchObjectException no schema of this name exists
+   * @throws MetaException general database exception
+   */
+  void dropISchema(String schemaName) throws NoSuchObjectException, MetaException;
+
+  /**
+   * Create a new version of an existing schema.
+   * @param schemaVersion schema version to create
+   * @throws AlreadyExistsException a version of the schema with the same version number already
+   * exists.
+   * @throws InvalidObjectException the passed in SchemaVersion object has problems.
+   * @throws NoSuchObjectException no schema with the passed in name exists.
+   * @throws MetaException general database exception
+   */
+  void addSchemaVersion(SchemaVersion schemaVersion)
+      throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException;
+
+  /**
+   * Alter a schema version.  Note that the Thrift interface only supports changing the serde
+   * mapping and state.  This method does not guarantee it will check any more than that.  This
+   * method does not understand state transitions and simply assumes that the new state it is
+   * passed is valid.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @param newVersion altered SchemaVersion
+   * @throws NoSuchObjectException no such version of the named schema exists
+   * @throws MetaException general database exception
+   */
+  void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion)
+      throws NoSuchObjectException, MetaException;
+
+  /**
+   * Get a specific schema version.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @return the SchemaVersion
+   * @throws MetaException general database exception
+   */
+  SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException;
+
+  /**
+   * Get the latest version of a schema.
+   * @param schemaName name of the schema
+   * @return latest version of the schema
+   * @throws MetaException general database exception
+   */
+  SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException;
+
+  /**
+   * Get all of the versions of a schema.
+   * @param schemaName name of the schema
+   * @return all versions of the schema
+   * @throws MetaException general database exception
+   */
+  List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException;
+
+  /**
+   * Find all SchemaVersion objects that match a query.  The query will select all SchemaVersions
+   * that are equal to all of the non-null passed in arguments.  That is, if arguments
+   * colName='name', colNamespace=null, type='string' are passed in, then all schemas that have
+   * a column with colName 'name' and type 'string' will be returned.
+   * @param colName column name.  Null is ok, which will cause this field to not be used in the
+   *                query.
+   * @param colNamespace column namespace.   Null is ok, which will cause this field to not be
+   *                     used in the query.
+   * @param type column type.   Null is ok, which will cause this field to not be used in the
+   *             query.
+   * @return List of all SchemaVersions that match.  Note that there is no expectation that these
+   * SchemaVersions derive from the same ISchema.  The list will be empty if there are no
+   * matching SchemaVersions.
+   * @throws MetaException general database exception
+   */
+  List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace, String type)
+      throws MetaException;
+
+  /**
+   * Drop a version of the schema.
+   * @param schemaName name of the schema
+   * @param version version of the schema
+   * @throws NoSuchObjectException no such version of the named schema exists
+   * @throws MetaException general database exception
+   */
+  void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException, MetaException;
+
+  /**
+   * Get serde information.
+   * @param serDeName name of the SerDe
+   * @return the SerDe
+   * @throws NoSuchObjectException no serde with this name exists
+   * @throws MetaException general database exception
+   */
+  SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException;
+
+  /**
+   * Add a serde.
+   * @param serde serde to add
+   * @throws AlreadyExistsException a serde of this name already exists
+   * @throws MetaException general database exception
+   */
+  void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException;
 }

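As the getSchemaVersionsByColumns Javadoc above spells out, the non-null arguments are
ANDed together. For example, with store standing for any RawStore implementation:

    // Every schema version that has a column named "name" of type "string",
    // in any column namespace:
    List<SchemaVersion> matches =
        store.getSchemaVersionsByColumns("name", null, "string");
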
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index da518ab..e24f3d4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -84,6 +85,8 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
@@ -2255,6 +2258,78 @@ public class CachedStore implements RawStore, Configurable {
     return rawStore.getColStatsForTablePartitions(dbName, tableName);
   }
 
+  // TODO - not clear if we should cache these or not.  For now, don't bother
+  @Override
+  public void createISchema(ISchema schema)
+      throws AlreadyExistsException, NoSuchObjectException, MetaException {
+    rawStore.createISchema(schema);
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema)
+      throws NoSuchObjectException, MetaException {
+    rawStore.alterISchema(schemaName, newSchema);
+  }
+
+  @Override
+  public ISchema getISchema(String schemaName) throws MetaException {
+    return rawStore.getISchema(schemaName);
+  }
+
+  @Override
+  public void dropISchema(String schemaName) throws NoSuchObjectException, MetaException {
+    rawStore.dropISchema(schemaName);
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion) throws
+      AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+    rawStore.addSchemaVersion(schemaVersion);
+  }
+
+  @Override
+  public void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion) throws
+      NoSuchObjectException, MetaException {
+    rawStore.alterSchemaVersion(schemaName, version, newVersion);
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException {
+    return rawStore.getSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException {
+    return rawStore.getLatestSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException {
+    return rawStore.getAllSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                        String type) throws MetaException {
+    return rawStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException,
+      MetaException {
+    rawStore.dropSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+    return rawStore.getSerDeInfo(serDeName);
+  }
+
+  @Override
+  public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+    rawStore.addSerde(serde);
+  }
+
   public RawStore getRawStore() {
     return rawStore;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
index 7627d89..01693ec 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
@@ -73,7 +73,7 @@ public class DatabaseBuilder {
     return this;
   }
 
-  public Database build() throws TException {
+  public Database build() throws MetaException {
     if (name == null) throw new MetaException("You must name the database");
     Database db = new Database(name, description, location, params);
     try {

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
new file mode 100644
index 0000000..77adfbb
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
+import org.apache.hadoop.hive.metastore.api.SchemaType;
+import org.apache.hadoop.hive.metastore.api.SchemaValidation;
+
+public class ISchemaBuilder {
+  private SchemaType schemaType; // required
+  private String name; // required
+  private String dbName; // required
+  private SchemaCompatibility compatibility; // required
+  private SchemaValidation validationLevel; // required
+  private boolean canEvolve; // required
+  private String schemaGroup; // optional
+  private String description; // optional
+
+  public ISchemaBuilder() {
+    compatibility = SchemaCompatibility.BACKWARD;
+    validationLevel = SchemaValidation.ALL;
+    canEvolve = true;
+    dbName = "default";
+  }
+
+  public ISchemaBuilder setSchemaType(SchemaType schemaType) {
+    this.schemaType = schemaType;
+    return this;
+  }
+
+  public ISchemaBuilder setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  public ISchemaBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public ISchemaBuilder setCompatibility(SchemaCompatibility compatibility) {
+    this.compatibility = compatibility;
+    return this;
+  }
+
+  public ISchemaBuilder setValidationLevel(SchemaValidation validationLevel) {
+    this.validationLevel = validationLevel;
+    return this;
+  }
+
+  public ISchemaBuilder setCanEvolve(boolean canEvolve) {
+    this.canEvolve = canEvolve;
+    return this;
+  }
+
+  public ISchemaBuilder setSchemaGroup(String schemaGroup) {
+    this.schemaGroup = schemaGroup;
+    return this;
+  }
+
+  public ISchemaBuilder setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public ISchema build() throws MetaException {
+    if (schemaType == null || name == null) {
+      throw new MetaException("You must provide a schemaType and name");
+    }
+    ISchema iSchema =
+        new ISchema(schemaType, name, dbName, compatibility, validationLevel, canEvolve);
+    if (schemaGroup != null) iSchema.setSchemaGroup(schemaGroup);
+    if (description != null) iSchema.setDescription(description);
+    return iSchema;
+  }
+}

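A usage sketch for the builder (the name and description are illustrative, and
SchemaType.AVRO is assumed to be one of the SchemaType constants); compatibility,
validation level, canEvolve and dbName fall back to the defaults set in the constructor:

    ISchema schema = new ISchemaBuilder()
        .setSchemaType(SchemaType.AVRO)
        .setName("alerts")
        .setDescription("schema for alert events")
        .build();
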
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
new file mode 100644
index 0000000..289ef0a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+
+public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuilder> {
+  private String schemaName; // required
+  private int version; // required
+  private long createdAt; // required
+  private SchemaVersionState state; // optional
+  private String description; // optional
+  private String schemaText; // optional
+  private String fingerprint; // optional
+  private String name; // optional
+
+  public SchemaVersionBuilder() {
+    createdAt = System.currentTimeMillis() / 1000;
+    super.setChild(this);
+  }
+
+  public SchemaVersionBuilder setSchemaName(String schemaName) {
+    this.schemaName = schemaName;
+    return this;
+  }
+
+  public SchemaVersionBuilder setVersion(int version) {
+    this.version = version;
+    return this;
+  }
+
+  public SchemaVersionBuilder setCreatedAt(long createdAt) {
+    this.createdAt = createdAt;
+    return this;
+  }
+
+  public SchemaVersionBuilder setState(
+      SchemaVersionState state) {
+    this.state = state;
+    return this;
+  }
+
+  public SchemaVersionBuilder setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public SchemaVersionBuilder setSchemaText(String schemaText) {
+    this.schemaText = schemaText;
+    return this;
+  }
+
+  public SchemaVersionBuilder setFingerprint(String fingerprint) {
+    this.fingerprint = fingerprint;
+    return this;
+  }
+
+  public SchemaVersionBuilder setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  public SchemaVersion build() throws MetaException {
+    SchemaVersion schemaVersion = new SchemaVersion(schemaName, version, createdAt, getCols());
+    if (state != null) schemaVersion.setState(state);
+    if (description != null) schemaVersion.setDescription(description);
+    if (schemaText != null) schemaVersion.setSchemaText(schemaText);
+    if (fingerprint != null) schemaVersion.setFingerprint(fingerprint);
+    if (name != null) schemaVersion.setName(name);
+    schemaVersion.setSerDe(buildSerde());
+    return schemaVersion;
+  }
+}

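A matching sketch for a schema version (column names and the state value are
illustrative; SchemaVersionState.ENABLED is assumed to be one of the SchemaVersionState
constants). The addCol and serde setters are inherited from SerdeAndColsBuilder, added
in the next file:

    SchemaVersion sv = new SchemaVersionBuilder()
        .setSchemaName("alerts")
        .setVersion(1)
        .addCol("id", "bigint")
        .addCol("msg", "string")
        .setState(SchemaVersionState.ENABLED)
        .build();
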
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SerdeAndColsBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SerdeAndColsBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SerdeAndColsBuilder.java
new file mode 100644
index 0000000..e1405d3
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SerdeAndColsBuilder.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SerdeType;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This collects together SerdeInfo and columns, since StorageDescriptor and SchemaVersion share
+ * those traits.
+ * @param <T>
+ */
+abstract class SerdeAndColsBuilder<T> {
+  private static final String SERDE_LIB = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
+
+  private List<FieldSchema> cols;
+  private String serdeName, serdeLib, serdeDescription, serdeSerializerClass, serdeDeserializerClass;
+  private Map<String, String> serdeParams;
+  private SerdeType serdeType;
+  protected T child;
+
+  protected SerdeAndColsBuilder() {
+    serdeParams = new HashMap<>();
+    serdeLib = SERDE_LIB;
+  }
+
+  protected void setChild(T child) {
+    this.child = child;
+  }
+
+  protected SerDeInfo buildSerde() {
+    SerDeInfo serDeInfo = new SerDeInfo(serdeName, serdeLib, serdeParams);
+    if (serdeDescription != null) serDeInfo.setDescription(serdeDescription);
+    if (serdeSerializerClass != null) serDeInfo.setSerializerClass(serdeSerializerClass);
+    if (serdeDeserializerClass != null) serDeInfo.setDeserializerClass(serdeDeserializerClass);
+    if (serdeType != null) serDeInfo.setSerdeType(serdeType);
+    return serDeInfo;
+  }
+
+  protected List<FieldSchema> getCols() throws MetaException {
+    if (cols == null) throw new MetaException("You must provide the columns");
+    return cols;
+  }
+
+  public T setCols(
+      List<FieldSchema> cols) {
+    this.cols = cols;
+    return child;
+  }
+
+  public T addCol(String name, String type, String comment) {
+    if (cols == null) cols = new ArrayList<>();
+    cols.add(new FieldSchema(name, type, comment));
+    return child;
+  }
+
+  public T addCol(String name, String type) {
+    return addCol(name, type, "");
+  }
+
+  public T setSerdeName(String serdeName) {
+    this.serdeName = serdeName;
+    return child;
+  }
+
+  public T setSerdeLib(String serdeLib) {
+    this.serdeLib = serdeLib;
+    return child;
+  }
+
+  public T setSerdeDescription(String serdeDescription) {
+    this.serdeDescription = serdeDescription;
+    return child;
+  }
+
+  public T setSerdeSerializerClass(String serdeSerializerClass) {
+    this.serdeSerializerClass = serdeSerializerClass;
+    return child;
+  }
+
+  public T setSerdeDeserializerClass(String serdeDeserializerClass) {
+    this.serdeDeserializerClass = serdeDeserializerClass;
+    return child;
+  }
+
+  public T setSerdeParams(
+      Map<String, String> serdeParams) {
+    this.serdeParams = serdeParams;
+    return child;
+  }
+
+  public T addSerdeParam(String key, String value) {
+    if (serdeParams == null) serdeParams = new HashMap<>();
+    serdeParams.put(key, value);
+    return child;
+  }
+
+  public T setSerdeType(SerdeType serdeType) {
+    this.serdeType = serdeType;
+    return child;
+  }
+}

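The child field is what keeps fluent chaining type-safe across the hierarchy: each
setter returns child, the concrete builder registered through setChild, rather than
this, so a chain started on a subclass never decays to the abstract parent type. The
idiom in miniature (hypothetical names):

    abstract class Base<T> {
      protected T child;
      protected void setChild(T child) { this.child = child; }
      public T setX(int x) { return child; }  // returns the concrete type, not Base
    }
    class Impl extends Base<Impl> {
      Impl() { setChild(this); }
      public Impl setY(int y) { return this; }
    }
    // new Impl().setX(1).setY(2) compiles; if setX were declared to return
    // Base<Impl>, the chain would decay after setX and setY would not resolve.
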
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
index 39d1fa2..433e7c7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
@@ -34,44 +34,36 @@ import java.util.Map;
  * defaults for everything else.  This is intended for use just by objects that have a StorageDescriptor,
  * not direct use.
  */
-abstract class StorageDescriptorBuilder<T> {
-  private static final String SERDE_LIB = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
+abstract class StorageDescriptorBuilder<T> extends SerdeAndColsBuilder<T> {
   private static final String INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveInputFormat";
   private static final String OUTPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveOutputFormat";
 
-  private String location, inputFormat, outputFormat, serdeName, serdeLib;
-  private List<FieldSchema> cols;
+  private String location, inputFormat, outputFormat;
   private int numBuckets;
-  private Map<String, String> storageDescriptorParams, serdeParams;
+  private Map<String, String> storageDescriptorParams;
   private boolean compressed, storedAsSubDirectories;
   private List<String> bucketCols, skewedColNames;
   private List<Order> sortCols;
   private List<List<String>> skewedColValues;
   private Map<List<String>, String> skewedColValueLocationMaps;
-  // This enables us to return the correct type from the builder
-  private T child;
 
   protected StorageDescriptorBuilder() {
     // Set some reasonable defaults
     storageDescriptorParams = new HashMap<>();
-    serdeParams = new HashMap<>();
     bucketCols = new ArrayList<>();
     sortCols = new ArrayList<>();
     numBuckets = 0;
     compressed = false;
     inputFormat = INPUT_FORMAT;
     outputFormat = OUTPUT_FORMAT;
-    serdeLib = SERDE_LIB;
     skewedColNames = new ArrayList<>();
     skewedColValues = new ArrayList<>();
     skewedColValueLocationMaps = new HashMap<>();
   }
 
   protected StorageDescriptor buildSd() throws MetaException {
-    if (cols == null) throw new MetaException("You must provide the columns");
-    SerDeInfo serdeInfo = new SerDeInfo(serdeName, serdeLib, serdeParams);
-    StorageDescriptor sd = new StorageDescriptor(cols, location, inputFormat, outputFormat,
-        compressed, numBuckets, serdeInfo, bucketCols, sortCols, storageDescriptorParams);
+    StorageDescriptor sd = new StorageDescriptor(getCols(), location, inputFormat, outputFormat,
+        compressed, numBuckets, buildSerde(), bucketCols, sortCols, storageDescriptorParams);
     sd.setStoredAsSubDirectories(storedAsSubDirectories);
     if (skewedColNames != null) {
       SkewedInfo skewed = new SkewedInfo(skewedColNames, skewedColValues,
@@ -81,10 +73,6 @@ abstract class StorageDescriptorBuilder<T> {
     return sd;
   }
 
-  protected void setChild(T child) {
-    this.child = child;
-  }
-
   public T setLocation(String location) {
     this.location = location;
     return child;
@@ -100,30 +88,6 @@ abstract class StorageDescriptorBuilder<T> {
     return child;
   }
 
-  public T setSerdeName(String serdeName) {
-    this.serdeName = serdeName;
-    return child;
-  }
-
-  public T setSerdeLib(String serdeLib) {
-    this.serdeLib = serdeLib;
-    return child;
-  }
-  public T setCols(List<FieldSchema> cols) {
-    this.cols = cols;
-    return child;
-  }
-
-  public T addCol(String name, String type, String comment) {
-    if (cols == null) cols = new ArrayList<>();
-    cols.add(new FieldSchema(name, type, comment));
-    return child;
-  }
-
-  public T addCol(String name, String type) {
-    return addCol(name, type, "");
-  }
-
   public T setNumBuckets(int numBuckets) {
     this.numBuckets = numBuckets;
     return child;
@@ -141,17 +105,6 @@ abstract class StorageDescriptorBuilder<T> {
     return child;
   }
 
-  public T setSerdeParams(Map<String, String> serdeParams) {
-    this.serdeParams = serdeParams;
-    return child;
-  }
-
-  public T addSerdeParam(String key, String value) {
-    if (serdeParams == null) serdeParams = new HashMap<>();
-    serdeParams.put(key, value);
-    return child;
-  }
-
   public T setCompressed(boolean compressed) {
     this.compressed = compressed;
     return child;

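Net effect of this refactor: the serde and column setters now come from
SerdeAndColsBuilder (shared with the new SchemaVersionBuilder), and buildSd() delegates
to getCols() and buildSerde(). Existing fluent call sites keep compiling unchanged,
e.g. with a hypothetical concrete subclass MyBuilder:

    // addCol is now inherited from SerdeAndColsBuilder; setLocation is unchanged.
    MyBuilder b = new MyBuilder()
        .addCol("id", "bigint")
        .setLocation("/tmp/t");
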
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AddSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AddSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AddSchemaVersionEvent.java
new file mode 100644
index 0000000..e6839de
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AddSchemaVersionEvent.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class AddSchemaVersionEvent extends ListenerEvent {
+
+  private final SchemaVersion schemaVersion;
+
+  public AddSchemaVersionEvent(boolean status, IHMSHandler handler,
+                               SchemaVersion schemaVersion) {
+    super(status, handler);
+    this.schemaVersion = schemaVersion;
+  }
+
+  public SchemaVersion getSchemaVersion() {
+    return schemaVersion;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterISchemaEvent.java
new file mode 100644
index 0000000..eaf1db5
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterISchemaEvent.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class AlterISchemaEvent extends ListenerEvent {
+
+  private final ISchema oldSchema, newSchema;
+
+  public AlterISchemaEvent(boolean status, IHMSHandler handler,
+                           ISchema oldSchema, ISchema newSchema) {
+    super(status, handler);
+    this.oldSchema = oldSchema;
+    this.newSchema = newSchema;
+  }
+
+  public ISchema getOldSchema() {
+    return oldSchema;
+  }
+
+  public ISchema getNewSchema() {
+    return newSchema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterSchemaVersionEvent.java
new file mode 100644
index 0000000..76b834e
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/AlterSchemaVersionEvent.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class AlterSchemaVersionEvent extends ListenerEvent {
+
+  private final SchemaVersion oldSchemaVersion, newSchemaVersion;
+
+  public AlterSchemaVersionEvent(boolean status, IHMSHandler handler,
+                                 SchemaVersion oldSchemaVersion,
+                                 SchemaVersion newSchemaVersion) {
+    super(status, handler);
+    this.oldSchemaVersion = oldSchemaVersion;
+    this.newSchemaVersion = newSchemaVersion;
+  }
+
+  public SchemaVersion getOldSchemaVersion() {
+    return oldSchemaVersion;
+  }
+
+  public SchemaVersion getNewSchemaVersion() {
+    return newSchemaVersion;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateISchemaEvent.java
new file mode 100644
index 0000000..348f8d3
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateISchemaEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class CreateISchemaEvent extends ListenerEvent {
+
+  private final ISchema schema;
+
+  public CreateISchemaEvent(boolean status, IHMSHandler handler, ISchema schema) {
+    super(status, handler);
+    this.schema = schema;
+  }
+
+  public ISchema getSchema() {
+    return schema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropISchemaEvent.java
new file mode 100644
index 0000000..7c03638
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropISchemaEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class DropISchemaEvent extends ListenerEvent {
+
+  private final ISchema schema;
+
+  public DropISchemaEvent(boolean status, IHMSHandler handler, ISchema schema) {
+    super(status, handler);
+    this.schema = schema;
+  }
+
+  public ISchema getSchema() {
+    return schema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropSchemaVersionEvent.java
new file mode 100644
index 0000000..c722c33
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropSchemaVersionEvent.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class DropSchemaVersionEvent extends ListenerEvent {
+
+  private final SchemaVersion schemaVersion;
+
+  public DropSchemaVersionEvent(boolean status, IHMSHandler handler,
+                                SchemaVersion schemaVersion) {
+    super(status, handler);
+    this.schemaVersion = schemaVersion;
+  }
+
+  public SchemaVersion getSchemaVersion() {
+    return schemaVersion;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAddSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAddSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAddSchemaVersionEvent.java
new file mode 100644
index 0000000..fc345f5
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAddSchemaVersionEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreAddSchemaVersionEvent extends PreEventContext {
+
+  private final SchemaVersion schemaVersion;
+
+  public PreAddSchemaVersionEvent(IHMSHandler handler, SchemaVersion schemaVersion) {
+    super(PreEventType.ADD_SCHEMA_VERSION, handler);
+    this.schemaVersion = schemaVersion;
+  }
+
+  public SchemaVersion getSchemaVersion() {
+    return schemaVersion;
+  }
+}
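
The seven event classes above follow the metastore's existing listener-event
pattern: each is an immutable wrapper around the Thrift object(s) describing
the change (ISchema or SchemaVersion), carried alongside the handler by the
ListenerEvent/PreEventContext base classes. A minimal sketch of a consumer is
below; the class and method names are illustrative only, and the Thrift
getters getName() and getVersion() are assumptions, not shown in this patch:

    import org.apache.hadoop.hive.metastore.api.ISchema;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;
    import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
    import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;

    // Hypothetical consumer; not part of the MetaStoreEventListener API.
    public class SchemaEventLogger {

      public void onCreateISchema(CreateISchemaEvent event) {
        ISchema schema = event.getSchema();
        System.out.println("schema created: " + schema.getName());
      }

      public void onAlterSchemaVersion(AlterSchemaVersionEvent event) {
        SchemaVersion before = event.getOldSchemaVersion();
        SchemaVersion after = event.getNewSchemaVersion();
        System.out.println("schema version altered: "
            + before.getVersion() + " -> " + after.getVersion());
      }
    }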


[13/50] [abbrv] hive git commit: HIVE-18211: Support to read multiple level definition for Map type in Parquet file (Colin Ma, reviewed by Ferdinand Xu)

Posted by ga...@apache.org.
HIVE-18211: Support to read multiple level definition for Map type in Parquet file (Colin Ma, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7acc4ce1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7acc4ce1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7acc4ce1

Branch: refs/heads/standalone-metastore
Commit: 7acc4ce1bbae060d890494c1499938c1eda5f3b6
Parents: 646ccce
Author: Ferdinand Xu <ch...@intel.com>
Authored: Mon Dec 18 09:35:16 2017 +0800
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Mon Dec 18 09:35:16 2017 +0800

----------------------------------------------------------------------
 .../vector/VectorizedParquetRecordReader.java   | 27 +++++++++++++++++++-
 .../parquet/TestVectorizedMapColumnReader.java  | 26 ++++++++++++++++++-
 2 files changed, 51 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7acc4ce1/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
index 4303ca9..bffe008 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedParquetRecordReader.java
@@ -61,6 +61,7 @@ import org.apache.parquet.hadoop.metadata.ParquetMetadata;
 import org.apache.parquet.hadoop.util.HadoopStreams;
 import org.apache.parquet.io.InputFile;
 import org.apache.parquet.io.SeekableInputStream;
+import org.apache.parquet.schema.GroupType;
 import org.apache.parquet.schema.InvalidSchemaException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Type;
@@ -97,6 +98,7 @@ public class VectorizedParquetRecordReader extends ParquetRecordReaderBase
   private VectorizedRowBatchCtx rbCtx;
   private Object[] partitionValues;
   private Path cacheFsPath;
+  private static final int MAP_DEFINITION_LEVEL_MAX = 3;
 
   /**
    * For each request column, the reader to read this column. This is NULL if this column
@@ -507,7 +509,30 @@ public class VectorizedParquetRecordReader extends ParquetRecordReaderBase
         throw new RuntimeException(
             "Failed to find related Parquet column descriptor with type " + type);
       }
-      List<Type> kvTypes = type.asGroupType().getFields();
+
+      // Handle the different Map definitions in Parquet, e.g.:
+      // definition has 1 group:
+      //   repeated group map (MAP_KEY_VALUE)
+      //     {required binary key (UTF8); optional binary value (UTF8);}
+      // definition has 2 groups:
+      //   optional group m1 (MAP) {
+      //     repeated group map (MAP_KEY_VALUE)
+      //       {required binary key (UTF8); optional binary value (UTF8);}
+      //   }
+      int nestGroup = 0;
+      GroupType groupType = type.asGroupType();
+      // If the field count is 2, the group already holds the key and value types;
+      // otherwise, descend into the nested group, up to MAP_DEFINITION_LEVEL_MAX levels.
+      while (groupType.getFieldCount() < 2) {
+        if (nestGroup > MAP_DEFINITION_LEVEL_MAX) {
+          throw new RuntimeException(
+              "More than " + MAP_DEFINITION_LEVEL_MAX + " level is found in Map definition, " +
+                  "Failed to get the field types for Map with type " + type);
+        }
+        groupType = groupType.getFields().get(0).asGroupType();
+        nestGroup++;
+      }
+      List<Type> kvTypes = groupType.getFields();
       VectorizedListColumnReader keyListColumnReader = new VectorizedListColumnReader(
           descriptors.get(0), pages.getPageReader(descriptors.get(0)), skipTimestampConversion,
           kvTypes.get(0));
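
The loop added above is the heart of the fix: a Parquet map column may declare
its repeated key/value group either directly or nested inside an outer (MAP)
group, so the reader unwraps single-field groups until it reaches the two-field
key/value group, giving up after MAP_DEFINITION_LEVEL_MAX levels. A standalone
sketch of that logic against the parquet-mr schema API used in the diff (the
helper class name and the level cap are illustrative):

    import java.util.List;
    import org.apache.parquet.schema.GroupType;
    import org.apache.parquet.schema.Type;

    final class MapSchemaUtil {
      // Mirrors MAP_DEFINITION_LEVEL_MAX in the patch; the value is illustrative.
      private static final int MAX_NESTING = 3;

      // Unwraps nested single-field groups until the key/value group is reached.
      static List<Type> keyValueTypes(Type mapColumn) {
        GroupType group = mapColumn.asGroupType();
        int depth = 0;
        while (group.getFieldCount() < 2) {
          if (depth > MAX_NESTING) {
            throw new IllegalStateException(
                "Map nested deeper than " + MAX_NESTING + " levels: " + mapColumn);
          }
          group = group.getFields().get(0).asGroupType();
          depth++;
        }
        return group.getFields(); // index 0 is the key type, index 1 the value type
      }
    }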

http://git-wip-us.apache.org/repos/asf/hive/blob/7acc4ce1/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedMapColumnReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedMapColumnReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedMapColumnReader.java
index c33e8ab..185dfbb 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedMapColumnReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedMapColumnReader.java
@@ -56,6 +56,8 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
 
       int mapSize = i % mapMaxSize + 1;
       if (!isNull) {
+        // map_field is used to test the multiple-level map definition
+        Group multipleLevelGroup = group.addGroup("map_field");
         for (int j = 0; j < mapSize; j++) {
           int intValForMap = getIntValue(isDictionaryEncoding, mapElementIndex);
           long longValForMap = getLongValue(isDictionaryEncoding, mapElementIndex);
@@ -74,6 +76,8 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
               .append("value", binaryValForMap);
           group.addGroup("map_decimal").append("key", decimalValForMap)
               .append("value", decimalValForMap);
+          multipleLevelGroup.addGroup("map").append("key", binaryValForMap)
+              .append("value", binaryValForMap);
           mapElementIndex++;
         }
       }
@@ -160,6 +164,14 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
     removeFile();
   }
 
+  @Test
+  public void testMultipleDefinitionMapRead() throws Exception {
+    removeFile();
+    writeMapData(initWriterFromFile(), false, 1023);
+    testMapRead(false, "multipleLevel", 1023);
+    removeFile();
+  }
+
   private void testMapReadAllType(boolean isDictionaryEncoding, int elementNum) throws Exception {
     testMapRead(isDictionaryEncoding, "int", elementNum);
     testMapRead(isDictionaryEncoding, "long", elementNum);
@@ -267,6 +279,9 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
     } else if ("decimal".equals(type)) {
       conf.set(IOConstants.COLUMNS, "map_decimal");
       conf.set(IOConstants.COLUMNS_TYPES, "map<decimal(5,2),decimal(5,2)>");
+    } else if ("multipleLevel".equals(type)) {
+      conf.set(IOConstants.COLUMNS, "map_field");
+      conf.set(IOConstants.COLUMNS_TYPES, "map<string,string>");
     }
   }
 
@@ -291,6 +306,15 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
       case "decimal":
         return String.format(schemaFormat, "decimal", "binary", "(DECIMAL(5,2))",
             "binary", "(DECIMAL(5,2))");
+      case "multipleLevel":
+        return "message hive_schema {\n"
+            + "optional group map_field (MAP) {\n"
+            + "  repeated group map (MAP_KEY_VALUE) {\n"
+            + "    required binary key;\n"
+            + "    optional binary value;\n"
+            + "  }\n"
+            + "}\n"
+            + "}\n";
       default:
         throw new RuntimeException("Unsupported type for TestVectorizedMapColumnReader!");
     }
@@ -310,7 +334,7 @@ public class TestVectorizedMapColumnReader extends VectorizedColumnReaderTestBas
     } else if ("float".equals(type)) {
       assertEquals(getFloatValue(isDictionaryEncoding, valueIndex),
           ((DoubleColumnVector)childVector).vector[position], 0);
-    } else if ("binary".equals(type)) {
+    } else if ("binary".equals(type) || "multipleLevel".equals(type)) {
       String actual = new String(ArrayUtils
           .subarray(((BytesColumnVector)childVector).vector[position],
               ((BytesColumnVector)childVector).start[position],


[27/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
new file mode 100644
index 0000000..4bb3631
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -0,0 +1,1699 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(767) NOT NULL,
+    "TYPE_NAME" text,
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(256)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text,
+    "IS_REWRITE_ENABLED" boolean NOT NULL
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "BIT_VECTOR" bytea,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "BIT_VECTOR" bytea,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+CREATE TABLE "NOTIFICATION_LOG"
+(
+    "NL_ID" BIGINT NOT NULL,
+    "EVENT_ID" BIGINT NOT NULL,
+    "EVENT_TIME" INTEGER NOT NULL,
+    "EVENT_TYPE" VARCHAR(32) NOT NULL,
+    "DB_NAME" VARCHAR(128),
+    "TBL_NAME" VARCHAR(256),
+    "MESSAGE" text,
+    "MESSAGE_FORMAT" VARCHAR(16),
+    PRIMARY KEY ("NL_ID")
+);
+
+CREATE TABLE "NOTIFICATION_SEQUENCE"
+(
+    "NNI_ID" BIGINT NOT NULL,
+    "NEXT_EVENT_ID" BIGINT NOT NULL,
+    PRIMARY KEY ("NNI_ID")
+);
+
+INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE");
+
+CREATE TABLE "KEY_CONSTRAINTS"
+(
+  "CHILD_CD_ID" BIGINT,
+  "CHILD_INTEGER_IDX" BIGINT,
+  "CHILD_TBL_ID" BIGINT,
+  "PARENT_CD_ID" BIGINT NOT NULL,
+  "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+  "PARENT_TBL_ID" BIGINT NOT NULL,
+  "POSITION" BIGINT NOT NULL,
+  "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+  "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+  "UPDATE_RULE" SMALLINT,
+  "DELETE_RULE"	SMALLINT,
+  "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+  PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+) ;
+
+--
+-- Table structure for METASTORE_DB_PROPERTIES
+--
+CREATE TABLE "METASTORE_DB_PROPERTIES"
+(
+  "PROPERTY_KEY" VARCHAR(255) NOT NULL,
+  "PROPERTY_VALUE" VARCHAR(1000) NOT NULL,
+  "DESCRIPTION" VARCHAR(1000)
+);
+
+
+CREATE TABLE "WM_RESOURCEPLAN" (
+    "RP_ID" bigint NOT NULL,
+    "NAME" character varying(128) NOT NULL,
+    "QUERY_PARALLELISM" integer,
+    "STATUS" character varying(20) NOT NULL
+);
+
+CREATE TABLE "WM_POOL" (
+    "POOL_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "PATH" character varying(1024) NOT NULL,
+    "PARENT_POOL_ID" bigint,
+    "ALLOC_FRACTION" double precision,
+    "QUERY_PARALLELISM" integer
+);
+
+CREATE TABLE "WM_TRIGGER" (
+    "TRIGGER_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "NAME" character varying(128) NOT NULL,
+    "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+    "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "WM_POOL_TO_TRIGGER" (
+    "POOL_ID" bigint NOT NULL,
+    "TRIGGER_ID" bigint NOT NULL
+);
+
+CREATE TABLE "WM_MAPPING" (
+    "MAPPING_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "ENTITY_TYPE" character varying(10) NOT NULL,
+    "ENTITY_NAME" character varying(128) NOT NULL,
+    "POOL_ID" bigint NOT NULL,
+    "ORDERING" integer
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+ALTER TABLE ONLY "METASTORE_DB_PROPERTIES"
+    ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+
+
+-- Resource plan: Primary key and unique key constraints.
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID");
+
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME");
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID");
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH");
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID");
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME");
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID");
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+
+CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+-- Resource plan FK constraints.
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+--
+-- PostgreSQL database dump complete
+--
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO bytea,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO bytea,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '3.0.0', 'Hive release version 3.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
new file mode 100644
index 0000000..81f5a66
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -0,0 +1,121 @@
+SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0';
+
+--\i 040-HIVE-16556.postgres.sql;
+CREATE TABLE "METASTORE_DB_PROPERTIES"
+(
+  "PROPERTY_KEY" VARCHAR(255) NOT NULL,
+  "PROPERTY_VALUE" VARCHAR(1000) NOT NULL,
+  "DESCRIPTION" VARCHAR(1000)
+);
+
+ALTER TABLE ONLY "METASTORE_DB_PROPERTIES"
+  ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+
+--\i 041-HIVE-16575.postgres.sql;
+CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE");
+
+--\i 042-HIVE-16922.postgres.sql;
+UPDATE "SERDE_PARAMS"
+SET "PARAM_KEY"='collection.delim'
+WHERE "PARAM_KEY"='colelction.delim';
+
+--\i 043-HIVE-16997.postgres.sql;
+ALTER TABLE "PART_COL_STATS" ADD COLUMN "BIT_VECTOR" BYTEA;
+ALTER TABLE "TAB_COL_STATS" ADD COLUMN "BIT_VECTOR" BYTEA;
+
+--\i 044-HIVE-16886.postgres.sql;
+INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE");
+
+--\i 045-HIVE-17566.postgres.sql;
+CREATE TABLE "WM_RESOURCEPLAN" (
+    "RP_ID" bigint NOT NULL,
+    "NAME" character varying(128) NOT NULL,
+    "QUERY_PARALLELISM" integer,
+    "STATUS" character varying(20) NOT NULL
+);
+
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID");
+
+ALTER TABLE ONLY "WM_RESOURCEPLAN"
+    ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME");
+
+
+CREATE TABLE "WM_POOL" (
+    "POOL_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "PATH" character varying(1024) NOT NULL,
+    "PARENT_POOL_ID" bigint,
+    "ALLOC_FRACTION" double precision,
+    "QUERY_PARALLELISM" integer
+);
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID");
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH");
+
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ALTER TABLE ONLY "WM_POOL"
+    ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+
+CREATE TABLE "WM_TRIGGER" (
+    "TRIGGER_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "NAME" character varying(128) NOT NULL,
+    "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+    "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying
+);
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID");
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME");
+
+ALTER TABLE ONLY "WM_TRIGGER"
+    ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+
+
+CREATE TABLE "WM_POOL_TO_TRIGGER" (
+    "POOL_ID" bigint NOT NULL,
+    "TRIGGER_ID" bigint NOT NULL
+);
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+    ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE;
+
+
+CREATE TABLE "WM_MAPPING" (
+    "MAPPING_ID" bigint NOT NULL,
+    "RP_ID" bigint NOT NULL,
+    "ENTITY_TYPE" character varying(10) NOT NULL,
+    "ENTITY_NAME" character varying(128) NOT NULL,
+    "POOL_ID" bigint NOT NULL,
+    "ORDERING" integer
+);
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID");
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "WM_MAPPING"
+    ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+
+UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1;
+SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0';
+
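After the upgrade runs, the recorded version can be checked against the "VERSION" row the script just updated. A minimal sketch, assuming placeholder connection details (substitute your own URL and credentials); note the quoted identifiers, since the scripts create them case-sensitively:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class CheckSchemaVersion {
  public static void main(String[] args) throws Exception {
    // Placeholder connection details; substitute your metastore database.
    try (Connection conn = DriverManager.getConnection(
             "jdbc:postgresql://localhost:5432/hivedb", "hiveuser", "hivepassword");
         Statement stmt = conn.createStatement();
         // Quoted identifiers: the schema scripts create them case-sensitively.
         ResultSet rs = stmt.executeQuery(
             "SELECT \"SCHEMA_VERSION\", \"VERSION_COMMENT\" FROM \"VERSION\" WHERE \"VER_ID\" = 1")) {
      if (rs.next()) {
        System.out.println(rs.getString(1) + ": " + rs.getString(2));  // expect 3.0.0
      }
    }
  }
}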

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/postgres/upgrade.order.postgres
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade.order.postgres b/standalone-metastore/src/main/sql/postgres/upgrade.order.postgres
new file mode 100644
index 0000000..15531df
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/upgrade.order.postgres
@@ -0,0 +1 @@
+2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java
new file mode 100644
index 0000000..4501af8
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/DbInstallBase.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.dbinstall;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+public abstract class DbInstallBase {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DbInstallBase.class);
+
+  private static final String HIVE_USER = "hiveuser";
+  protected static final String HIVE_DB = "hivedb";
+  private static final String FIRST_VERSION = "2.3.0";
+  private static final int MAX_STARTUP_WAIT = 5 * 60 * 1000;
+
+  private String metastoreHome;
+
+  protected abstract String getDockerContainerName();
+  protected abstract String getDockerImageName();
+  protected abstract String[] getDockerAdditionalArgs();
+  protected abstract String getDbType();
+  protected abstract String getDbRootUser();
+  protected abstract String getDbRootPassword();
+  protected abstract String getJdbcDriver();
+  protected abstract String getJdbcUrl();
+  /**
+   * URL to use when connecting as the root user rather than as the Hive user.
+   * @return URL
+   */
+  protected abstract String getInitialJdbcUrl();
+
+  /**
+   * Determine if the docker container is ready to use.
+   * @param logOutput output of docker logs command
+   * @return true if ready, false otherwise
+   */
+  protected abstract boolean isContainerReady(String logOutput);
+  protected abstract String getHivePassword();
+
+  @Before
+  public void runDockerContainer() throws IOException, InterruptedException {
+    if (runCmdAndPrintStreams(buildRunCmd(), 60) != 0) {
+      throw new RuntimeException("Unable to start docker container");
+    }
+    long startTime = System.currentTimeMillis();
+    ProcessResults pr;
+    do {
+      Thread.sleep(5000);
+      pr = runCmd(buildLogCmd(), 5);
+      if (pr.rc != 0) throw new RuntimeException("Failed to get docker logs");
+    } while (startTime + MAX_STARTUP_WAIT >= System.currentTimeMillis() && !isContainerReady(pr.stdout));
+    if (startTime + MAX_STARTUP_WAIT < System.currentTimeMillis()) {
+      throw new RuntimeException("Container failed to be ready in " + MAX_STARTUP_WAIT/1000 +
+          " seconds");
+    }
+    MetastoreSchemaTool.homeDir = metastoreHome = System.getProperty("test.tmp.dir", "target/tmp");
+  }
+
+  @After
+  public void stopAndRmDockerContainer() throws IOException, InterruptedException {
+    if ("true".equalsIgnoreCase(System.getProperty("metastore.itest.no.stop.container"))) {
+      LOG.warn("Not stopping container " + getDockerContainerName() + " at user request, please " +
+          "be sure to shut it down before rerunning the test.");
+      return;
+    }
+    if (runCmdAndPrintStreams(buildStopCmd(), 60) != 0) {
+      throw new RuntimeException("Unable to stop docker container");
+    }
+    if (runCmdAndPrintStreams(buildRmCmd(), 15) != 0) {
+      throw new RuntimeException("Unable to remove docker container");
+    }
+  }
+
+  private static class ProcessResults {
+    final String stdout;
+    final String stderr;
+    final int rc;
+
+    public ProcessResults(String stdout, String stderr, int rc) {
+      this.stdout = stdout;
+      this.stderr = stderr;
+      this.rc = rc;
+    }
+  }
+
+  private ProcessResults runCmd(String[] cmd, long secondsToWait) throws IOException,
+      InterruptedException {
+    LOG.info("Going to run: " + StringUtils.join(cmd, " "));
+    Process proc = Runtime.getRuntime().exec(cmd);
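+    // Note: the streams are drained only after waitFor(); this assumes the
+    // command's output fits in the OS pipe buffers, which holds for the
+    // short docker commands used here but would stall a chattier command.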
+    if (!proc.waitFor(secondsToWait, TimeUnit.SECONDS)) {
+      throw new RuntimeException("Process " + cmd[0] + " failed to run in " + secondsToWait +
+          " seconds");
+    }
+    BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream()));
+    final StringBuilder lines = new StringBuilder();
+    reader.lines()
+        .forEach(s -> lines.append(s).append('\n'));
+
+    reader = new BufferedReader(new InputStreamReader(proc.getErrorStream()));
+    final StringBuilder errLines = new StringBuilder();
+    reader.lines()
+        .forEach(s -> errLines.append(s).append('\n'));
+    return new ProcessResults(lines.toString(), errLines.toString(), proc.exitValue());
+  }
+
+  private int runCmdAndPrintStreams(String[] cmd, long secondsToWait)
+      throws InterruptedException, IOException {
+    ProcessResults results = runCmd(cmd, secondsToWait);
+    LOG.info("Stdout from proc: " + results.stdout);
+    LOG.info("Stderr from proc: " + results.stderr);
+    return results.rc;
+  }
+
+  private int createUser() {
+    return MetastoreSchemaTool.run(buildArray(
+        "-createUser",
+        "-dbType",
+        getDbType(),
+        "-userName",
+        getDbRootUser(),
+        "-passWord",
+        getDbRootPassword(),
+        "-hiveUser",
+        HIVE_USER,
+        "-hivePassword",
+        getHivePassword(),
+        "-hiveDb",
+        HIVE_DB,
+        "-url",
+        getInitialJdbcUrl(),
+        "-driver",
+        getJdbcDriver()
+    ));
+  }
+
+  private int installLatest() {
+    return MetastoreSchemaTool.run(buildArray(
+        "-initSchema",
+        "-dbType",
+        getDbType(),
+        "-userName",
+        HIVE_USER,
+        "-passWord",
+        getHivePassword(),
+        "-url",
+        getJdbcUrl(),
+        "-driver",
+        getJdbcDriver()
+    ));
+  }
+
+  private int installAVersion(String version) {
+    return MetastoreSchemaTool.run(buildArray(
+        "-initSchemaTo",
+        version,
+        "-dbType",
+        getDbType(),
+        "-userName",
+        HIVE_USER,
+        "-passWord",
+        getHivePassword(),
+        "-url",
+        getJdbcUrl(),
+        "-driver",
+        getJdbcDriver()
+    ));
+  }
+
+  private int upgradeToLatest() {
+    return MetastoreSchemaTool.run(buildArray(
+        "-upgradeSchema",
+        "-dbType",
+        getDbType(),
+        "-userName",
+        HIVE_USER,
+        "-passWord",
+        getHivePassword(),
+        "-url",
+        getJdbcUrl(),
+        "-driver",
+        getJdbcDriver()
+    ));
+  }
+
+  protected String[] buildArray(String... strs) {
+    return strs;
+  }
+
+  private String getCurrentVersionMinusOne() throws HiveMetaException {
+    List<String> scripts = MetaStoreSchemaInfoFactory.get(
+        MetastoreConf.newMetastoreConf(), metastoreHome, getDbType()
+    ).getUpgradeScripts(FIRST_VERSION);
+    Assert.assertTrue(scripts.size() > 0);
+    String lastUpgradePath = scripts.get(scripts.size() - 1);
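+    // The last upgrade script's name carries the version it upgrades from as
+    // its second "-"-separated token (e.g. upgrade-2.3.0-to-3.0.0.postgres.sql
+    // yields "2.3.0"), i.e. the release immediately before the current one.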
+    String version = lastUpgradePath.split("-")[1];
+    LOG.info("Current version minus 1 is " + version);
+    return version;
+  }
+
+  @Test
+  public void install() {
+    Assert.assertEquals(0, createUser());
+    Assert.assertEquals(0, installLatest());
+  }
+
+  @Test
+  public void upgrade() throws HiveMetaException {
+    Assert.assertEquals(0, createUser());
+    Assert.assertEquals(0, installAVersion(getCurrentVersionMinusOne()));
+    Assert.assertEquals(0, upgradeToLatest());
+  }
+
+  private String[] buildRunCmd() {
+    List<String> cmd = new ArrayList<>(4 + getDockerAdditionalArgs().length);
+    cmd.add("docker");
+    cmd.add("run");
+    cmd.add("--name");
+    cmd.add(getDockerContainerName());
+    cmd.addAll(Arrays.asList(getDockerAdditionalArgs()));
+    cmd.add(getDockerImageName());
+    return cmd.toArray(new String[cmd.size()]);
+  }
+
+  private String[] buildStopCmd() {
+    return buildArray(
+        "docker",
+        "stop",
+        getDockerContainerName()
+    );
+  }
+
+  private String[] buildRmCmd() {
+    return buildArray(
+        "docker",
+        "rm",
+        getDockerContainerName()
+    );
+  }
+
+  private String[] buildLogCmd() {
+    return buildArray(
+        "docker",
+        "logs",
+        getDockerContainerName()
+    );
+  }
+}
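The concrete subclasses that follow plug real values into this contract. As a compact hypothetical illustration — every value below (image, port, driver class, log marker, "somedb" type) is invented, and a real backend would also need schema scripts and a -dbType the schema tool understands — a new backend sketch would look like:

package org.apache.hadoop.hive.metastore.dbinstall;

// Hypothetical example: all values are invented to illustrate the
// DbInstallBase contract; see the real subclasses for working configurations.
public class ITestSomeDb extends DbInstallBase {
  @Override protected String getDockerContainerName() { return "metastore-test-somedb-install"; }
  @Override protected String getDockerImageName() { return "somedb:1.0"; }
  @Override protected String[] getDockerAdditionalArgs() {
    return buildArray("-p", "5555:5555",
        "-e", "SOMEDB_ROOT_PASSWORD=" + getDbRootPassword(), "-d");
  }
  @Override protected String getDbType() { return "somedb"; }
  @Override protected String getDbRootUser() { return "root"; }
  @Override protected String getDbRootPassword() { return "its-a-secret"; }
  @Override protected String getJdbcDriver() { return "com.somedb.jdbc.Driver"; }
  @Override protected String getJdbcUrl() { return "jdbc:somedb://localhost:5555/" + HIVE_DB; }
  @Override protected String getInitialJdbcUrl() { return "jdbc:somedb://localhost:5555/"; }
  @Override protected boolean isContainerReady(String logOutput) {
    return logOutput.contains("ready to accept connections");  // invented marker
  }
  @Override protected String getHivePassword() { return "hivepassword"; }
}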

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java
new file mode 100644
index 0000000..9999d8d
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestMysql.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.dbinstall;
+
+public class ITestMysql extends DbInstallBase {
+
+  @Override
+  protected String getDockerImageName() {
+    return "mariadb:5.5";
+  }
+
+  @Override
+  protected String[] getDockerAdditionalArgs() {
+    return buildArray(
+        "-p",
+        "3306:3306",
+        "-e",
+        "MYSQL_ROOT_PASSWORD=" + getDbRootPassword(),
+        "-d"
+    );
+  }
+
+  @Override
+  protected String getDbType() {
+    return "mysql";
+  }
+
+  @Override
+  protected String getDbRootUser() {
+    return "root";
+  }
+
+  @Override
+  protected String getDbRootPassword() {
+    return "its-a-secret";
+  }
+
+  @Override
+  protected String getJdbcDriver() {
+    return org.mariadb.jdbc.Driver.class.getName();
+  }
+
+  @Override
+  protected String getJdbcUrl() {
+    return "jdbc:mysql://localhost:3306/" + HIVE_DB;
+  }
+
+  @Override
+  protected String getInitialJdbcUrl() {
+    return "jdbc:mysql://localhost:3306/";
+  }
+
+  @Override
+  protected boolean isContainerReady(String logOutput) {
+    return logOutput.contains("MySQL init process done. Ready for start up.");
+  }
+
+  @Override
+  protected String getDockerContainerName() {
+    return "metastore-test-mysql-install";
+  }
+
+  @Override
+  protected String getHivePassword() {
+    return "hivepassword";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java
new file mode 100644
index 0000000..2cff1a5
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestOracle.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.dbinstall;
+
+public class ITestOracle extends DbInstallBase {
+  @Override
+  protected String getDockerContainerName() {
+    return "metastore-test-oracle-install";
+  }
+
+  @Override
+  protected String getDockerImageName() {
+    return "alexeiled/docker-oracle-xe-11g";
+  }
+
+  @Override
+  protected String[] getDockerAdditionalArgs() {
+    return buildArray(
+        "-p",
+        "1521:1521",
+        "-e",
+        "DEFAULT_SYS_PASS=" + getDbRootPassword(),
+        "-e",
+        "ORACLE_ALLOW_REMOTE=true",
+        "-d"
+    );
+  }
+
+  @Override
+  protected String getDbType() {
+    return "oracle";
+  }
+
+  @Override
+  protected String getDbRootUser() {
+    return "SYS as SYSDBA";
+  }
+
+  @Override
+  protected String getDbRootPassword() {
+    return "oracle";
+  }
+
+  @Override
+  protected String getJdbcDriver() {
+    return "oracle.jdbc.OracleDriver";
+  }
+
+  @Override
+  protected String getJdbcUrl() {
+    return "jdbc:oracle:thin:@//localhost:1521/xe";
+  }
+
+  @Override
+  protected String getInitialJdbcUrl() {
+    return "jdbc:oracle:thin:@//localhost:1521/xe";
+  }
+
+  @Override
+  protected boolean isContainerReady(String logOutput) {
+    return logOutput.contains("Oracle started successfully!");
+  }
+
+  @Override
+  protected String getHivePassword() {
+    return "hivepassword";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java
new file mode 100644
index 0000000..9151ac7
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestPostgres.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.dbinstall;
+
+public class ITestPostgres extends DbInstallBase {
+  @Override
+  protected String getDockerContainerName() {
+    return "metastore-test-postgres-install";
+  }
+
+  @Override
+  protected String getDockerImageName() {
+    return "postgres:9.3";
+  }
+
+  @Override
+  protected String[] getDockerAdditionalArgs() {
+    return buildArray(
+        "-p",
+        "5432:5432",
+        "-e",
+        "POSTGRES_PASSWORD=" + getDbRootPassword(),
+        "-d"
+    );
+  }
+
+  @Override
+  protected String getDbType() {
+    return "postgres";
+  }
+
+  @Override
+  protected String getDbRootUser() {
+    return "postgres";
+  }
+
+  @Override
+  protected String getDbRootPassword() {
+    return "its-a-secret";
+  }
+
+  @Override
+  protected String getJdbcDriver() {
+    return org.postgresql.Driver.class.getName();
+  }
+
+  @Override
+  protected String getJdbcUrl() {
+    return "jdbc:postgresql://localhost:5432/" + HIVE_DB;
+  }
+
+  @Override
+  protected String getInitialJdbcUrl() {
+    return "jdbc:postgresql://localhost:5432/postgres";
+  }
+
+  @Override
+  protected boolean isContainerReady(String logOutput) {
+    return logOutput.contains("database system is ready to accept connections");
+  }
+
+  @Override
+  protected String getHivePassword() {
+    return "hivepassword";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java
new file mode 100644
index 0000000..67b6eee
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/dbinstall/ITestSqlServer.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.dbinstall;
+
+public class ITestSqlServer extends DbInstallBase {
+  @Override
+  protected String getDockerContainerName() {
+    return "metastore-test-mssql-install";
+  }
+
+  @Override
+  protected String getDockerImageName() {
+    return "microsoft/mssql-server-linux:2017-GA";
+  }
+
+  @Override
+  protected String[] getDockerAdditionalArgs() {
+    return buildArray(
+        "-p",
+        "1433:1433",
+        "-e",
+        "ACCEPT_EULA=Y",
+        "-e",
+        "SA_PASSWORD=" + getDbRootPassword(),
+        "-d"
+    );
+  }
+
+  @Override
+  protected String getDbType() {
+    return "mssql";
+  }
+
+  @Override
+  protected String getDbRootUser() {
+    return "SA";
+  }
+
+  @Override
+  protected String getDbRootPassword() {
+    return "Its-a-s3cret";
+  }
+
+  @Override
+  protected String getJdbcDriver() {
+    return com.microsoft.sqlserver.jdbc.SQLServerDriver.class.getName();
+  }
+
+  @Override
+  protected String getJdbcUrl() {
+    return "jdbc:sqlserver://localhost:1433;DatabaseName=" + HIVE_DB + ";";
+  }
+
+  @Override
+  protected String getInitialJdbcUrl() {
+    return  "jdbc:sqlserver://localhost:1433";
+  }
+
+  @Override
+  protected boolean isContainerReady(String logOutput) {
+    return logOutput.contains("Recovery is complete. This is an informational message only. No user action is required.");
+  }
+
+  @Override
+  protected String getHivePassword() {
+    return "h1vePassword!";
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java
new file mode 100644
index 0000000..8b07e93
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestMetastoreSchemaTool.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestMetastoreSchemaTool {
+
+  private String scriptFile = System.getProperty("java.io.tmpdir") + File.separator + "someScript.sql";
+  @Mock
+  private Configuration conf;
+  private MetastoreSchemaTool.CommandBuilder builder;
+  private String password = "reallySimplePassword";
+
+  @Before
+  public void setup() throws IOException {
+    conf = MetastoreConf.newMetastoreConf();
+    File file = new File(scriptFile);
+    if (!file.exists()) {
+      file.createNewFile();
+    }
+    builder = new MetastoreSchemaTool.CommandBuilder(conf, null, null, "testUser", password, scriptFile);
+  }
+
+  @After
+  public void cleanUp() throws IOException {
+    new File(scriptFile).delete();
+  }
+
+  @Test
+  public void shouldReturnStrippedPassword() throws IOException {
+    assertFalse(builder.buildToLog().contains(password));
+  }
+
+  @Test
+  public void shouldReturnActualPassword() throws IOException {
+    String[] strings = builder.buildToRun();
+    assertTrue(Arrays.asList(strings).contains(password));
+  }
+}
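The two tests above pin down the CommandBuilder contract: buildToRun() yields the real argument vector (password included) while buildToLog() masks it. A minimal usage sketch under the same assumptions as the test — same argument order, with the two nulls being the optional settings the test also leaves unset:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool;

public class CommandBuilderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreSchemaTool.CommandBuilder builder = new MetastoreSchemaTool.CommandBuilder(
        conf, null, null, "testUser", "secret", "/tmp/someScript.sql");
    System.out.println(builder.buildToLog());  // password masked: safe to log
    String[] argv = builder.buildToRun();      // real argv: contains "secret"
    System.out.println(String.join(" ", argv).contains("secret"));  // true
  }
}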


[21/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
new file mode 100644
index 0000000..2599ab1
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -0,0 +1,3071 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.lang.reflect.Field;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.datanucleus.api.jdo.JDOPersistenceManager;
+import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+import org.junit.Assert;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.metastore.api.AggrStats;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.FunctionType;
+import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.ResourceType;
+import org.apache.hadoop.hive.metastore.api.ResourceUri;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class TestHiveMetaStore {
+  private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStore.class);
+  protected static HiveMetaStoreClient client;
+  protected static Configuration conf;
+  protected static Warehouse warehouse;
+  protected static boolean isThriftClient = false;
+
+  private static final String TEST_DB1_NAME = "testdb1";
+  private static final String TEST_DB2_NAME = "testdb2";
+
+  private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+
+  protected abstract HiveMetaStoreClient createClient() throws Exception;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+    warehouse = new Warehouse(conf);
+
+    // set some values to use for getting conf. vars
+    MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
+    conf.set("hive.key1", "value1");
+    conf.set("hive.key2", "http://www.example.com");
+    conf.set("hive.key3", "");
+    conf.set("hive.key4", "0");
+    conf.set("datanucleus.autoCreateTables", "false");
+
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
+    MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST);
+    MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
+  }
+
+  @Test
+  public void testNameMethods() {
+    Map<String, String> spec = new LinkedHashMap<>();
+    spec.put("ds", "2008-07-01 14:13:12");
+    spec.put("hr", "14");
+    List<String> vals = new ArrayList<>();
+    vals.addAll(spec.values());
+    String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
+
+    try {
+      List<String> testVals = client.partitionNameToVals(partName);
+      assertTrue("Values from name are incorrect", vals.equals(testVals));
+
+      Map<String, String> testSpec = client.partitionNameToSpec(partName);
+      assertTrue("Spec from name is incorrect", spec.equals(testSpec));
+
+      List<String> emptyVals = client.partitionNameToVals("");
+      assertTrue("Values should be empty", emptyVals.size() == 0);
+
+      Map<String, String> emptySpec =  client.partitionNameToSpec("");
+      assertTrue("Spec should be empty", emptySpec.size() == 0);
+    } catch (Exception e) {
+      fail("Unexpected exception: " + e);
+    }
+  }
+
+  /**
+   * Tests creating a table with partitions, then tries to drop the table
+   * without dropping the partitions first.
+   */
+  @Test
+  public void testPartition() throws Exception {
+    partitionTester(client, conf);
+  }
+
+  private static void partitionTester(HiveMetaStoreClient client, Configuration conf)
+    throws Exception {
+    try {
+      String dbName = "compdb";
+      String tblName = "comptbl";
+      String typeName = "Person";
+      List<String> vals = makeVals("2008-07-01 14:13:12", "14");
+      List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
+      List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
+      List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+      db = client.getDatabase(dbName);
+      Path dbPath = new Path(db.getLocationUri());
+      FileSystem fs = FileSystem.get(dbPath.toUri(), conf);
+
+      client.dropType(typeName);
+      Type typ1 = new Type();
+      typ1.setName(typeName);
+      typ1.setFields(new ArrayList<>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+      client.createType(typ1);
+
+      List<String> skewedColValue = Collections.singletonList("1");
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(typ1.getFields())
+          .setNumBuckets(1)
+          .addBucketCol("name")
+          .addTableParam("test_param_1", "Use this for comments etc")
+          .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+          .addSkewedColName("name")
+          .setSkewedColValues(Collections.singletonList(skewedColValue))
+          .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1"))
+          .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+          .addPartCol("hr", ColumnType.STRING_TYPE_NAME)
+          .build();
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
+      Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
+      Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
+      Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
+
+      // check if the partition exists (it shouldn't)
+      boolean exceptionThrown = false;
+      try {
+        client.getPartition(dbName, tblName, vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
+      Partition retp = client.add_partition(part);
+      assertNotNull("Unable to create partition " + part, retp);
+      Partition retp2 = client.add_partition(part2);
+      assertNotNull("Unable to create partition " + part2, retp2);
+      Partition retp3 = client.add_partition(part3);
+      assertNotNull("Unable to create partition " + part3, retp3);
+      Partition retp4 = client.add_partition(part4);
+      assertNotNull("Unable to create partition " + part4, retp4);
+
+      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
+      if(isThriftClient) {
+        // since we are using thrift, 'part' will not have the create time and
+        // last DDL time set since it does not get updated in the add_partition()
+        // call - likewise part2 and part3 - set it correctly so that equals check
+        // doesn't fail
+        adjust(client, part, dbName, tblName);
+        adjust(client, part2, dbName, tblName);
+        adjust(client, part3, dbName, tblName);
+      }
+      assertTrue("Partitions are not same", part.equals(part_get));
+
+      // check null cols schemas for a partition
+      List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
+      Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
+      part6.getSd().setCols(null);
+      LOG.info("Creating partition will null field schema");
+      client.add_partition(part6);
+      LOG.info("Listing all partitions for table " + dbName + "." + tblName);
+      final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
+      boolean foundPart = false;
+      for (Partition p : partitions) {
+        if (p.getValues().equals(vals6)) {
+          assertNull(p.getSd().getCols());
+          LOG.info("Found partition " + p + " having null field schema");
+          foundPart = true;
+        }
+      }
+      assertTrue(foundPart);
+
+      String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
+      String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
+      String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
+      String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
+
+      part_get = client.getPartition(dbName, tblName, partName);
+      assertTrue("Partitions are not the same", part.equals(part_get));
+
+      // Test partition listing with a partial spec - ds is specified but hr is not
+      List<String> partialVals = new ArrayList<>();
+      partialVals.add(vals.get(0));
+      Set<Partition> parts = new HashSet<>();
+      parts.add(part);
+      parts.add(part2);
+
+      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
+          (short) -1);
+      assertTrue("Should have returned 2 partitions", partial.size() == 2);
+      assertTrue("Not all parts returned", partial.containsAll(parts));
+
+      Set<String> partNames = new HashSet<>();
+      partNames.add(partName);
+      partNames.add(part2Name);
+      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+          (short) -1);
+      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+      partNames.add(part3Name);
+      partNames.add(part4Name);
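+      // an empty ds value acts as a wildcard, so all five partitions
+      // (the four dated ones plus the null-schema partition) are expected back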
+      partialVals.clear();
+      partialVals.add("");
+      partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
+      assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
+      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+      // Test partition listing with a partial spec - hr is specified but ds is not
+      parts.clear();
+      parts.add(part2);
+      parts.add(part3);
+
+      partialVals.clear();
+      partialVals.add("");
+      partialVals.add(vals2.get(1));
+
+      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
+      assertEquals("Should have returned 2 partitions", 2, partial.size());
+      assertTrue("Not all parts returned", partial.containsAll(parts));
+
+      partNames.clear();
+      partNames.add(part2Name);
+      partNames.add(part3Name);
+      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+          (short) -1);
+      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
+      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+      // Verify escaped partition names don't return partitions
+      exceptionThrown = false;
+      try {
+        String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
+        client.getPartition(dbName, tblName, badPartName);
+      } catch(NoSuchObjectException e) {
+        exceptionThrown = true;
+      }
+      assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
+
+      Path partPath = new Path(part.getSd().getLocation());
+
+      assertTrue(fs.exists(partPath));
+      client.dropPartition(dbName, tblName, part.getValues(), true);
+      assertFalse(fs.exists(partPath));
+
+      // Test append_partition_by_name
+      client.appendPartition(dbName, tblName, partName);
+      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
+      assertTrue("Append partition by name failed", part5.getValues().equals(vals));
+      Path part5Path = new Path(part5.getSd().getLocation());
+      assertTrue(fs.exists(part5Path));
+
+      // Test drop_partition_by_name
+      assertTrue("Drop partition by name failed",
+          client.dropPartition(dbName, tblName, partName, true));
+      assertFalse(fs.exists(part5Path));
+
+      // add the partition again so that drop table with a partition can be
+      // tested
+      retp = client.add_partition(part);
+      assertNotNull("Unable to create partition " + part, retp);
+
+      // test add_partitions
+
+      List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
+      List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
+      List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
+      List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
+      List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
+
+      Exception savedException;
+
+      // add_partitions(empty list) : ok, normal operation
+      client.add_partitions(new ArrayList<>());
+
+      // add_partitions(1,2,3) : ok, normal operation
+      Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
+      Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
+      Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
+      client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));
+
+      if(isThriftClient) {
+        // do DDL time munging if thrift mode
+        adjust(client, mpart1, dbName, tblName);
+        adjust(client, mpart2, dbName, tblName);
+        adjust(client, mpart3, dbName, tblName);
+      }
+      verifyPartitionsPublished(client, dbName, tblName,
+          Arrays.asList(mvals1.get(0)),
+          Arrays.asList(mpart1,mpart2,mpart3));
+
+      Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
+      Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
+
+      // create dir for /mpart5
+      Path mp5Path = new Path(mpart5.getSd().getLocation());
+      warehouse.mkdirs(mp5Path);
+      assertTrue(fs.exists(mp5Path));
+
+      // add_partitions(5,4) : err = duplicate keyvals on mpart4
+      savedException = null;
+      try {
+        client.add_partitions(Arrays.asList(mpart5,mpart4));
+      } catch (Exception e) {
+        savedException = e;
+      } finally {
+        assertNotNull(savedException);
+      }
+
+      // check that /mpart4 does not exist, but /mpart5 still does.
+      assertTrue(fs.exists(mp5Path));
+      assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
+
+      // add_partitions(5) : ok
+      client.add_partitions(Arrays.asList(mpart5));
+
+      if(isThriftClient) {
+        // do DDL time munging if thrift mode
+        adjust(client, mpart5, dbName, tblName);
+      }
+
+      verifyPartitionsPublished(client, dbName, tblName,
+          Arrays.asList(mvals1.get(0)),
+          Arrays.asList(mpart1,mpart2,mpart3,mpart5));
+
+      //// end add_partitions tests
+
+      client.dropTable(dbName, tblName);
+
+      client.dropType(typeName);
+
+      // recreate table as external, drop partition and it should
+      // still exist
+      tbl.setParameters(new HashMap<>());
+      tbl.getParameters().put("EXTERNAL", "TRUE");
+      client.createTable(tbl);
+      retp = client.add_partition(part);
+      assertTrue(fs.exists(partPath));
+      client.dropPartition(dbName, tblName, part.getValues(), true);
+      assertTrue(fs.exists(partPath));
+
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+
+      client.dropDatabase(dbName);
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testPartition() failed.");
+      throw e;
+    }
+  }
+
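+  /**
+   * Lists partitions for the given partial spec and asserts that exactly the
+   * expected partitions come back.
+   */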
+  private static void verifyPartitionsPublished(HiveMetaStoreClient client,
+      String dbName, String tblName, List<String> partialSpec,
+      List<Partition> expectedPartitions) throws TException {
+    // Test partition listing with a partial spec
+
+    List<Partition> mpartial = client.listPartitions(dbName, tblName, partialSpec,
+        (short) -1);
+    assertEquals("Should have returned "+expectedPartitions.size()+
+        " partitions, returned " + mpartial.size(),
+        expectedPartitions.size(), mpartial.size());
+    assertTrue("Not all parts returned", mpartial.containsAll(expectedPartitions));
+  }
+
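+  /** Builds the (ds, hr) partition-value list used throughout these tests. */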
+  private static List<String> makeVals(String ds, String id) {
+    List <String> vals4 = new ArrayList<>(2);
+    vals4 = new ArrayList<>(2);
+    vals4.add(ds);
+    vals4.add(id);
+    return vals4;
+  }
+
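+  /**
+   * Builds a Partition that reuses the table's storage descriptor (deep-copied),
+   * appends the given suffix to the table location, and pre-computes fast stats.
+   */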
+  private static Partition makePartitionObject(String dbName, String tblName,
+      List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
+    Partition part4 = new Partition();
+    part4.setDbName(dbName);
+    part4.setTableName(tblName);
+    part4.setValues(ptnVals);
+    part4.setParameters(new HashMap<>());
+    part4.setSd(tbl.getSd().deepCopy());
+    part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
+    part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
+    MetaStoreUtils.updatePartitionStatsFast(part4, warehouse, null);
+    return part4;
+  }
+
+  @Test
+  public void testListPartitions() throws Throwable {
+    // create a table with multiple partitions
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    cleanUp(dbName, tblName, typeName);
+
+    List<List<String>> values = new ArrayList<>();
+    values.add(makeVals("2008-07-01 14:13:12", "14"));
+    values.add(makeVals("2008-07-01 14:13:12", "15"));
+    values.add(makeVals("2008-07-02 14:13:12", "15"));
+    values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+    List<Partition> partitions = client.listPartitions(dbName, tblName, (short)-1);
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() +
+      " partitions", values.size(), partitions.size());
+
+    partitions = client.listPartitions(dbName, tblName, (short)(values.size()/2));
+
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() / 2 +
+      " partitions",values.size() / 2, partitions.size());
+
+
+    partitions = client.listPartitions(dbName, tblName, (short) (values.size() * 2));
+
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() +
+      " partitions",values.size(), partitions.size());
+
+    cleanUp(dbName, tblName, typeName);
+
+  }
+
+  @Test
+  public void testListPartitionsWithLimitEnabled() throws Throwable {
+    // create a table with multiple partitions
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    cleanUp(dbName, tblName, typeName);
+
+    // Create too many partitions, just enough to validate over limit requests
+    List<List<String>> values = new ArrayList<>();
+    for (int i=0; i<DEFAULT_LIMIT_PARTITION_REQUEST + 1; i++) {
+      values.add(makeVals("2008-07-01 14:13:12", Integer.toString(i)));
+    }
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+    List<Partition> partitions;
+    short maxParts;
+
+    // Requesting more partitions than allowed should throw an exception
+    try {
+      maxParts = -1;
+      partitions = client.listPartitions(dbName, tblName, maxParts);
+      fail("should have thrown MetaException about partition limit");
+    } catch (MetaException e) {
+      // expected: the request exceeds the configured partition limit
+    }
+
+    // Requesting more partitions than allowed should throw an exception
+    try {
+      maxParts = DEFAULT_LIMIT_PARTITION_REQUEST + 1;
+      partitions = client.listPartitions(dbName, tblName, maxParts);
+      fail("should have thrown MetaException about partition limit");
+    } catch (MetaException e) {
+      // expected: the request exceeds the configured partition limit
+    }
+
+    // Requesting less partitions than allowed should work
+    maxParts = DEFAULT_LIMIT_PARTITION_REQUEST / 2;
+    partitions = client.listPartitions(dbName, tblName, maxParts);
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned 50 partitions", maxParts, partitions.size());
+  }
+
+  @Test
+  public void testAlterTableCascade() throws Throwable {
+    // create a table with multiple partitions
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    cleanUp(dbName, tblName, typeName);
+
+    List<List<String>> values = new ArrayList<>();
+    values.add(makeVals("2008-07-01 14:13:12", "14"));
+    values.add(makeVals("2008-07-01 14:13:12", "15"));
+    values.add(makeVals("2008-07-02 14:13:12", "15"));
+    values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+    Table tbl = client.getTable(dbName, tblName);
+    List<FieldSchema> cols = tbl.getSd().getCols();
+    cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, ""));
+    tbl.getSd().setCols(cols);
+    //add new column with cascade option
+    client.alter_table(dbName, tblName, tbl, true);
+    // cascade=true propagates the new column to existing partitions as well
+    Table tbl2 = client.getTable(dbName, tblName);
+    assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size());
+    assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName());
+    //get a partition
+    List<String> pvalues = new ArrayList<>(2);
+    pvalues.add("2008-07-01 14:13:12");
+    pvalues.add("14");
+    Partition partition = client.getPartition(dbName, tblName, pvalues);
+    assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+    assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName());
+
+    //add another column
+    cols = tbl.getSd().getCols();
+    cols.add(new FieldSchema("new_col2", ColumnType.STRING_TYPE_NAME, ""));
+    tbl.getSd().setCols(cols);
+    //add new column with no cascade option
+    client.alter_table(dbName, tblName, tbl, false);
+    tbl2 = client.getTable(dbName, tblName);
+    assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size());
+    assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName());
+    //get partition, this partition should not have the newly added column since cascade option
+    //was false
+    partition = client.getPartition(dbName, tblName, pvalues);
+    assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+  }
+
+
+  @Test
+  public void testListPartitionNames() throws Throwable {
+    // create a table with multiple partitions
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    cleanUp(dbName, tblName, typeName);
+
+    List<List<String>> values = new ArrayList<>();
+    values.add(makeVals("2008-07-01 14:13:12", "14"));
+    values.add(makeVals("2008-07-01 14:13:12", "15"));
+    values.add(makeVals("2008-07-02 14:13:12", "15"));
+    values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+    List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() +
+      " partitions", values.size(), partitions.size());
+
+    partitions = client.listPartitionNames(dbName, tblName, (short)(values.size()/2));
+
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() / 2 +
+      " partitions",values.size() / 2, partitions.size());
+
+
+    partitions = client.listPartitionNames(dbName, tblName, (short) (values.size() * 2));
+
+    assertNotNull("should have returned partitions", partitions);
+    assertEquals(" should have returned " + values.size() +
+      " partitions",values.size(), partitions.size());
+
+    cleanUp(dbName, tblName, typeName);
+
+  }
+
+
+  @Test
+  public void testDropTable() throws Throwable {
+    // create a table with multiple partitions
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    cleanUp(dbName, tblName, typeName);
+
+    List<List<String>> values = new ArrayList<>();
+    values.add(makeVals("2008-07-01 14:13:12", "14"));
+    values.add(makeVals("2008-07-01 14:13:12", "15"));
+    values.add(makeVals("2008-07-02 14:13:12", "15"));
+    values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+    client.dropTable(dbName, tblName);
+    client.dropType(typeName);
+
+    boolean exceptionThrown = false;
+    try {
+      client.getTable(dbName, tblName);
+    } catch(Exception e) {
+      assertEquals("table should not have existed",
+          NoSuchObjectException.class, e.getClass());
+      exceptionThrown = true;
+    }
+    assertTrue("Table " + tblName + " should have been dropped ", exceptionThrown);
+
+  }
+
+  @Test
+  public void testAlterViewPartition() throws Throwable {
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String viewName = "compView";
+
+    client.dropTable(dbName, tblName);
+    silentDropDatabase(dbName);
+    Database db = new Database();
+    db.setName(dbName);
+    db.setDescription("Alter Partition Test database");
+    client.createDatabase(db);
+
+    Table tbl = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .addCol("name", ColumnType.STRING_TYPE_NAME)
+        .addCol("income", ColumnType.INT_TYPE_NAME)
+        .build();
+
+    client.createTable(tbl);
+
+    if (isThriftClient) {
+      // the createTable() above does not update the location in the 'tbl'
+      // object when the client is a thrift client and the code below relies
+      // on the location being present in the 'tbl' object - so get the table
+      // from the metastore
+      tbl = client.getTable(dbName, tblName);
+    }
+
+    ArrayList<FieldSchema> viewCols = new ArrayList<>(1);
+    viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+
+    ArrayList<FieldSchema> viewPartitionCols = new ArrayList<>(1);
+    viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+
+    Table view = new Table();
+    view.setDbName(dbName);
+    view.setTableName(viewName);
+    view.setTableType(TableType.VIRTUAL_VIEW.name());
+    view.setPartitionKeys(viewPartitionCols);
+    view.setViewOriginalText("SELECT income, name FROM " + tblName);
+    view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName +
+        "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
+    view.setRewriteEnabled(false);
+    StorageDescriptor viewSd = new StorageDescriptor();
+    view.setSd(viewSd);
+    viewSd.setCols(viewCols);
+    viewSd.setCompressed(false);
+    viewSd.setParameters(new HashMap<>());
+    viewSd.setSerdeInfo(new SerDeInfo());
+    viewSd.getSerdeInfo().setParameters(new HashMap<>());
+
+    client.createTable(view);
+
+    if (isThriftClient) {
+      // the createTable() above does not update the location in the 'tbl'
+      // object when the client is a thrift client and the code below relies
+      // on the location being present in the 'tbl' object - so get the table
+      // from the metastore
+      view = client.getTable(dbName, viewName);
+    }
+
+    List<String> vals = new ArrayList<>(1);
+    vals.add("abc");
+
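+    // a partition of a virtual view carries no storage descriptor or location;
+    // only the values and parameters are populated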
+    Partition part = new Partition();
+    part.setDbName(dbName);
+    part.setTableName(viewName);
+    part.setValues(vals);
+    part.setParameters(new HashMap<>());
+
+    client.add_partition(part);
+
+    Partition part2 = client.getPartition(dbName, viewName, part.getValues());
+
+    part2.getParameters().put("a", "b");
+
+    client.alter_partition(dbName, viewName, part2, null);
+
+    Partition part3 = client.getPartition(dbName, viewName, part.getValues());
+    assertEquals("couldn't view alter partition", part3.getParameters().get(
+        "a"), "b");
+
+    client.dropTable(dbName, viewName);
+
+    client.dropTable(dbName, tblName);
+
+    client.dropDatabase(dbName);
+  }
+
+  @Test
+  public void testAlterPartition() throws Throwable {
+
+    try {
+      String dbName = "compdb";
+      String tblName = "comptbl";
+      List<String> vals = new ArrayList<>(2);
+      vals.add("2008-07-01");
+      vals.add("14");
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Alter Partition Test database");
+      client.createDatabase(db);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .addCol("name", ColumnType.STRING_TYPE_NAME)
+          .addCol("income", ColumnType.INT_TYPE_NAME)
+          .addTableParam("test_param_1", "Use this for comments etc")
+          .addBucketCol("name")
+          .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+          .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+          .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+          .build();
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<>());
+      part.setSd(tbl.getSd());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+
+      client.add_partition(part);
+
+      Partition part2 = client.getPartition(dbName, tblName, part.getValues());
+
+      part2.getParameters().put("retention", "10");
+      part2.getSd().setNumBuckets(12);
+      part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
+      client.alter_partition(dbName, tblName, part2, null);
+
+      Partition part3 = client.getPartition(dbName, tblName, part.getValues());
+      assertEquals("couldn't alter partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
+          12);
+
+      client.dropTable(dbName, tblName);
+
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testPartition() failed.");
+      throw e;
+    }
+  }
+
+  @Test
+  public void testRenamePartition() throws Throwable {
+
+    try {
+      String dbName = "compdb1";
+      String tblName = "comptbl1";
+      List<String> vals = new ArrayList<>(2);
+      vals.add("2011-07-11");
+      vals.add("8");
+      String part_path = "/ds=2011-07-11/hr=8";
+      List<String> tmp_vals = new ArrayList<>(2);
+      tmp_vals.add("tmp_2011-07-11");
+      tmp_vals.add("-8");
+      String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      db.setDescription("Rename Partition Test database");
+      client.createDatabase(db);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .addCol("name", ColumnType.STRING_TYPE_NAME)
+          .addCol("income", ColumnType.INT_TYPE_NAME)
+          .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+          .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+          .build();
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Partition part = new Partition();
+      part.setDbName(dbName);
+      part.setTableName(tblName);
+      part.setValues(vals);
+      part.setParameters(new HashMap<>());
+      part.setSd(tbl.getSd().deepCopy());
+      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+      part.getParameters().put("retention", "10");
+      part.getSd().setNumBuckets(12);
+      part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+
+      client.add_partition(part);
+
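+      // rename to the tmp spec; renamePartition takes the old values plus a
+      // Partition object that already carries the new ones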
+      part.setValues(tmp_vals);
+      client.renamePartition(dbName, tblName, vals, part);
+
+      boolean exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part2_path);
+
+      part.setValues(vals);
+      client.renamePartition(dbName, tblName, tmp_vals, part);
+
+      exceptionThrown = false;
+      try {
+        Partition p = client.getPartition(dbName, tblName, tmp_vals);
+      } catch(Exception e) {
+        assertEquals("partition should not have existed",
+            NoSuchObjectException.class, e.getClass());
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+
+      part3 = client.getPartition(dbName, tblName, vals);
+      assertEquals("couldn't rename partition", part3.getParameters().get(
+          "retention"), "10");
+      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+          .getParameters().get("abc"), "1");
+      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+          12);
+      assertEquals("new partition sd matches", part3.getSd().getLocation(),
+          tbl.getSd().getLocation() + part_path);
+
+      client.dropTable(dbName, tblName);
+
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testRenamePartition() failed.");
+      throw e;
+    }
+  }
+
+  @Test
+  public void testDatabase() throws Throwable {
+    try {
+      // clear up any existing databases
+      silentDropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB2_NAME);
+
+      Database db = new DatabaseBuilder()
+          .setName(TEST_DB1_NAME)
+          .setOwnerName(SecurityUtils.getUser())
+          .build();
+      Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName());
+      client.createDatabase(db);
+
+      db = client.getDatabase(TEST_DB1_NAME);
+
+      assertEquals("name of returned db is different from that of inserted db",
+          TEST_DB1_NAME, db.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDatabasePath(db).toString(), db.getLocationUri());
+      assertEquals(db.getOwnerName(), SecurityUtils.getUser());
+      assertEquals(db.getOwnerType(), PrincipalType.USER);
+      Database db2 = new Database();
+      db2.setName(TEST_DB2_NAME);
+      client.createDatabase(db2);
+
+      db2 = client.getDatabase(TEST_DB2_NAME);
+
+      assertEquals("name of returned db is different from that of inserted db",
+          TEST_DB2_NAME, db2.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
+
+      List<String> dbs = client.getDatabases(".*");
+
+      assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
+      assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));
+
+      client.dropDatabase(TEST_DB1_NAME);
+      client.dropDatabase(TEST_DB2_NAME);
+      silentDropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB2_NAME);
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDatabase() failed.");
+      throw e;
+    }
+  }
+
+  @Test
+  public void testDatabaseLocationWithPermissionProblems() throws Exception {
+
+    // Note: The following test will fail if you are running this test as root. Setting
+    // permission to '0' on the database folder will not preclude root from being able
+    // to create the necessary files.
+
+    if (System.getProperty("user.name").equals("root")) {
+      System.err.println("Skipping test because you are running as root!");
+      return;
+    }
+
+    silentDropDatabase(TEST_DB1_NAME);
+
+    Database db = new Database();
+    db.setName(TEST_DB1_NAME);
+    String dbLocation =
+      MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_";
+    FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+    fs.mkdirs(
+              new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+              new FsPermission((short) 0));
+    db.setLocationUri(dbLocation);
+
+    boolean createFailed = false;
+    try {
+      client.createDatabase(db);
+    } catch (MetaException cantCreateDB) {
+      createFailed = true;
+    } finally {
+      // Cleanup
+      if (!createFailed) {
+        try {
+          client.dropDatabase(TEST_DB1_NAME);
+        } catch(Exception e) {
+          System.err.println("Failed to remove database in cleanup: " + e.getMessage());
+        }
+      }
+
+      // 0755 (octal) restores rwxr-xr-x; a decimal 755 would set unintended bits
+      fs.setPermission(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+                       new FsPermission((short) 0755));
+      fs.delete(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), true);
+    }
+
+    assertTrue("Database creation succeeded even with permission problem", createFailed);
+  }
+
+  @Test
+  public void testDatabaseLocation() throws Throwable {
+    try {
+      // clear up any existing databases
+      silentDropDatabase(TEST_DB1_NAME);
+
+      Database db = new Database();
+      db.setName(TEST_DB1_NAME);
+      String dbLocation =
+          MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_";
+      db.setLocationUri(dbLocation);
+      client.createDatabase(db);
+
+      db = client.getDatabase(TEST_DB1_NAME);
+
+      assertEquals("name of returned db is different from that of inserted db",
+          TEST_DB1_NAME, db.getName());
+      assertEquals("location of the returned db is different from that of inserted db",
+          warehouse.getDnsPath(new Path(dbLocation)).toString(), db.getLocationUri());
+
+      client.dropDatabase(TEST_DB1_NAME);
+      silentDropDatabase(TEST_DB1_NAME);
+
+      boolean objectNotExist = false;
+      try {
+        client.getDatabase(TEST_DB1_NAME);
+      } catch (NoSuchObjectException e) {
+        objectNotExist = true;
+      }
+      assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist);
+
+      db = new Database();
+      db.setName(TEST_DB1_NAME);
+      dbLocation =
+          MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_";
+      FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+      fs.createNewFile(new Path(dbLocation));
+      fs.deleteOnExit(new Path(dbLocation));
+      db.setLocationUri(dbLocation);
+
+      boolean createFailed = false;
+      try {
+        client.createDatabase(db);
+      } catch (MetaException cantCreateDB) {
+        System.err.println(cantCreateDB.getMessage());
+        createFailed = true;
+      }
+      assertTrue("Database creation succeeded even location exists and is a file", createFailed);
+
+      objectNotExist = false;
+      try {
+        client.getDatabase(TEST_DB1_NAME);
+      } catch (NoSuchObjectException e) {
+        objectNotExist = true;
+      }
+      assertTrue("Database " + TEST_DB1_NAME + " exists when location is specified and is a file",
+          objectNotExist);
+
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDatabaseLocation() failed.");
+      throw e;
+    }
+  }
+
+
+  @Test
+  public void testSimpleTypeApi() throws Exception {
+    try {
+      client.dropType(ColumnType.INT_TYPE_NAME);
+
+      Type typ1 = new Type();
+      typ1.setName(ColumnType.INT_TYPE_NAME);
+      boolean ret = client.createType(typ1);
+      assertTrue("Unable to create type", ret);
+
+      Type typ1_2 = client.getType(ColumnType.INT_TYPE_NAME);
+      assertNotNull(typ1_2);
+      assertEquals(typ1.getName(), typ1_2.getName());
+
+      ret = client.dropType(ColumnType.INT_TYPE_NAME);
+      assertTrue("unable to drop type integer", ret);
+
+      boolean exceptionThrown = false;
+      try {
+        client.getType(ColumnType.INT_TYPE_NAME);
+      } catch (NoSuchObjectException e) {
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testSimpleTypeApi() failed.");
+      throw e;
+    }
+  }
+
+  // TODO:pc need to enhance this with complex fields and getType_all function
+  @Test
+  public void testComplexTypeApi() throws Exception {
+    try {
+      client.dropType("Person");
+
+      Type typ1 = new Type();
+      typ1.setName("Person");
+      typ1.setFields(new ArrayList<>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+      boolean ret = client.createType(typ1);
+      assertTrue("Unable to create type", ret);
+
+      Type typ1_2 = client.getType("Person");
+      assertNotNull("type Person not found", typ1_2);
+      assertEquals(typ1.getName(), typ1_2.getName());
+      assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
+      assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
+      assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
+
+      client.dropType("Family");
+
+      Type fam = new Type();
+      fam.setName("Family");
+      fam.setFields(new ArrayList<>(2));
+      fam.getFields().add(
+          new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      fam.getFields().add(
+          new FieldSchema("members",
+              ColumnType.getListType(typ1.getName()), ""));
+
+      ret = client.createType(fam);
+      assertTrue("Unable to create type " + fam.getName(), ret);
+
+      Type fam2 = client.getType("Family");
+      assertNotNull("type Person not found", fam2);
+      assertEquals(fam.getName(), fam2.getName());
+      assertEquals(fam.getFields().size(), fam2.getFields().size());
+      assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
+      assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
+
+      ret = client.dropType("Family");
+      assertTrue("unable to drop type Family", ret);
+
+      ret = client.dropType("Person");
+      assertTrue("unable to drop type Person", ret);
+
+      boolean exceptionThrown = false;
+      try {
+        client.getType("Person");
+      } catch (NoSuchObjectException e) {
+        exceptionThrown = true;
+      }
+      assertTrue("Expected NoSuchObjectException", exceptionThrown);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testComplexTypeApi() failed.");
+      throw e;
+    }
+  }
+
+  @Test
+  public void testSimpleTable() throws Exception {
+    try {
+      String dbName = "simpdb";
+      String tblName = "simptbl";
+      String tblName2 = "simptbl2";
+      String typeName = "Person";
+
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      client.dropType(typeName);
+      Type typ1 = new Type();
+      typ1.setName(typeName);
+      typ1.setFields(new ArrayList<>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+      client.createType(typ1);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(typ1.getFields())
+          .setNumBuckets(1)
+          .addBucketCol("name")
+          .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+          .build();
+
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        // the createTable() above does not update the location in the 'tbl'
+        // object when the client is a thrift client and the code below relies
+        // on the location being present in the 'tbl' object - so get the table
+        // from the metastore
+        tbl = client.getTable(dbName, tblName);
+      }
+
+      Table tbl2 = client.getTable(dbName, tblName);
+      assertNotNull(tbl2);
+      assertEquals(tbl2.getDbName(), dbName);
+      assertEquals(tbl2.getTableName(), tblName);
+      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+      assertEquals(tbl2.getSd().isCompressed(), false);
+      assertEquals(tbl2.getSd().getNumBuckets(), 1);
+      assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
+      assertNotNull(tbl2.getSd().getSerdeInfo());
+      tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
+      tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");
+
+      tbl2.setTableName(tblName2);
+      tbl2.setParameters(new HashMap<>());
+      tbl2.getParameters().put("EXTERNAL", "TRUE");
+      tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
+
+      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+      assertNotNull(fieldSchemas);
+      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemas.contains(fs));
+      }
+
+      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+      assertNotNull(fieldSchemasFull);
+      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+          + tbl.getPartitionKeys().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+      for (FieldSchema fs : tbl.getPartitionKeys()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+
+      client.createTable(tbl2);
+      if (isThriftClient) {
+        tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
+      }
+
+      Table tbl3 = client.getTable(dbName, tblName2);
+      assertNotNull(tbl3);
+      assertEquals(tbl3.getDbName(), dbName);
+      assertEquals(tbl3.getTableName(), tblName2);
+      assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
+      assertEquals(tbl3.getSd().isCompressed(), false);
+      assertEquals(tbl3.getSd().getNumBuckets(), 1);
+      assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
+      assertEquals(tbl3.getParameters(), tbl2.getParameters());
+
+      fieldSchemas = client.getFields(dbName, tblName2);
+      assertNotNull(fieldSchemas);
+      assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
+      for (FieldSchema fs : tbl2.getSd().getCols()) {
+        assertTrue(fieldSchemas.contains(fs));
+      }
+
+      fieldSchemasFull = client.getSchema(dbName, tblName2);
+      assertNotNull(fieldSchemasFull);
+      assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
+          + tbl2.getPartitionKeys().size());
+      for (FieldSchema fs : tbl2.getSd().getCols()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+      for (FieldSchema fs : tbl2.getPartitionKeys()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+
+      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+          .get("test_param_1"));
+      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+      assertTrue("Partition key list is not empty",
+          (tbl2.getPartitionKeys() == null)
+              || (tbl2.getPartitionKeys().size() == 0));
+
+      //test get_table_objects_by_name functionality
+      ArrayList<String> tableNames = new ArrayList<>();
+      tableNames.add(tblName2);
+      tableNames.add(tblName);
+      tableNames.add(tblName2);
+      List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
+
+      assertEquals(2, foundTables.size());
+      for (Table t: foundTables) {
+        if (t.getTableName().equals(tblName2)) {
+          assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
+        } else {
+          assertEquals(t.getTableName(), tblName);
+          assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
+        }
+        assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
+        assertEquals(t.getSd().isCompressed(), false);
+        assertEquals(t.getSd().getNumBuckets(), 1);
+        assertNotNull(t.getSd().getSerdeInfo());
+        assertEquals(t.getDbName(), dbName);
+      }
+
+      tableNames.add(1, "table_that_doesnt_exist");
+      foundTables = client.getTableObjectsByName(dbName, tableNames);
+      assertEquals(foundTables.size(), 2);
+
+      InvalidOperationException ioe = null;
+      try {
+        foundTables = client.getTableObjectsByName(dbName, null);
+      } catch (InvalidOperationException e) {
+        ioe = e;
+      }
+      assertNotNull(ioe);
+      assertTrue("Table not found", ioe.getMessage().contains("null tables"));
+
+      UnknownDBException udbe = null;
+      try {
+        foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
+      } catch (UnknownDBException e) {
+        udbe = e;
+      }
+      assertNotNull(udbe);
+      assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist"));
+
+      udbe = null;
+      try {
+        foundTables = client.getTableObjectsByName("", tableNames);
+      } catch (UnknownDBException e) {
+        udbe = e;
+      }
+      assertNotNull(udbe);
+      assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
+
+      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+      client.dropTable(dbName, tblName);
+      assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
+
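+      // tbl2 was created with EXTERNAL=TRUE, so dropping it must leave its data in place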
+      client.dropTable(dbName, tblName2);
+      assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
+
+      client.dropType(typeName);
+      client.dropDatabase(dbName);
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testSimpleTable() failed.");
+      throw e;
+    }
+  }
+
+  // Tests that in the absence of stats for partitions, and/or absence of columns
+  // to get stats for, the metastore does not break. See HIVE-12083 for motivation.
+  @Test
+  public void testStatsFastTrivial() throws Throwable {
+    String dbName = "tstatsfast";
+    String tblName = "t1";
+    String tblOwner = "statstester";
+    String typeName = "Person";
+    int lastAccessed = 12083;
+
+    cleanUp(dbName,tblName,typeName);
+
+    List<List<String>> values = new ArrayList<>();
+    values.add(makeVals("2008-07-01 14:13:12", "14"));
+    values.add(makeVals("2008-07-01 14:13:12", "15"));
+    values.add(makeVals("2008-07-02 14:13:12", "15"));
+    values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+    List<String> emptyColNames = new ArrayList<>();
+    List<String> emptyPartNames = new ArrayList<>();
+
+    List<String> colNames = new ArrayList<>();
+    colNames.add("name");
+    colNames.add("income");
+    List<String> partNames = client.listPartitionNames(dbName,tblName,(short)-1);
+
+    assertEquals(0,emptyColNames.size());
+    assertEquals(0,emptyPartNames.size());
+    assertEquals(2,colNames.size());
+    assertEquals(4,partNames.size());
+
+    // Test for both colNames and partNames being empty:
+    AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames);
+    assertNotNull(aggrStatsEmpty); // short-circuited on client-side, verifying that it's an empty object, not null
+    assertEquals(0,aggrStatsEmpty.getPartsFound());
+    assertNotNull(aggrStatsEmpty.getColStats());
+    // assertTrue rather than a bare Java assert, which is a no-op unless -ea is set
+    assertTrue(aggrStatsEmpty.getColStats().isEmpty());
+
+    // Test for only colNames being empty
+    AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames);
+    assertNotNull(aggrStatsOnlyParts); // short-circuited on client-side, verifying that it's an empty object, not null
+    assertEquals(0,aggrStatsOnlyParts.getPartsFound());
+    assertNotNull(aggrStatsOnlyParts.getColStats());
+    assertTrue(aggrStatsOnlyParts.getColStats().isEmpty());
+
+    // Test for only partNames being empty
+    AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames);
+    assertNotNull(aggrStatsOnlyCols); // short-circuited on client-side, verifying that it's an empty object, not null
+    assertEquals(0,aggrStatsOnlyCols.getPartsFound());
+    assertNotNull(aggrStatsOnlyCols.getColStats());
+    assertTrue(aggrStatsOnlyCols.getColStats().isEmpty());
+
+    // Test for valid values for both.
+    AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames);
+    assertNotNull(aggrStatsFull);
+    assertEquals(0,aggrStatsFull.getPartsFound()); // would still be empty, because no stats are actually populated.
+    assertNotNull(aggrStatsFull.getColStats());
+    assertTrue(aggrStatsFull.getColStats().isEmpty());
+
+  }
+
+  @Test
+  public void testColumnStatistics() throws Throwable {
+
+    String dbName = "columnstatstestdb";
+    String tblName = "tbl";
+    String typeName = "Person";
+    String tblOwner = "testowner";
+    int lastAccessed = 6796;
+
+    try {
+      cleanUp(dbName, tblName, typeName);
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+      createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true);
+
+      // Create a ColumnStatistics Obj
+      String[] colName = new String[]{"income", "name"};
+      double lowValue = 50000.21;
+      double highValue = 1200000.4525;
+      long numNulls = 3;
+      long numDVs = 22;
+      double avgColLen = 50.30;
+      long maxColLen = 102;
+      String[] colType = new String[] {"double", "string"};
+      boolean isTblLevel = true;
+      String partName = null;
+      List<ColumnStatisticsObj> statsObjs = new ArrayList<>();
+
+      ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
+      statsDesc.setDbName(dbName);
+      statsDesc.setTableName(tblName);
+      statsDesc.setIsTblLevel(isTblLevel);
+      statsDesc.setPartName(partName);
+
+      ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
+      statsObj.setColName(colName[0]);
+      statsObj.setColType(colType[0]);
+
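+      // ColumnStatisticsData is a Thrift union; setting doubleStats selects the
+      // variant matching the "double" column type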
+      ColumnStatisticsData statsData = new ColumnStatisticsData();
+      DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
+      statsData.setDoubleStats(numericStats);
+
+      statsData.getDoubleStats().setHighValue(highValue);
+      statsData.getDoubleStats().setLowValue(lowValue);
+      statsData.getDoubleStats().setNumDVs(numDVs);
+      statsData.getDoubleStats().setNumNulls(numNulls);
+
+      statsObj.setStatsData(statsData);
+      statsObjs.add(statsObj);
+
+      statsObj = new ColumnStatisticsObj();
+      statsObj.setColName(colName[1]);
+      statsObj.setColType(colType[1]);
+
+      statsData = new ColumnStatisticsData();
+      StringColumnStatsData stringStats = new StringColumnStatsData();
+      statsData.setStringStats(stringStats);
+      statsData.getStringStats().setAvgColLen(avgColLen);
+      statsData.getStringStats().setMaxColLen(maxColLen);
+      statsData.getStringStats().setNumDVs(numDVs);
+      statsData.getStringStats().setNumNulls(numNulls);
+
+      statsObj.setStatsData(statsData);
+      statsObjs.add(statsObj);
+
+      ColumnStatistics colStats = new ColumnStatistics();
+      colStats.setStatsDesc(statsDesc);
+      colStats.setStatsObj(statsObjs);
+
+      // write stats objs persistently
+      client.updateTableColumnStatistics(colStats);
+
+      // retrieve the stats obj that was just written
+      ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(
+          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+
+      // compare stats obj to ensure what we get is what we wrote
+      assertNotNull(colStats2);
+      assertEquals(colStats2.getColName(), colName[0]);
+      assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01);
+      assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
+      assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
+      assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
+
+      // test delete column stats; if no col name is passed all column stats associated with the
+      // table is deleted
+      boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
+      assertTrue(status);
+      // try to query stats for a column for which stats doesn't exist
+      assertTrue(client.getTableColumnStatistics(
+          dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
+
+      colStats.setStatsDesc(statsDesc);
+      colStats.setStatsObj(statsObjs);
+
+      // update table level column stats
+      client.updateTableColumnStatistics(colStats);
+
+      // query column stats for column whose stats were updated in the previous call
+      colStats2 = client.getTableColumnStatistics(
+          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+
+      // partition level column statistics test
+      // create a table with multiple partitions
+      cleanUp(dbName, tblName, typeName);
+
+      List<List<String>> values = new ArrayList<>();
+      values.add(makeVals("2008-07-01 14:13:12", "14"));
+      values.add(makeVals("2008-07-01 14:13:12", "15"));
+      values.add(makeVals("2008-07-02 14:13:12", "15"));
+      values.add(makeVals("2008-07-03 14:13:12", "151"));
+
+      createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+
+      List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+
+      partName = partitions.get(0);
+      isTblLevel = false;
+
+      // create a new columnstatistics desc to represent partition level column stats
+      statsDesc = new ColumnStatisticsDesc();
+      statsDesc.setDbName(dbName);
+      statsDesc.setTableName(tblName);
+      statsDesc.setPartName(partName);
+      statsDesc.setIsTblLevel(isTblLevel);
+
+      colStats = new ColumnStatistics();
+      colStats.setStatsDesc(statsDesc);
+      colStats.setStatsObj(statsObjs);
+
+      client.updatePartitionColumnStatistics(colStats);
+
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
+
+      // compare stats obj to ensure what we get is what we wrote
+      assertNotNull(colStats2);
+      assertEquals(colStats.getStatsDesc().getPartName(), partName);
+      assertEquals(colStats2.getColName(), colName[1]);
+      assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
+      assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
+
+      // test stats deletion at partition level
+      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
+
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
+
+      // test get stats on a column for which stats doesn't exist
+      assertTrue(client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testColumnStatistics() failed.");
+      throw e;
+    } finally {
+      cleanUp(dbName, tblName, typeName);
+    }
+  }
+
+  @Test(expected = MetaException.class)
+  public void testGetSchemaWithNoClassDefFoundError() throws TException {
+    String dbName = "testDb";
+    String tblName = "testTable";
+
+    client.dropTable(dbName, tblName);
+    silentDropDatabase(dbName);
+
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+
+    Table tbl = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .addCol("name", ColumnType.STRING_TYPE_NAME, "")
+        .setSerdeLib("no.such.class")
+        .build();
+    client.createTable(tbl);
+
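+    // getSchema needs to load the configured SerDe class to read the schema, so
+    // the bogus "no.such.class" above should surface as the expected MetaException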
+    client.getSchema(dbName, tblName);
+  }
+
+  @Test
+  public void testAlterTable() throws Exception {
+    String dbName = "alterdb";
+    String invTblName = "alter-tbl";
+    String tblName = "altertbl";
+
+    try {
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      ArrayList<FieldSchema> invCols = new ArrayList<>(2);
+      invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, ""));
+      invCols.add(new FieldSchema("in.come", ColumnType.INT_TYPE_NAME, ""));
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(invTblName)
+          .setCols(invCols)
+          .build();
+
+      boolean failed = false;
+      try {
+        client.createTable(tbl);
+      } catch (InvalidObjectException ex) {
+        failed = true;
+      }
+      if (!failed) {
+        fail("Able to create table with invalid name: " + invTblName);
+      }
+
+      // create an invalid table which has wrong column type
+      ArrayList<FieldSchema> invColsInvType = new ArrayList<>(2);
+      invColsInvType.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      invColsInvType.add(new FieldSchema("income", "xyz", ""));
+      tbl.setTableName(tblName);
+      tbl.getSd().setCols(invColsInvType);
+      boolean failChecker = false;
+      try {
+        client.createTable(tbl);
+      } catch (InvalidObjectException ex) {
+        failChecker = true;
+      }
+      if (!failChecker) {
+        fail("Able to create table with invalid column type: " + invTblName);
+      }
+
+      ArrayList<FieldSchema> cols = new ArrayList<>(2);
+      cols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      cols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+
+      // create a valid table
+      tbl.setTableName(tblName);
+      tbl.getSd().setCols(cols);
+      client.createTable(tbl);
+
+      if (isThriftClient) {
+        tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
+      }
+
+      // now try an invalid alter table
+      Table tbl2 = client.getTable(dbName, tblName);
+      failed = false;
+      try {
+        tbl2.setTableName(invTblName);
+        tbl2.getSd().setCols(invCols);
+        client.alter_table(dbName, tblName, tbl2);
+      } catch (InvalidOperationException ex) {
+        failed = true;
+      }
+      if (!failed) {
+        fail("Able to rename table with invalid name: " + invTblName);
+      }
+
+      //try an invalid alter table with partition key name
+      Table tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+      List<FieldSchema> partitionKeys = tbl_pk.getPartitionKeys();
+      for (FieldSchema fs : partitionKeys) {
+        fs.setName("invalid_to_change_name");
+        fs.setComment("can_change_comment");
+      }
+      tbl_pk.setPartitionKeys(partitionKeys);
+      failed = false;
+      try {
+        client.alter_table(dbName, tblName, tbl_pk);
+      } catch (InvalidOperationException ex) {
+        failed = true;
+      }
+      assertTrue("Should not have succeeded in altering partition key name", failed);
+
+      // try a valid alter table: change a partition key comment
+      failed = false;
+      tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+      partitionKeys = tbl_pk.getPartitionKeys();
+      for (FieldSchema fs : partitionKeys) {
+        fs.setComment("can_change_comment");
+      }
+      tbl_pk.setPartitionKeys(partitionKeys);
+      try {
+        client.alter_table(dbName, tblName, tbl_pk);
+      } catch (InvalidOperationException ex) {
+        failed = true;
+      }
+      assertFalse("Should not have failed alter table partition comment", failed);
+      Table newT = client.getTable(tbl.getDbName(), tbl.getTableName());
+      assertEquals(partitionKeys, newT.getPartitionKeys());
+
+      // try a valid alter table
+      tbl2.setTableName(tblName + "_renamed");
+      tbl2.getSd().setCols(cols);
+      tbl2.getSd().setNumBuckets(32);
+      client.alter_table(dbName, tblName, tbl2);
+      Table tbl3 = client.getTable(dbName, tbl2.getTableName());
+      assertEquals("Alter table didn't succeed. Num buckets is different ",
+          tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
+      // check that data has moved
+      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+      assertFalse("old table location still exists", fs.exists(new Path(tbl
+          .getSd().getLocation())));
+      assertTrue("data did not move to new location", fs.exists(new Path(tbl3
+          .getSd().getLocation())));
+
+      if (!isThriftClient) {
+        assertEquals("alter table didn't move data correct location", tbl3
+            .getSd().getLocation(), tbl2.getSd().getLocation());
+      }
+
+      // alter table with an invalid column type
+      tbl_pk.getSd().setCols(invColsInvType);
+      failed = false;
+      try {
+        client.alter_table(dbName, tbl2.getTableName(), tbl_pk);
+      } catch (InvalidOperationException ex) {
+        failed = true;
+      }
+      assertTrue("Should not have succeeded in altering column", failed);
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testSimpleTable() failed.");
+      throw e;
+    } finally {
+      silentDropDatabase(dbName);
+    }
+  }
+
+  @Test
+  public void testComplexTable() throws Exception {
+
+    String dbName = "compdb";
+    String tblName = "comptbl";
+    String typeName = "Person";
+
+    try {
+      client.dropTable(dbName, tblName);
+      silentDropDatabase(dbName);
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      client.dropType(typeName);
+      Type typ1 = new Type();
+      typ1.setName(typeName);
+      typ1.setFields(new ArrayList<>(2));
+      typ1.getFields().add(
+          new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+      typ1.getFields().add(
+          new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+      client.createType(typ1);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(typ1.getFields())
+          .addPartCol("ds", ColumnType.DATE_TYPE_NAME)
+          .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+          .setNumBuckets(1)
+          .addBucketCol("name")
+          .addStorageDescriptorParam("test_param_1","Use this for comments etc")
+          .build();
+
+      client.createTable(tbl);
+
+      Table tbl2 = client.getTable(dbName, tblName);
+      assertEquals(tbl2.getDbName(), dbName);
+      assertEquals(tbl2.getTableName(), tblName);
+      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+      assertFalse(tbl2.getSd().isCompressed());
+      assertFalse(tbl2.getSd().isStoredAsSubDirectories());
+      assertEquals(tbl2.getSd().getNumBuckets(), 1);
+
+      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+          .get("test_param_1"));
+      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+
+      assertNotNull(tbl2.getPartitionKeys());
+      assertEquals(2, tbl2.getPartitionKeys().size());
+      assertEquals(ColumnType.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
+          .getType());
+      assertEquals(ColumnType.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
+          .getType());
+      assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
+      assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());
+
+      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+      assertNotNull(fieldSchemas);
+      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemas.contains(fs));
+      }
+
+      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+      assertNotNull(fieldSchemasFull);
+      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+          + tbl.getPartitionKeys().size());
+      for (FieldSchema fs : tbl.getSd().getCols()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+      for (FieldSchema fs : tbl.getPartitionKeys()) {
+        assertTrue(fieldSchemasFull.contains(fs));
+      }
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testComplexTable() failed.");
+      throw e;
+    } finally {
+      client.dropTable(dbName, tblName);
+      boolean ret = client.dropType(typeName);
+      assertTrue("Unable to drop type " + typeName, ret);
+      client.dropDatabase(dbName);
+    }
+  }
+
+  @Test
+  public void testTableDatabase() throws Exception {
+    String dbName = "testDb";
+    String tblName_1 = "testTbl_1";
+    String tblName_2 = "testTbl_2";
+
+    try {
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      String dbLocation =
+          MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
+      db.setLocationUri(dbLocation);
+      client.createDatabase(db);
+      db = client.getDatabase(dbName);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName_1)
+          .addCol("name", ColumnType.STRING_TYPE_NAME)
+          .addCol("income", ColumnType.INT_TYPE_NAME)
+          .build();
+
+      client.createTable(tbl);
+      tbl = client.getTable(dbName, tblName_1);
+
+      Path path = new Path(tbl.getSd().getLocation());
+      System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
+      assertEquals("Table location is not a subset of the database location",
+          path.getParent().toString(), db.getLocationUri());
+
+    } catch (Exception e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testTableDatabase() failed.");
+      throw e;
+    } finally {
+      silentDropDatabase(dbName);
+    }
+  }
+
+  @Test
+  public void testGetConfigValue() {
+
+    String val = "value";
+
+    if (!isThriftClient) {
+      try {
+        assertEquals(client.getConfigValue("hive.key1", val), "value1");
+        assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
+        assertEquals(client.getConfigValue("hive.key3", val), "");
+        assertEquals(client.getConfigValue("hive.key4", val), "0");
+        assertEquals(client.getConfigValue("hive.key5", val), val);
+        assertEquals(client.getConfigValue(null, val), val);
+      } catch (TException e) {
+        e.printStackTrace();
+        fail();
+      }
+    }
+
+    boolean threwException = false;
+    try {
+      // Attempting to get the password should throw an exception
+      client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
+    } catch (ConfigValSecurityException e) {
+      threwException = true;
+    } catch (TException e) {
+      e.printStackTrace();
+      fail();
+    }
+    assertTrue("Expected ConfigValSecurityException for a restricted key", threwException);
+  }
+
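+  /**
+   * Align a locally built Partition with the server-side copy: the metastore
+   * assigns createTime and the DDL_TIME parameter on add_partition, so direct
+   * equality checks against getPartition() would otherwise fail.
+   */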
+  private static void adjust(HiveMetaStoreClient client, Partition part,
+      String dbName, String tblName) throws TException {
+    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
+    part.setCreateTime(part_get.getCreateTime());
+    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
+  }
+
+  private static void silentDropDatabase(String dbName) throws TException {
+    try {
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException|InvalidOperationException e) {
+      // NOP
+    }
+  }
+
+  /**
+   * Tests the list-partitions-by-filter functionality.
+   */
+  @Test
+  public void testPartitionFilter() throws Exception {
+    String dbName = "filterdb";
+    String tblName = "filtertbl";
+
+    silentDropDatabase(dbName);
+
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+
+    Table tbl = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .addCol("c1", ColumnType.STRING_TYPE_NAME)
+        .addCol("c2", ColumnType.INT_TYPE_NAME)
+        .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
+        .addPartCol("p2", ColumnType.STRING_TYPE_NAME)
+        .addPartCol("p3", ColumnType.INT_TYPE_NAME)
+        .build();
+    client.createTable(tbl);
+
+    tbl = client.getTable(dbName, tblName);
+
+    add_partition(client, tbl, Lists.newArrayList("p11", "p21", "31"), "part1");
+    add_partition(client, tbl, Lists.newArrayList("p11", "p22", "32"), "part2");
+    add_partition(client, tbl, Lists.newArrayList("p12", "p21", "31"), "part3");
+    add_partition(client, tbl, Lists.newArrayList("p12", "p23", "32"), "part4");
+    add_partition(client, tbl, Lists.newArrayList("p13", "p24", "31"), "part5");
+    add_partition(client, tbl, Lists.newArrayList("p13", "p25", "-33"), "part6");
+
+    // Test equals operator for strings and integers.
+    checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+    checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+    checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+    checkFilter(client, dbName, tblName, "p3 = 31", 3);
+    checkFilter(client, dbName, tblName, "p3 = 33", 0);
+    checkFilter(client, dbName, tblName, "p3 = -33", 1);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 = \"p11\" and p3 = 31", 1);
+    checkFilter(client, dbName, tblName, "p3 = -33 or p1 = \"p12\"", 3);
+
+    // Test not-equals operator for strings and integers.
+    checkFilter(client, dbName, tblName, "p1 != \"p11\"", 4);
+    checkFilter(client, dbName, tblName, "p2 != \"p23\"", 5);
+    checkFilter(client, dbName, tblName, "p2 != \"p33\"", 6);
+    checkFilter(client, dbName, tblName, "p3 != 32", 4);
+    checkFilter(client, dbName, tblName, "p3 != 8589934592", 6);
+    checkFilter(client, dbName, tblName, "p1 != \"p11\" and p1 != \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p1 != \"p11\" and p2 != \"p22\"", 4);
+    checkFilter(client, dbName, tblName, "p1 != \"p11\" or p2 != \"p22\"", 5);
+    checkFilter(client, dbName, tblName, "p1 != \"p12\" and p2 != \"p25\"", 3);
+    checkFilter(client, dbName, tblName, "p1 != \"p12\" or p2 != \"p25\"", 6);
+    checkFilter(client, dbName, tblName, "p3 != -33 or p1 != \"p13\"", 5);
+    checkFilter(client, dbName, tblName, "p1 != \"p11\" and p3 = 31", 2);
+    checkFilter(client, dbName, tblName, "p3 != 31 and p1 = \"p12\"", 1);
+
+    // Test reverse order.
+    checkFilter(client, dbName, tblName, "31 != p3 and p1 = \"p12\"", 1);
+    checkFilter(client, dbName, tblName, "\"p23\" = p2", 1);
+
+    // Test and/or more...
+    checkFilter(client, dbName, tblName,
+        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+    checkFilter(client, dbName, tblName,
+       "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+       "(p1=\"p13\" aNd p2=\"p24\")", 4);
+    // test and/or precedence
+    checkFilter(client, dbName, tblName,
+       "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+    checkFilter(client, dbName, tblName,
+       "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+
+    // Test gt/lt/lte/gte/like for strings.
+    checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+    checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+
+    // Test gt/lt/lte/gte for numbers.
+    checkFilter(client, dbName, tblName, "p3 < 0", 1);
+    checkFilter(client, dbName, tblName, "p3 >= -33", 6);
+    checkFilter(client, dbName, tblName, "p3 > -33", 5);
+    checkFilter(client, dbName, tblName, "p3 > 31 and p3 < 32", 0);
+    checkFilter(client, dbName, tblName, "p3 > 31 or p3 < 31", 3);
+    checkFilter(client, dbName, tblName, "p3 > 30 or p3 < 30", 6);
+    checkFilter(client, dbName, tblName, "p3 >= 31 or p3 < -32", 6);
+    checkFilter(client, dbName, tblName, "p3 >= 32", 2);
+    checkFilter(client, dbName, tblName, "p3 > 32", 0);
+
+    // Test between
+    checkFilter(client, dbName, tblName, "p1 between \"p11\" and \"p12\"", 4);
+    checkFilter(client, dbName, tblName, "p1 not between \"p11\" and \"p12\"", 2);
+    checkFilter(client, dbName, tblName, "p3 not between 0 and 2", 6);
+    checkFilter(client, dbName, tblName, "p3 between 31 and 32", 5);
+    checkFilter(client, dbName, tblName, "p3 between 32 and 31", 0);
+    checkFilter(client, dbName, tblName, "p3 between -32 and 34 and p3 not between 31 and 32", 0);
+    checkFilter(client, dbName, tblName, "p3 between 1 and 3 or p3 not between 1 and 3", 6);
+    checkFilter(client, dbName, tblName,
+        "p3 between 31 and 32 and p1 between \"p12\" and \"p14\"", 3);
+
+    // Test for setting the maximum partition count
+    List<Partition> partitions = client.listPartitionsByFilter(dbName,
+        tblName, "p1 >= \"p12\"", (short) 2);
+    assertEquals("User specified row limit for partitions",
+        2, partitions.size());
+
+    // Negative tests
+    Exception me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "p3 >= \"p12\"", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Filter on int partition key", me.getMessage().contains(
+          "Filtering is supported only on partition keys of type string"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "c1 >= \"p12\"", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Filter on invalid key", me.getMessage().contains(
+          "<c1> is not a partitioning key for the table"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter(dbName,
+          tblName, "c1 >= ", (short) -1);
+    } catch(MetaException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("Invalid filter string", me.getMessage().contains(
+          "Error parsing partition filter"));
+
+    me = null;
+    try {
+      client.listPartitionsByFilter("invDBName",
+          "invTableName", "p1 = \"p11\"", (short) -1);
+    } catch(NoSuchObjectException e) {
+      me = e;
+    }
+    assertNotNull(me);
+    assertTrue("NoSuchObject exception", me.getMessage().contains(
+          "invDBName.invTableName table not found"));
+
+    client.dropTable(dbName, tblName);
+    client.dropDatabase(dbName);
+  }
+
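+  // Not part of this patch: a minimal sketch of driving the same filter grammar
+  // from client code. The connection setup is an assumption; note that, as the
+  // negative tests above show, comparing a non-string partition key against a
+  // string literal raises a MetaException.
+  //
+  //   HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
+  //   // (short) -1 asks for all matching partitions; a positive value caps the result
+  //   List<Partition> parts = msc.listPartitionsByFilter(
+  //       "filterdb", "filtertbl", "p1 = \"p11\" and p2 = \"p21\"", (short) -1);
+  //   msc.close();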
+
+  /**
+   * Test filtering on a table with a single partition key.
+   */
+  @Test
+  public void testFilterSinglePartition() throws Exception {
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<>(1);
+      vals.add("p11");
+      List <String> vals2 = new ArrayList<>(1);
+      vals2.add("p12");
+      List <String> vals3 = new ArrayList<>(1);
+      vals3.add("p13");
+
+      silentDropDatabase(dbName);
+
+      Database db = new Database();
+      db.setName(dbName);
+      client.createDatabase(db);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .addCol("c1", ColumnType.STRING_TYPE_NAME)
+          .addCol("c2", ColumnType.INT_TYPE_NAME)
+          .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
+          .build();
+      client.createTable(tbl);
+
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+
+      checkFilter(client, dbName, tblName, "p1 = \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 < \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 > \"p12\"", 1);
+      checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 <> \"p12\"", 2);
+      checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 3);
+      checkFilter(client, dbName, tblName, "p1 like \"p.*2\"", 1);
+
+      client.dropTable(dbName, tblName);
+      client.dropDatabase(dbName);
+  }
+
+  /**
+   * Test filtering based on the value of the last partition key.
+   */
+  @Test
+  public void testFilterLastPartition() throws Exception {
+      String dbName = "filterdb";
+      String tblName = "filtertbl";
+
+      List<String> vals = new ArrayList<>(2);
+      vals.add("p11");
+      vals.add("p21");
+      List <String> vals2 = new ArrayList<>(2);
+      vals2.add("p11");
+      vals2.add("p22");
+      List <String> vals3 = new ArrayList<>(2);
+      vals3.add("p12");
+      vals3.add("p21");
+
+      cleanUp(dbName, tblName, null);
+
+      createDb(dbName);
+
+      Table tbl = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .addCol("c1", ColumnType.STRING_TYPE_NAME)
+          .addCol("c2", ColumnType.INT_TYPE_NAME)
+          .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
+          .addPartCol("p2", ColumnType.STRING_TYPE_NAME)
+          .build();
+
+      client.createTable(tbl);
+      tbl = client.getTable(dbName, tblName);
+
+      add_partition(client, tbl, vals, "part1");
+      add_partition(client, tbl, vals2, "part2");
+      add_partition(client, tbl, vals3, "part3");
+
+      checkFilter(client, dbN

<TRUNCATED>

[43/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
new file mode 100644
index 0000000..7107e59
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
@@ -0,0 +1,1407 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SchemaVersion implements org.apache.thrift.TBase<SchemaVersion, SchemaVersion._Fields>, java.io.Serializable, Cloneable, Comparable<SchemaVersion> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SchemaVersion");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField CREATED_AT_FIELD_DESC = new org.apache.thrift.protocol.TField("createdAt", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("cols", org.apache.thrift.protocol.TType.LIST, (short)4);
+  private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("state", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField SCHEMA_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaText", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField FINGERPRINT_FIELD_DESC = new org.apache.thrift.protocol.TField("fingerprint", org.apache.thrift.protocol.TType.STRING, (short)8);
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)9);
+  private static final org.apache.thrift.protocol.TField SER_DE_FIELD_DESC = new org.apache.thrift.protocol.TField("serDe", org.apache.thrift.protocol.TType.STRUCT, (short)10);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SchemaVersionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SchemaVersionTupleSchemeFactory());
+  }
+
+  private String schemaName; // required
+  private int version; // required
+  private long createdAt; // required
+  private List<FieldSchema> cols; // required
+  private SchemaVersionState state; // optional
+  private String description; // optional
+  private String schemaText; // optional
+  private String fingerprint; // optional
+  private String name; // optional
+  private SerDeInfo serDe; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEMA_NAME((short)1, "schemaName"),
+    VERSION((short)2, "version"),
+    CREATED_AT((short)3, "createdAt"),
+    COLS((short)4, "cols"),
+    /**
+     * 
+     * @see SchemaVersionState
+     */
+    STATE((short)5, "state"),
+    DESCRIPTION((short)6, "description"),
+    SCHEMA_TEXT((short)7, "schemaText"),
+    FINGERPRINT((short)8, "fingerprint"),
+    NAME((short)9, "name"),
+    SER_DE((short)10, "serDe");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_NAME
+          return SCHEMA_NAME;
+        case 2: // VERSION
+          return VERSION;
+        case 3: // CREATED_AT
+          return CREATED_AT;
+        case 4: // COLS
+          return COLS;
+        case 5: // STATE
+          return STATE;
+        case 6: // DESCRIPTION
+          return DESCRIPTION;
+        case 7: // SCHEMA_TEXT
+          return SCHEMA_TEXT;
+        case 8: // FINGERPRINT
+          return FINGERPRINT;
+        case 9: // NAME
+          return NAME;
+        case 10: // SER_DE
+          return SER_DE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __VERSION_ISSET_ID = 0;
+  private static final int __CREATEDAT_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.STATE,_Fields.DESCRIPTION,_Fields.SCHEMA_TEXT,_Fields.FINGERPRINT,_Fields.NAME,_Fields.SER_DE};
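+  // optionals[] drives the TupleScheme: only optional fields that are set get
+  // written, with a leading bitset recording which ones are present.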
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.CREATED_AT, new org.apache.thrift.meta_data.FieldMetaData("createdAt", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.COLS, new org.apache.thrift.meta_data.FieldMetaData("cols", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class))));
+    tmpMap.put(_Fields.STATE, new org.apache.thrift.meta_data.FieldMetaData("state", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaVersionState.class)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SCHEMA_TEXT, new org.apache.thrift.meta_data.FieldMetaData("schemaText", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FINGERPRINT, new org.apache.thrift.meta_data.FieldMetaData("fingerprint", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SER_DE, new org.apache.thrift.meta_data.FieldMetaData("serDe", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SerDeInfo.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SchemaVersion.class, metaDataMap);
+  }
+
+  public SchemaVersion() {
+  }
+
+  public SchemaVersion(
+    String schemaName,
+    int version,
+    long createdAt,
+    List<FieldSchema> cols)
+  {
+    this();
+    this.schemaName = schemaName;
+    this.version = version;
+    setVersionIsSet(true);
+    this.createdAt = createdAt;
+    setCreatedAtIsSet(true);
+    this.cols = cols;
+  }
+
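+  // Usage sketch (not generated code): the convenience constructor above covers
+  // the required fields; optionals stay unset until assigned, and the primitive
+  // fields version/createdAt track set-ness in __isset_bitfield.
+  //
+  //   SchemaVersion sv = new SchemaVersion("events", 2, 1513209864L,
+  //       new ArrayList<FieldSchema>());
+  //   sv.isSetDescription();                 // false - optional, never assigned
+  //   sv.setDescription("adds a userId column");
+  //   sv.isSetDescription();                 // true
+  //   sv.unsetVersion();
+  //   sv.isSetVersion();                     // false
+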
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SchemaVersion(SchemaVersion other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetSchemaName()) {
+      this.schemaName = other.schemaName;
+    }
+    this.version = other.version;
+    this.createdAt = other.createdAt;
+    if (other.isSetCols()) {
+      List<FieldSchema> __this__cols = new ArrayList<FieldSchema>(other.cols.size());
+      for (FieldSchema other_element : other.cols) {
+        __this__cols.add(new FieldSchema(other_element));
+      }
+      this.cols = __this__cols;
+    }
+    if (other.isSetState()) {
+      this.state = other.state;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+    if (other.isSetSchemaText()) {
+      this.schemaText = other.schemaText;
+    }
+    if (other.isSetFingerprint()) {
+      this.fingerprint = other.fingerprint;
+    }
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetSerDe()) {
+      this.serDe = new SerDeInfo(other.serDe);
+    }
+  }
+
+  public SchemaVersion deepCopy() {
+    return new SchemaVersion(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaName = null;
+    setVersionIsSet(false);
+    this.version = 0;
+    setCreatedAtIsSet(false);
+    this.createdAt = 0;
+    this.cols = null;
+    this.state = null;
+    this.description = null;
+    this.schemaText = null;
+    this.fingerprint = null;
+    this.name = null;
+    this.serDe = null;
+  }
+
+  public String getSchemaName() {
+    return this.schemaName;
+  }
+
+  public void setSchemaName(String schemaName) {
+    this.schemaName = schemaName;
+  }
+
+  public void unsetSchemaName() {
+    this.schemaName = null;
+  }
+
+  /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaName() {
+    return this.schemaName != null;
+  }
+
+  public void setSchemaNameIsSet(boolean value) {
+    if (!value) {
+      this.schemaName = null;
+    }
+  }
+
+  public int getVersion() {
+    return this.version;
+  }
+
+  public void setVersion(int version) {
+    this.version = version;
+    setVersionIsSet(true);
+  }
+
+  public void unsetVersion() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VERSION_ISSET_ID);
+  }
+
+  /** Returns true if field version is set (has been assigned a value) and false otherwise */
+  public boolean isSetVersion() {
+    return EncodingUtils.testBit(__isset_bitfield, __VERSION_ISSET_ID);
+  }
+
+  public void setVersionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VERSION_ISSET_ID, value);
+  }
+
+  public long getCreatedAt() {
+    return this.createdAt;
+  }
+
+  public void setCreatedAt(long createdAt) {
+    this.createdAt = createdAt;
+    setCreatedAtIsSet(true);
+  }
+
+  public void unsetCreatedAt() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATEDAT_ISSET_ID);
+  }
+
+  /** Returns true if field createdAt is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreatedAt() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATEDAT_ISSET_ID);
+  }
+
+  public void setCreatedAtIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATEDAT_ISSET_ID, value);
+  }
+
+  public int getColsSize() {
+    return (this.cols == null) ? 0 : this.cols.size();
+  }
+
+  public java.util.Iterator<FieldSchema> getColsIterator() {
+    return (this.cols == null) ? null : this.cols.iterator();
+  }
+
+  public void addToCols(FieldSchema elem) {
+    if (this.cols == null) {
+      this.cols = new ArrayList<FieldSchema>();
+    }
+    this.cols.add(elem);
+  }
+
+  public List<FieldSchema> getCols() {
+    return this.cols;
+  }
+
+  public void setCols(List<FieldSchema> cols) {
+    this.cols = cols;
+  }
+
+  public void unsetCols() {
+    this.cols = null;
+  }
+
+  /** Returns true if field cols is set (has been assigned a value) and false otherwise */
+  public boolean isSetCols() {
+    return this.cols != null;
+  }
+
+  public void setColsIsSet(boolean value) {
+    if (!value) {
+      this.cols = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SchemaVersionState
+   */
+  public SchemaVersionState getState() {
+    return this.state;
+  }
+
+  /**
+   * 
+   * @see SchemaVersionState
+   */
+  public void setState(SchemaVersionState state) {
+    this.state = state;
+  }
+
+  public void unsetState() {
+    this.state = null;
+  }
+
+  /** Returns true if field state is set (has been assigned a value) and false otherwise */
+  public boolean isSetState() {
+    return this.state != null;
+  }
+
+  public void setStateIsSet(boolean value) {
+    if (!value) {
+      this.state = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public String getSchemaText() {
+    return this.schemaText;
+  }
+
+  public void setSchemaText(String schemaText) {
+    this.schemaText = schemaText;
+  }
+
+  public void unsetSchemaText() {
+    this.schemaText = null;
+  }
+
+  /** Returns true if field schemaText is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaText() {
+    return this.schemaText != null;
+  }
+
+  public void setSchemaTextIsSet(boolean value) {
+    if (!value) {
+      this.schemaText = null;
+    }
+  }
+
+  public String getFingerprint() {
+    return this.fingerprint;
+  }
+
+  public void setFingerprint(String fingerprint) {
+    this.fingerprint = fingerprint;
+  }
+
+  public void unsetFingerprint() {
+    this.fingerprint = null;
+  }
+
+  /** Returns true if field fingerprint is set (has been assigned a value) and false otherwise */
+  public boolean isSetFingerprint() {
+    return this.fingerprint != null;
+  }
+
+  public void setFingerprintIsSet(boolean value) {
+    if (!value) {
+      this.fingerprint = null;
+    }
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public SerDeInfo getSerDe() {
+    return this.serDe;
+  }
+
+  public void setSerDe(SerDeInfo serDe) {
+    this.serDe = serDe;
+  }
+
+  public void unsetSerDe() {
+    this.serDe = null;
+  }
+
+  /** Returns true if field serDe is set (has been assigned a value) and false otherwise */
+  public boolean isSetSerDe() {
+    return this.serDe != null;
+  }
+
+  public void setSerDeIsSet(boolean value) {
+    if (!value) {
+      this.serDe = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_NAME:
+      if (value == null) {
+        unsetSchemaName();
+      } else {
+        setSchemaName((String)value);
+      }
+      break;
+
+    case VERSION:
+      if (value == null) {
+        unsetVersion();
+      } else {
+        setVersion((Integer)value);
+      }
+      break;
+
+    case CREATED_AT:
+      if (value == null) {
+        unsetCreatedAt();
+      } else {
+        setCreatedAt((Long)value);
+      }
+      break;
+
+    case COLS:
+      if (value == null) {
+        unsetCols();
+      } else {
+        setCols((List<FieldSchema>)value);
+      }
+      break;
+
+    case STATE:
+      if (value == null) {
+        unsetState();
+      } else {
+        setState((SchemaVersionState)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    case SCHEMA_TEXT:
+      if (value == null) {
+        unsetSchemaText();
+      } else {
+        setSchemaText((String)value);
+      }
+      break;
+
+    case FINGERPRINT:
+      if (value == null) {
+        unsetFingerprint();
+      } else {
+        setFingerprint((String)value);
+      }
+      break;
+
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case SER_DE:
+      if (value == null) {
+        unsetSerDe();
+      } else {
+        setSerDe((SerDeInfo)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_NAME:
+      return getSchemaName();
+
+    case VERSION:
+      return getVersion();
+
+    case CREATED_AT:
+      return getCreatedAt();
+
+    case COLS:
+      return getCols();
+
+    case STATE:
+      return getState();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    case SCHEMA_TEXT:
+      return getSchemaText();
+
+    case FINGERPRINT:
+      return getFingerprint();
+
+    case NAME:
+      return getName();
+
+    case SER_DE:
+      return getSerDe();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_NAME:
+      return isSetSchemaName();
+    case VERSION:
+      return isSetVersion();
+    case CREATED_AT:
+      return isSetCreatedAt();
+    case COLS:
+      return isSetCols();
+    case STATE:
+      return isSetState();
+    case DESCRIPTION:
+      return isSetDescription();
+    case SCHEMA_TEXT:
+      return isSetSchemaText();
+    case FINGERPRINT:
+      return isSetFingerprint();
+    case NAME:
+      return isSetName();
+    case SER_DE:
+      return isSetSerDe();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SchemaVersion)
+      return this.equals((SchemaVersion)that);
+    return false;
+  }
+
+  public boolean equals(SchemaVersion that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaName = true && this.isSetSchemaName();
+    boolean that_present_schemaName = true && that.isSetSchemaName();
+    if (this_present_schemaName || that_present_schemaName) {
+      if (!(this_present_schemaName && that_present_schemaName))
+        return false;
+      if (!this.schemaName.equals(that.schemaName))
+        return false;
+    }
+
+    boolean this_present_version = true;
+    boolean that_present_version = true;
+    if (this_present_version || that_present_version) {
+      if (!(this_present_version && that_present_version))
+        return false;
+      if (this.version != that.version)
+        return false;
+    }
+
+    boolean this_present_createdAt = true;
+    boolean that_present_createdAt = true;
+    if (this_present_createdAt || that_present_createdAt) {
+      if (!(this_present_createdAt && that_present_createdAt))
+        return false;
+      if (this.createdAt != that.createdAt)
+        return false;
+    }
+
+    boolean this_present_cols = true && this.isSetCols();
+    boolean that_present_cols = true && that.isSetCols();
+    if (this_present_cols || that_present_cols) {
+      if (!(this_present_cols && that_present_cols))
+        return false;
+      if (!this.cols.equals(that.cols))
+        return false;
+    }
+
+    boolean this_present_state = true && this.isSetState();
+    boolean that_present_state = true && that.isSetState();
+    if (this_present_state || that_present_state) {
+      if (!(this_present_state && that_present_state))
+        return false;
+      if (!this.state.equals(that.state))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    boolean this_present_schemaText = true && this.isSetSchemaText();
+    boolean that_present_schemaText = true && that.isSetSchemaText();
+    if (this_present_schemaText || that_present_schemaText) {
+      if (!(this_present_schemaText && that_present_schemaText))
+        return false;
+      if (!this.schemaText.equals(that.schemaText))
+        return false;
+    }
+
+    boolean this_present_fingerprint = true && this.isSetFingerprint();
+    boolean that_present_fingerprint = true && that.isSetFingerprint();
+    if (this_present_fingerprint || that_present_fingerprint) {
+      if (!(this_present_fingerprint && that_present_fingerprint))
+        return false;
+      if (!this.fingerprint.equals(that.fingerprint))
+        return false;
+    }
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_serDe = true && this.isSetSerDe();
+    boolean that_present_serDe = true && that.isSetSerDe();
+    if (this_present_serDe || that_present_serDe) {
+      if (!(this_present_serDe && that_present_serDe))
+        return false;
+      if (!this.serDe.equals(that.serDe))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaName = true && (isSetSchemaName());
+    list.add(present_schemaName);
+    if (present_schemaName)
+      list.add(schemaName);
+
+    boolean present_version = true;
+    list.add(present_version);
+    if (present_version)
+      list.add(version);
+
+    boolean present_createdAt = true;
+    list.add(present_createdAt);
+    if (present_createdAt)
+      list.add(createdAt);
+
+    boolean present_cols = true && (isSetCols());
+    list.add(present_cols);
+    if (present_cols)
+      list.add(cols);
+
+    boolean present_state = true && (isSetState());
+    list.add(present_state);
+    if (present_state)
+      list.add(state.getValue());
+
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    boolean present_schemaText = true && (isSetSchemaText());
+    list.add(present_schemaText);
+    if (present_schemaText)
+      list.add(schemaText);
+
+    boolean present_fingerprint = true && (isSetFingerprint());
+    list.add(present_fingerprint);
+    if (present_fingerprint)
+      list.add(fingerprint);
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_serDe = true && (isSetSerDe());
+    list.add(present_serDe);
+    if (present_serDe)
+      list.add(serDe);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SchemaVersion other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetVersion()).compareTo(other.isSetVersion());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetVersion()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.version, other.version);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreatedAt()).compareTo(other.isSetCreatedAt());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreatedAt()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createdAt, other.createdAt);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCols()).compareTo(other.isSetCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.cols, other.cols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetState()).compareTo(other.isSetState());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetState()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.state, other.state);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSchemaText()).compareTo(other.isSetSchemaText());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaText()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaText, other.schemaText);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFingerprint()).compareTo(other.isSetFingerprint());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFingerprint()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fingerprint, other.fingerprint);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSerDe()).compareTo(other.isSetSerDe());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSerDe()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serDe, other.serDe);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SchemaVersion(");
+    boolean first = true;
+
+    sb.append("schemaName:");
+    if (this.schemaName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("version:");
+    sb.append(this.version);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createdAt:");
+    sb.append(this.createdAt);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("cols:");
+    if (this.cols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.cols);
+    }
+    first = false;
+    if (isSetState()) {
+      if (!first) sb.append(", ");
+      sb.append("state:");
+      if (this.state == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.state);
+      }
+      first = false;
+    }
+    if (isSetDescription()) {
+      if (!first) sb.append(", ");
+      sb.append("description:");
+      if (this.description == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.description);
+      }
+      first = false;
+    }
+    if (isSetSchemaText()) {
+      if (!first) sb.append(", ");
+      sb.append("schemaText:");
+      if (this.schemaText == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.schemaText);
+      }
+      first = false;
+    }
+    if (isSetFingerprint()) {
+      if (!first) sb.append(", ");
+      sb.append("fingerprint:");
+      if (this.fingerprint == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.fingerprint);
+      }
+      first = false;
+    }
+    if (isSetName()) {
+      if (!first) sb.append(", ");
+      sb.append("name:");
+      if (this.name == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.name);
+      }
+      first = false;
+    }
+    if (isSetSerDe()) {
+      if (!first) sb.append(", ");
+      sb.append("serDe:");
+      if (this.serDe == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.serDe);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (serDe != null) {
+      serDe.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
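+  // Round-trip sketch (not generated code; TMemoryBuffer and TBinaryProtocol are
+  // standard libthrift classes): write()/read() dispatch through the scheme map,
+  // and writeObject()/readObject() above reuse the same machinery to make the
+  // struct java.io.Serializable.
+  //
+  //   SchemaVersion sv = new SchemaVersion("weather", 1, 1513209864L,
+  //       Collections.singletonList(new FieldSchema("temp", "double", "")));
+  //   TMemoryBuffer buf = new TMemoryBuffer(1024);
+  //   sv.write(new TBinaryProtocol(buf));
+  //   SchemaVersion copy = new SchemaVersion();
+  //   copy.read(new TBinaryProtocol(buf));
+  //   assert sv.equals(copy);
+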
+  private static class SchemaVersionStandardSchemeFactory implements SchemeFactory {
+    public SchemaVersionStandardScheme getScheme() {
+      return new SchemaVersionStandardScheme();
+    }
+  }
+
+  private static class SchemaVersionStandardScheme extends StandardScheme<SchemaVersion> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SchemaVersion struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaName = iprot.readString();
+              struct.setSchemaNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // VERSION
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.version = iprot.readI32();
+              struct.setVersionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CREATED_AT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.createdAt = iprot.readI64();
+              struct.setCreatedAtIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list794 = iprot.readListBegin();
+                struct.cols = new ArrayList<FieldSchema>(_list794.size);
+                FieldSchema _elem795;
+                for (int _i796 = 0; _i796 < _list794.size; ++_i796)
+                {
+                  _elem795 = new FieldSchema();
+                  _elem795.read(iprot);
+                  struct.cols.add(_elem795);
+                }
+                iprot.readListEnd();
+              }
+              struct.setColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // STATE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.state = org.apache.hadoop.hive.metastore.api.SchemaVersionState.findByValue(iprot.readI32());
+              struct.setStateIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // SCHEMA_TEXT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaText = iprot.readString();
+              struct.setSchemaTextIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // FINGERPRINT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fingerprint = iprot.readString();
+              struct.setFingerprintIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 10: // SER_DE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.serDe = new SerDeInfo();
+              struct.serDe.read(iprot);
+              struct.setSerDeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SchemaVersion struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaName != null) {
+        oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC);
+        oprot.writeString(struct.schemaName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(VERSION_FIELD_DESC);
+      oprot.writeI32(struct.version);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(CREATED_AT_FIELD_DESC);
+      oprot.writeI64(struct.createdAt);
+      oprot.writeFieldEnd();
+      if (struct.cols != null) {
+        oprot.writeFieldBegin(COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size()));
+          for (FieldSchema _iter797 : struct.cols)
+          {
+            _iter797.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.state != null) {
+        if (struct.isSetState()) {
+          oprot.writeFieldBegin(STATE_FIELD_DESC);
+          oprot.writeI32(struct.state.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.description != null) {
+        if (struct.isSetDescription()) {
+          oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+          oprot.writeString(struct.description);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.schemaText != null) {
+        if (struct.isSetSchemaText()) {
+          oprot.writeFieldBegin(SCHEMA_TEXT_FIELD_DESC);
+          oprot.writeString(struct.schemaText);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.fingerprint != null) {
+        if (struct.isSetFingerprint()) {
+          oprot.writeFieldBegin(FINGERPRINT_FIELD_DESC);
+          oprot.writeString(struct.fingerprint);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.name != null) {
+        if (struct.isSetName()) {
+          oprot.writeFieldBegin(NAME_FIELD_DESC);
+          oprot.writeString(struct.name);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.serDe != null) {
+        if (struct.isSetSerDe()) {
+          oprot.writeFieldBegin(SER_DE_FIELD_DESC);
+          struct.serDe.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SchemaVersionTupleSchemeFactory implements SchemeFactory {
+    public SchemaVersionTupleScheme getScheme() {
+      return new SchemaVersionTupleScheme();
+    }
+  }
+
+  private static class SchemaVersionTupleScheme extends TupleScheme<SchemaVersion> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetVersion()) {
+        optionals.set(1);
+      }
+      if (struct.isSetCreatedAt()) {
+        optionals.set(2);
+      }
+      if (struct.isSetCols()) {
+        optionals.set(3);
+      }
+      if (struct.isSetState()) {
+        optionals.set(4);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(5);
+      }
+      if (struct.isSetSchemaText()) {
+        optionals.set(6);
+      }
+      if (struct.isSetFingerprint()) {
+        optionals.set(7);
+      }
+      if (struct.isSetName()) {
+        optionals.set(8);
+      }
+      if (struct.isSetSerDe()) {
+        optionals.set(9);
+      }
+      oprot.writeBitSet(optionals, 10);
+      if (struct.isSetSchemaName()) {
+        oprot.writeString(struct.schemaName);
+      }
+      if (struct.isSetVersion()) {
+        oprot.writeI32(struct.version);
+      }
+      if (struct.isSetCreatedAt()) {
+        oprot.writeI64(struct.createdAt);
+      }
+      if (struct.isSetCols()) {
+        {
+          oprot.writeI32(struct.cols.size());
+          for (FieldSchema _iter798 : struct.cols)
+          {
+            _iter798.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetState()) {
+        oprot.writeI32(struct.state.getValue());
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+      if (struct.isSetSchemaText()) {
+        oprot.writeString(struct.schemaText);
+      }
+      if (struct.isSetFingerprint()) {
+        oprot.writeString(struct.fingerprint);
+      }
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetSerDe()) {
+        struct.serDe.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SchemaVersion struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(10);
+      if (incoming.get(0)) {
+        struct.schemaName = iprot.readString();
+        struct.setSchemaNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.version = iprot.readI32();
+        struct.setVersionIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.createdAt = iprot.readI64();
+        struct.setCreatedAtIsSet(true);
+      }
+      if (incoming.get(3)) {
+        {
+          org.apache.thrift.protocol.TList _list799 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.cols = new ArrayList<FieldSchema>(_list799.size);
+          FieldSchema _elem800;
+          for (int _i801 = 0; _i801 < _list799.size; ++_i801)
+          {
+            _elem800 = new FieldSchema();
+            _elem800.read(iprot);
+            struct.cols.add(_elem800);
+          }
+        }
+        struct.setColsIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.state = org.apache.hadoop.hive.metastore.api.SchemaVersionState.findByValue(iprot.readI32());
+        struct.setStateIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.schemaText = iprot.readString();
+        struct.setSchemaTextIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.fingerprint = iprot.readString();
+        struct.setFingerprintIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(9)) {
+        struct.serDe = new SerDeInfo();
+        struct.serDe.read(iprot);
+        struct.setSerDeIsSet(true);
+      }
+    }
+  }
+
+}
+
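For reference, the private writeObject/readObject pair earlier in this class is what lets this generated Thrift bean travel through plain Java serialization: both methods delegate to Thrift's TCompactProtocol over the object stream. A minimal round-trip sketch, assuming the usual generated setters and getters (setSchemaName, setVersion, setCreatedAt, setCols, and their get counterparts) and that no required-field validation rejects the partially filled struct:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;

public class SchemaVersionSerDemo {
  public static void main(String[] args) throws Exception {
    SchemaVersion sv = new SchemaVersion();
    sv.setSchemaName("events");                  // assumed generated setter
    sv.setVersion(1);
    sv.setCreatedAt(System.currentTimeMillis());
    sv.setCols(new ArrayList<FieldSchema>());

    // writeObject() routes this through TCompactProtocol.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(sv);
    }

    // readObject() resets __isset_bitfield, then reads the same protocol back.
    try (ObjectInputStream ois =
        new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      SchemaVersion copy = (SchemaVersion) ois.readObject();
      System.out.println(copy.getSchemaName() + " v" + copy.getVersion());
    }
  }
}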

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersionState.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersionState.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersionState.java
new file mode 100644
index 0000000..9dd96dc
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersionState.java
@@ -0,0 +1,63 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum SchemaVersionState implements org.apache.thrift.TEnum {
+  INITIATED(1),
+  START_REVIEW(2),
+  CHANGES_REQUIRED(3),
+  REVIEWED(4),
+  ENABLED(5),
+  DISABLED(6),
+  ARCHIVED(7),
+  DELETED(8);
+
+  private final int value;
+
+  private SchemaVersionState(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static SchemaVersionState findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return INITIATED;
+      case 2:
+        return START_REVIEW;
+      case 3:
+        return CHANGES_REQUIRED;
+      case 4:
+        return REVIEWED;
+      case 5:
+        return ENABLED;
+      case 6:
+        return DISABLED;
+      case 7:
+        return ARCHIVED;
+      case 8:
+        return DELETED;
+      default:
+        return null;
+    }
+  }
+}
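As a quick illustration of the wire-value contract in the enum above: getValue() returns the integer assigned in the Thrift IDL, and findByValue() returns null for integers it does not know rather than throwing, so code decoding values written by a newer IDL must null-check. A self-contained sketch:

import org.apache.hadoop.hive.metastore.api.SchemaVersionState;

public class SchemaVersionStateDemo {
  public static void main(String[] args) {
    SchemaVersionState s = SchemaVersionState.ENABLED;
    int wire = s.getValue();                                   // 5, per the IDL
    System.out.println(SchemaVersionState.findByValue(wire));  // ENABLED
    System.out.println(SchemaVersionState.findByValue(99));    // null, not an exception
  }
}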

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
index b744f44..a7aba9f 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
@@ -41,6 +41,10 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField SERIALIZATION_LIB_FIELD_DESC = new org.apache.thrift.protocol.TField("serializationLib", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)3);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField SERIALIZER_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("serializerClass", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField DESERIALIZER_CLASS_FIELD_DESC = new org.apache.thrift.protocol.TField("deserializerClass", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField SERDE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("serdeType", org.apache.thrift.protocol.TType.I32, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -51,12 +55,24 @@ import org.slf4j.LoggerFactory;
   private String name; // required
   private String serializationLib; // required
   private Map<String,String> parameters; // required
+  private String description; // optional
+  private String serializerClass; // optional
+  private String deserializerClass; // optional
+  private SerdeType serdeType; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     NAME((short)1, "name"),
     SERIALIZATION_LIB((short)2, "serializationLib"),
-    PARAMETERS((short)3, "parameters");
+    PARAMETERS((short)3, "parameters"),
+    DESCRIPTION((short)4, "description"),
+    SERIALIZER_CLASS((short)5, "serializerClass"),
+    DESERIALIZER_CLASS((short)6, "deserializerClass"),
+    /**
+     * 
+     * @see SerdeType
+     */
+    SERDE_TYPE((short)7, "serdeType");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -77,6 +93,14 @@ import org.slf4j.LoggerFactory;
           return SERIALIZATION_LIB;
         case 3: // PARAMETERS
           return PARAMETERS;
+        case 4: // DESCRIPTION
+          return DESCRIPTION;
+        case 5: // SERIALIZER_CLASS
+          return SERIALIZER_CLASS;
+        case 6: // DESERIALIZER_CLASS
+          return DESERIALIZER_CLASS;
+        case 7: // SERDE_TYPE
+          return SERDE_TYPE;
         default:
           return null;
       }
@@ -117,6 +141,7 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
+  private static final _Fields optionals[] = {_Fields.DESCRIPTION,_Fields.SERIALIZER_CLASS,_Fields.DESERIALIZER_CLASS,_Fields.SERDE_TYPE};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -128,6 +153,14 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SERIALIZER_CLASS, new org.apache.thrift.meta_data.FieldMetaData("serializerClass", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DESERIALIZER_CLASS, new org.apache.thrift.meta_data.FieldMetaData("deserializerClass", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SERDE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("serdeType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SerdeType.class)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SerDeInfo.class, metaDataMap);
   }
@@ -160,6 +193,18 @@ import org.slf4j.LoggerFactory;
       Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
       this.parameters = __this__parameters;
     }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+    if (other.isSetSerializerClass()) {
+      this.serializerClass = other.serializerClass;
+    }
+    if (other.isSetDeserializerClass()) {
+      this.deserializerClass = other.deserializerClass;
+    }
+    if (other.isSetSerdeType()) {
+      this.serdeType = other.serdeType;
+    }
   }
 
   public SerDeInfo deepCopy() {
@@ -171,6 +216,10 @@ import org.slf4j.LoggerFactory;
     this.name = null;
     this.serializationLib = null;
     this.parameters = null;
+    this.description = null;
+    this.serializerClass = null;
+    this.deserializerClass = null;
+    this.serdeType = null;
   }
 
   public String getName() {
@@ -253,6 +302,106 @@ import org.slf4j.LoggerFactory;
     }
   }
 
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public String getSerializerClass() {
+    return this.serializerClass;
+  }
+
+  public void setSerializerClass(String serializerClass) {
+    this.serializerClass = serializerClass;
+  }
+
+  public void unsetSerializerClass() {
+    this.serializerClass = null;
+  }
+
+  /** Returns true if field serializerClass is set (has been assigned a value) and false otherwise */
+  public boolean isSetSerializerClass() {
+    return this.serializerClass != null;
+  }
+
+  public void setSerializerClassIsSet(boolean value) {
+    if (!value) {
+      this.serializerClass = null;
+    }
+  }
+
+  public String getDeserializerClass() {
+    return this.deserializerClass;
+  }
+
+  public void setDeserializerClass(String deserializerClass) {
+    this.deserializerClass = deserializerClass;
+  }
+
+  public void unsetDeserializerClass() {
+    this.deserializerClass = null;
+  }
+
+  /** Returns true if field deserializerClass is set (has been assigned a value) and false otherwise */
+  public boolean isSetDeserializerClass() {
+    return this.deserializerClass != null;
+  }
+
+  public void setDeserializerClassIsSet(boolean value) {
+    if (!value) {
+      this.deserializerClass = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SerdeType
+   */
+  public SerdeType getSerdeType() {
+    return this.serdeType;
+  }
+
+  /**
+   * 
+   * @see SerdeType
+   */
+  public void setSerdeType(SerdeType serdeType) {
+    this.serdeType = serdeType;
+  }
+
+  public void unsetSerdeType() {
+    this.serdeType = null;
+  }
+
+  /** Returns true if field serdeType is set (has been assigned a value) and false otherwise */
+  public boolean isSetSerdeType() {
+    return this.serdeType != null;
+  }
+
+  public void setSerdeTypeIsSet(boolean value) {
+    if (!value) {
+      this.serdeType = null;
+    }
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case NAME:
@@ -279,6 +428,38 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    case SERIALIZER_CLASS:
+      if (value == null) {
+        unsetSerializerClass();
+      } else {
+        setSerializerClass((String)value);
+      }
+      break;
+
+    case DESERIALIZER_CLASS:
+      if (value == null) {
+        unsetDeserializerClass();
+      } else {
+        setDeserializerClass((String)value);
+      }
+      break;
+
+    case SERDE_TYPE:
+      if (value == null) {
+        unsetSerdeType();
+      } else {
+        setSerdeType((SerdeType)value);
+      }
+      break;
+
     }
   }
 
@@ -293,6 +474,18 @@ import org.slf4j.LoggerFactory;
     case PARAMETERS:
       return getParameters();
 
+    case DESCRIPTION:
+      return getDescription();
+
+    case SERIALIZER_CLASS:
+      return getSerializerClass();
+
+    case DESERIALIZER_CLASS:
+      return getDeserializerClass();
+
+    case SERDE_TYPE:
+      return getSerdeType();
+
     }
     throw new IllegalStateException();
   }
@@ -310,6 +503,14 @@ import org.slf4j.LoggerFactory;
       return isSetSerializationLib();
     case PARAMETERS:
       return isSetParameters();
+    case DESCRIPTION:
+      return isSetDescription();
+    case SERIALIZER_CLASS:
+      return isSetSerializerClass();
+    case DESERIALIZER_CLASS:
+      return isSetDeserializerClass();
+    case SERDE_TYPE:
+      return isSetSerdeType();
     }
     throw new IllegalStateException();
   }
@@ -354,6 +555,42 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    boolean this_present_serializerClass = true && this.isSetSerializerClass();
+    boolean that_present_serializerClass = true && that.isSetSerializerClass();
+    if (this_present_serializerClass || that_present_serializerClass) {
+      if (!(this_present_serializerClass && that_present_serializerClass))
+        return false;
+      if (!this.serializerClass.equals(that.serializerClass))
+        return false;
+    }
+
+    boolean this_present_deserializerClass = true && this.isSetDeserializerClass();
+    boolean that_present_deserializerClass = true && that.isSetDeserializerClass();
+    if (this_present_deserializerClass || that_present_deserializerClass) {
+      if (!(this_present_deserializerClass && that_present_deserializerClass))
+        return false;
+      if (!this.deserializerClass.equals(that.deserializerClass))
+        return false;
+    }
+
+    boolean this_present_serdeType = true && this.isSetSerdeType();
+    boolean that_present_serdeType = true && that.isSetSerdeType();
+    if (this_present_serdeType || that_present_serdeType) {
+      if (!(this_present_serdeType && that_present_serdeType))
+        return false;
+      if (!this.serdeType.equals(that.serdeType))
+        return false;
+    }
+
     return true;
   }
 
@@ -376,6 +613,26 @@ import org.slf4j.LoggerFactory;
     if (present_parameters)
       list.add(parameters);
 
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    boolean present_serializerClass = true && (isSetSerializerClass());
+    list.add(present_serializerClass);
+    if (present_serializerClass)
+      list.add(serializerClass);
+
+    boolean present_deserializerClass = true && (isSetDeserializerClass());
+    list.add(present_deserializerClass);
+    if (present_deserializerClass)
+      list.add(deserializerClass);
+
+    boolean present_serdeType = true && (isSetSerdeType());
+    list.add(present_serdeType);
+    if (present_serdeType)
+      list.add(serdeType.getValue());
+
     return list.hashCode();
   }
 
@@ -417,6 +674,46 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSerializerClass()).compareTo(other.isSetSerializerClass());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSerializerClass()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serializerClass, other.serializerClass);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDeserializerClass()).compareTo(other.isSetDeserializerClass());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDeserializerClass()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deserializerClass, other.deserializerClass);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSerdeType()).compareTo(other.isSetSerdeType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSerdeType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serdeType, other.serdeType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -460,6 +757,46 @@ import org.slf4j.LoggerFactory;
       sb.append(this.parameters);
     }
     first = false;
+    if (isSetDescription()) {
+      if (!first) sb.append(", ");
+      sb.append("description:");
+      if (this.description == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.description);
+      }
+      first = false;
+    }
+    if (isSetSerializerClass()) {
+      if (!first) sb.append(", ");
+      sb.append("serializerClass:");
+      if (this.serializerClass == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.serializerClass);
+      }
+      first = false;
+    }
+    if (isSetDeserializerClass()) {
+      if (!first) sb.append(", ");
+      sb.append("deserializerClass:");
+      if (this.deserializerClass == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.deserializerClass);
+      }
+      first = false;
+    }
+    if (isSetSerdeType()) {
+      if (!first) sb.append(", ");
+      sb.append("serdeType:");
+      if (this.serdeType == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.serdeType);
+      }
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -539,6 +876,38 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 4: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // SERIALIZER_CLASS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.serializerClass = iprot.readString();
+              struct.setSerializerClassIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // DESERIALIZER_CLASS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.deserializerClass = iprot.readString();
+              struct.setDeserializerClassIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // SERDE_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.serdeType = org.apache.hadoop.hive.metastore.api.SerdeType.findByValue(iprot.readI32());
+              struct.setSerdeTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -575,6 +944,34 @@ import org.slf4j.LoggerFactory;
         }
         oprot.writeFieldEnd();
       }
+      if (struct.description != null) {
+        if (struct.isSetDescription()) {
+          oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+          oprot.writeString(struct.description);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.serializerClass != null) {
+        if (struct.isSetSerializerClass()) {
+          oprot.writeFieldBegin(SERIALIZER_CLASS_FIELD_DESC);
+          oprot.writeString(struct.serializerClass);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.deserializerClass != null) {
+        if (struct.isSetDeserializerClass()) {
+          oprot.writeFieldBegin(DESERIALIZER_CLASS_FIELD_DESC);
+          oprot.writeString(struct.deserializerClass);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.serdeType != null) {
+        if (struct.isSetSerdeType()) {
+          oprot.writeFieldBegin(SERDE_TYPE_FIELD_DESC);
+          oprot.writeI32(struct.serdeType.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -602,7 +999,19 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetParameters()) {
         optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetDescription()) {
+        optionals.set(3);
+      }
+      if (struct.isSetSerializerClass()) {
+        optionals.set(4);
+      }
+      if (struct.isSetDeserializerClass()) {
+        optionals.set(5);
+      }
+      if (struct.isSetSerdeType()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
       if (struct.isSetName()) {
         oprot.writeString(struct.name);
       }
@@ -619,12 +1028,24 @@ import org.slf4j.LoggerFactory;
           }
         }
       }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+      if (struct.isSetSerializerClass()) {
+        oprot.writeString(struct.serializerClass);
+      }
+      if (struct.isSetDeserializerClass()) {
+        oprot.writeString(struct.deserializerClass);
+      }
+      if (struct.isSetSerdeType()) {
+        oprot.writeI32(struct.serdeType.getValue());
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, SerDeInfo struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(7);
       if (incoming.get(0)) {
         struct.name = iprot.readString();
         struct.setNameIsSet(true);
@@ -648,6 +1069,22 @@ import org.slf4j.LoggerFactory;
         }
         struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true);
       }
+      if (incoming.get(3)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.serializerClass = iprot.readString();
+        struct.setSerializerClassIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.deserializerClass = iprot.readString();
+        struct.setDeserializerClassIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.serdeType = org.apache.hadoop.hive.metastore.api.SerdeType.findByValue(iprot.readI32());
+        struct.setSerdeTypeIsSet(true);
+      }
     }
   }
 

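To make the optional-field plumbing in this patch concrete: the four new fields default to unset, the writers above skip any field whose isSet* method returns false, and unset* nulls the field so it is skipped again. A small usage sketch, assuming the standard generated setters for the pre-existing fields (setName, setSerializationLib):

import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SerdeType;

public class SerDeInfoDemo {
  public static void main(String[] args) {
    SerDeInfo info = new SerDeInfo();
    info.setName("my_serde");                    // assumed generated setter
    info.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");

    System.out.println(info.isSetSerdeType());   // false: optional field starts unset

    info.setSerdeType(SerdeType.SCHEMA_REGISTRY);
    System.out.println(info.isSetSerdeType());   // true: will now be serialized

    info.unsetSerdeType();
    System.out.println(info.isSetSerdeType());   // false: skipped on the wire again
  }
}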
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerdeType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerdeType.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerdeType.java
new file mode 100644
index 0000000..7daabed
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerdeType.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum SerdeType implements org.apache.thrift.TEnum {
+  HIVE(1),
+  SCHEMA_REGISTRY(2);
+
+  private final int value;
+
+  private SerdeType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static SerdeType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return HIVE;
+      case 2:
+        return SCHEMA_REGISTRY;
+      default:
+        return null;
+    }
+  }
+}


[34/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
HIVE-17983 Make the standalone metastore generate tarballs etc.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b9526a7a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b9526a7a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b9526a7a

Branch: refs/heads/standalone-metastore
Commit: b9526a7ac987b8a07b41531ab14e381bbda7f8ab
Parents: 002233b
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu Oct 19 16:49:38 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Dec 18 14:56:37 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hive/beeline/HiveSchemaTool.java |    6 +-
 bin/ext/metastore.sh                            |   41 -
 binary-package-licenses/README                  |    1 +
 .../org/apache/hive/beeline/TestSchemaTool.java |   16 +-
 standalone-metastore/DEV-README                 |   40 +
 .../binary-package-licenses/NOTICE              |    4 +
 .../com.google.protobuf-LICENSE                 |   42 +
 .../javax.transaction.transaction-api-LICENSE   |  128 ++
 .../binary-package-licenses/javolution-LICENSE  |   25 +
 .../binary-package-licenses/jline-LICENSE       |   32 +
 .../binary-package-licenses/org.antlr-LICENSE   |   27 +
 .../binary-package-licenses/sqlline-LICENSE     |   33 +
 standalone-metastore/pom.xml                    |   97 +-
 standalone-metastore/src/assembly/bin.xml       |  126 ++
 standalone-metastore/src/assembly/src.xml       |   53 +
 .../hive/metastore/IMetaStoreSchemaInfo.java    |    7 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |   16 +-
 .../hive/metastore/tools/HiveSchemaHelper.java  |   80 +-
 .../metastore/tools/MetastoreSchemaTool.java    | 1309 ++++++++++++++
 .../src/main/resources/metastore-log4j2.xml     |   30 +
 standalone-metastore/src/main/scripts/base      |  238 +++
 .../src/main/scripts/ext/metastore.sh           |   41 +
 .../src/main/scripts/ext/schemaTool.sh          |   33 +
 .../src/main/scripts/metastore-config.sh        |   70 +
 .../src/main/scripts/schematool                 |   21 +
 .../src/main/scripts/start-metastore            |   21 +
 .../main/sql/derby/hive-schema-2.3.0.derby.sql  |  456 +++++
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |  508 ++++++
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |   46 +
 .../src/main/sql/derby/upgrade.order.derby      |    1 +
 .../src/main/sql/mssql/create-user.mssql.sql    |    5 +
 .../main/sql/mssql/hive-schema-2.3.0.mssql.sql  | 1023 +++++++++++
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  | 1112 ++++++++++++
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |  106 ++
 .../src/main/sql/mssql/upgrade.order.mssql      |    1 +
 .../src/main/sql/mysql/create-user.mysql.sql    |    8 +
 .../main/sql/mysql/hive-schema-2.3.0.mysql.sql  |  970 ++++++++++
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  | 1045 +++++++++++
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |   90 +
 .../src/main/sql/mysql/upgrade.order.mysql      |    1 +
 .../src/main/sql/oracle/create-user.oracle.sql  |    3 +
 .../sql/oracle/hive-schema-2.3.0.oracle.sql     |  926 ++++++++++
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     | 1014 +++++++++++
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |  107 ++
 .../src/main/sql/oracle/upgrade.order.oracle    |    1 +
 .../main/sql/postgres/create-user.postgres.sql  |    2 +
 .../sql/postgres/hive-schema-2.3.0.postgres.sql | 1593 ++++++++++++++++
 .../sql/postgres/hive-schema-3.0.0.postgres.sql | 1699 ++++++++++++++++++
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |  121 ++
 .../main/sql/postgres/upgrade.order.postgres    |    1 +
 .../hive/metastore/dbinstall/DbInstallBase.java |  280 +++
 .../hive/metastore/dbinstall/ITestMysql.java    |   82 +
 .../hive/metastore/dbinstall/ITestOracle.java   |   83 +
 .../hive/metastore/dbinstall/ITestPostgres.java |   82 +
 .../metastore/dbinstall/ITestSqlServer.java     |   84 +
 .../tools/TestMetastoreSchemaTool.java          |   67 +
 .../tools/TestSchemaToolForMetastore.java       |  467 +++++
 57 files changed, 14431 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 74591ac..ca05b2a 100644
--- a/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ b/beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -159,12 +159,12 @@ public class HiveSchemaTool {
 
   private NestedScriptParser getDbCommandParser(String dbType, String metaDbType) {
     return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName,
-	passWord, hiveConf, metaDbType);
+	passWord, hiveConf, metaDbType, false);
   }
 
   private NestedScriptParser getDbCommandParser(String dbType) {
     return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName,
-	passWord, hiveConf, null);
+	passWord, hiveConf, null, false);
   }
 
   /***
@@ -804,7 +804,7 @@ public class HiveSchemaTool {
 
   private List<String> findCreateTable(String path, List<String> tableList)
       throws Exception {
-    NestedScriptParser sp           = HiveSchemaHelper.getDbCommandParser(dbType);
+    NestedScriptParser sp           = HiveSchemaHelper.getDbCommandParser(dbType, false);
     Matcher matcher                 = null;
     Pattern regexp                  = null;
     List<String> subs               = new ArrayList<String>();

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/bin/ext/metastore.sh
----------------------------------------------------------------------
diff --git a/bin/ext/metastore.sh b/bin/ext/metastore.sh
deleted file mode 100644
index cc08c95..0000000
--- a/bin/ext/metastore.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-THISSERVICE=metastore
-export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
-
-metastore() {
-  echo "$(timestamp): Starting Hive Metastore Server"
-  CLASS=org.apache.hadoop.hive.metastore.HiveMetaStore
-  if $cygwin; then
-    HIVE_LIB=`cygpath -w "$HIVE_LIB"`
-  fi
-  JAR=${HIVE_LIB}/hive-metastore-*.jar
-
-  # hadoop 20 or newer - skip the aux_jars option and hiveconf
-
-  export HADOOP_CLIENT_OPTS=" -Dproc_metastore $HADOOP_CLIENT_OPTS "
-  export HADOOP_OPTS="$HIVE_METASTORE_HADOOP_OPTS $HADOOP_OPTS"
-  exec $HADOOP jar $JAR $CLASS "$@"
-}
-
-metastore_help() {
-  metastore -h
-}
-
-timestamp()
-{
- date +"%Y-%m-%d %T"
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/binary-package-licenses/README
----------------------------------------------------------------------
diff --git a/binary-package-licenses/README b/binary-package-licenses/README
index ef127e3..c801896 100644
--- a/binary-package-licenses/README
+++ b/binary-package-licenses/README
@@ -42,6 +42,7 @@ guava
 guice*
 hbase*
 hibernate-validator
+HikariCP
 htrace-core
 http-client
 httpclient

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
index 9f08693..0c34a1e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java
@@ -338,7 +338,7 @@ public class TestSchemaTool extends TestCase {
     String expectedSQL = StringUtils.join(resultScript, System.getProperty("line.separator")) +
         System.getProperty("line.separator");
     File testScriptFile = generateTestScript(testScript);
-    String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby")
+    String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby", false)
         .buildCommand(testScriptFile.getParentFile().getPath(),
             testScriptFile.getName());
 
@@ -380,7 +380,7 @@ public class TestSchemaTool extends TestCase {
       };
 
     File testScriptFile = generateTestScript(parentTestScript);
-    String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby")
+    String flattenedSql = HiveSchemaHelper.getDbCommandParser("derby", false)
         .buildCommand(testScriptFile.getParentFile().getPath(),
             testScriptFile.getName());
     assertFalse(flattenedSql.contains("RUN"));
@@ -425,7 +425,7 @@ public class TestSchemaTool extends TestCase {
       };
 
     File testScriptFile = generateTestScript(parentTestScript);
-    String flattenedSql = HiveSchemaHelper.getDbCommandParser("mysql")
+    String flattenedSql = HiveSchemaHelper.getDbCommandParser("mysql", false)
         .buildCommand(testScriptFile.getParentFile().getPath(),
             testScriptFile.getName());
     assertFalse(flattenedSql.contains("RUN"));
@@ -467,7 +467,7 @@ public class TestSchemaTool extends TestCase {
     String expectedSQL = StringUtils.join(resultScript, System.getProperty("line.separator")) +
         System.getProperty("line.separator");
     File testScriptFile = generateTestScript(testScript);
-    NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql");
+    NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql", false);
     String flattenedSql = testDbParser.buildCommand(testScriptFile.getParentFile().getPath(),
         testScriptFile.getName());
 
@@ -502,7 +502,7 @@ public class TestSchemaTool extends TestCase {
     String expectedSQL = StringUtils.join(parsedScript, System.getProperty("line.separator")) +
         System.getProperty("line.separator");
     File testScriptFile = generateTestScript(testScript);
-    NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql");
+    NestedScriptParser testDbParser = HiveSchemaHelper.getDbCommandParser("mysql", false);
     String flattenedSql = testDbParser.buildCommand(testScriptFile.getParentFile().getPath(),
         testScriptFile.getName());
 
@@ -544,7 +544,7 @@ public class TestSchemaTool extends TestCase {
       };
 
     File testScriptFile = generateTestScript(parentTestScript);
-    String flattenedSql = HiveSchemaHelper.getDbCommandParser("oracle")
+    String flattenedSql = HiveSchemaHelper.getDbCommandParser("oracle", false)
         .buildCommand(testScriptFile.getParentFile().getPath(),
             testScriptFile.getName());
     assertFalse(flattenedSql.contains("@"));
@@ -576,7 +576,7 @@ public class TestSchemaTool extends TestCase {
     };
 
     NestedScriptParser noDbOptParser = HiveSchemaHelper
-        .getDbCommandParser("postgres");
+        .getDbCommandParser("postgres", false);
     String expectedSQL = StringUtils.join(
         expectedScriptWithOptionPresent, System.getProperty("line.separator")) +
             System.getProperty("line.separator");
@@ -594,7 +594,7 @@ public class TestSchemaTool extends TestCase {
     NestedScriptParser dbOptParser = HiveSchemaHelper.getDbCommandParser(
         "postgres",
         PostgresCommandParser.POSTGRES_SKIP_STANDARD_STRINGS_DBOPT,
-        null, null, null, null);
+        null, null, null, null, false);
     expectedSQL = StringUtils.join(
         expectedScriptWithOptionAbsent, System.getProperty("line.separator")) +
             System.getProperty("line.separator");

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/DEV-README
----------------------------------------------------------------------
diff --git a/standalone-metastore/DEV-README b/standalone-metastore/DEV-README
new file mode 100644
index 0000000..446f580
--- /dev/null
+++ b/standalone-metastore/DEV-README
@@ -0,0 +1,40 @@
+This file contains information for developers and testers.
+
+Testing metastore installation and upgrade against databases beyond Derby:
+There are integration tests that install and upgrade the
+metastore on MySQL (actually MariaDB is used), Oracle, Postgres, and SQLServer.
+These tests are not run by default because they take several minutes each and
+they require the developer to download the JDBC driver for Oracle.
+They are run in the integration-test phase.
+
+Each ITest runs two tests, one that installs the latest version of the
+database and one that installs the latest version minus one and then upgrades
+the database.
+
+To run one of the tests you will need to explicitly turn on integration testing,
+in the Oracle case specify the location of the JDBC driver, and optionally
+specify which test you want to run.  To run all of the tests, do:
+
+mvn verify -Ditest.jdbc.jars=_oracle_jar_path -DskipITests=false -Dtest=nosuch
+
+To run just one test, do:
+
+mvn verify -DskipITests=false -Dit.test=ITestMysql -Dtest=nosuch
+
+You can download the Oracle driver at 
+http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
+
+If you wish to use one of these containers to run your own tests against a
+non-Derby version of the metastore, you can do that as well.  You must specify
+that only the install test be run (change -Dit.test=ITestMysql in the example
+above to -Dit.test=ITestMysql#install) and tell it to leave the docker container
+running by adding -Dmetastore.itest.no.stop.container=true.  You will then need
+to stop and remove the container yourself once you have finished.  The container
+is recreated for each run of the test, so you cannot rerun the test until you
+have stopped and removed it.  You can construct the connection values to put in
+metastore-site.xml from the information in the appropriate ITest file (e.g.,
+from ITestMysql you can find that the JDBC URL is
+"jdbc:mysql://localhost:3306/hivedb", the JDBC driver is
+"org.mariadb.jdbc.Driver", and the password is "hivepassword").  The user is
+always "hiveuser".
+
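For concreteness, the connection values quoted above assemble into a metastore-site.xml along these lines; the javax.jdo.option.* keys are the standard metastore connection properties, and this snippet is an illustration rather than part of the patch:

<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost:3306/hivedb</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>org.mariadb.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hiveuser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>hivepassword</value>
  </property>
</configuration>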

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/NOTICE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/NOTICE b/standalone-metastore/binary-package-licenses/NOTICE
new file mode 100644
index 0000000..76cce78
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/NOTICE
@@ -0,0 +1,4 @@
+Binary distributions of this software contain jars that are not licensed under the
+Apache License 2.0.  Additional licenses attached to these jars are contained in the
+same directory as this NOTICE file.  
+

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE b/standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE
new file mode 100644
index 0000000..f028c82
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/com.google.protobuf-LICENSE
@@ -0,0 +1,42 @@
+This license applies to all parts of Protocol Buffers except the following:
+
+  - Atomicops support for generic gcc, located in
+    src/google/protobuf/stubs/atomicops_internals_generic_gcc.h.
+    This file is copyrighted by Red Hat Inc.
+
+  - Atomicops support for AIX/POWER, located in
+    src/google/protobuf/stubs/atomicops_internals_power.h.
+    This file is copyrighted by Bloomberg Finance LP.
+
+Copyright 2014, Google Inc.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Code generated by the Protocol Buffer compiler is owned by the owner
+of the input file used when generating it.  This code is not
+standalone and requires a support library to be linked with it.  This
+support library is itself covered by the above license.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE b/standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE
new file mode 100644
index 0000000..d7b7cfe
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/javax.transaction.transaction-api-LICENSE
@@ -0,0 +1,128 @@
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 (CDDL-1.0)
+1. Definitions.
+
+1.1. Contributor means each individual or entity that creates or contributes to the creation of Modifications.
+
+1.2. Contributor Version means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.
+
+1.3. Covered Software means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.
+
+1.4. Executable means the Covered Software in any form other than Source Code.
+
+1.5. Initial Developer means the individual or entity that first makes Original Software available under this License.
+
+1.6. Larger Work means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.
+
+1.7. License means this document.
+
+1.8. Licensable means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.
+
+1.9. Modifications means the Source Code and Executable form of any of the following:
+
+A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;
+
+B. Any new file that contains any part of the Original Software or previous Modification; or
+
+C. Any new file that is contributed or otherwise made available under the terms of this License.
+
+1.10. Original Software means the Source Code and Executable form of computer software code that is originally released under this License.
+
+1.11. Patent Claims means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.
+
+1.12. Source Code means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.
+
+1.13. You (or Your) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, You includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, control means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
+
+2. License Grants.
+
+2.1. The Initial Developer Grant.
+
+Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and
+
+(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).
+
+(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.
+
+(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.
+
+2.2. Contributor Grant.
+
+Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and
+
+(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).
+
+(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.
+
+(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.
+
+3. Distribution Obligations.
+
+3.1. Availability of Source Code.
+
+Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.
+
+3.2. Modifications.
+
+The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.
+
+3.3. Required Notices.
+
+You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.
+
+3.4. Application of Additional Terms.
+
+You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.
+
+3.5. Distribution of Executable Versions.
+
+You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.
+
+3.6. Larger Works.
+
+You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.
+
+4. Versions of the License.
+
+4.1. New Versions.
+
+Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.
+
+4.2. Effect of New Versions.
+
+You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.
+
+4.3. Modified Versions.
+
+When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.
+
+5. DISCLAIMER OF WARRANTY.
+
+COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+6. TERMINATION.
+
+6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.
+
+6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as Participant) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.
+
+6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+The Covered Software is a commercial item, as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software (as that term is defined at 48 C.F.R. 252.227-7014(a)(1)) and commercial computer software documentation as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.
+
+9. MISCELLANEOUS.
+
+This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.
+
+10. RESPONSIBILITY FOR CLAIMS.
+
+As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/javolution-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/javolution-LICENSE b/standalone-metastore/binary-package-licenses/javolution-LICENSE
new file mode 100644
index 0000000..bd6788d
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/javolution-LICENSE
@@ -0,0 +1,25 @@
+Javolution - Java(tm) Solution for Real-Time and Embedded Systems
+Copyright (c) 2012, Javolution (http://javolution.org/)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/jline-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/jline-LICENSE b/standalone-metastore/binary-package-licenses/jline-LICENSE
new file mode 100644
index 0000000..246f54f
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/jline-LICENSE
@@ -0,0 +1,32 @@
+Copyright (c) 2002-2006, Marc Prud'hommeaux <mw...@cornell.edu>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following
+conditions are met:
+
+Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with
+the distribution.
+
+Neither the name of JLine nor the names of its contributors
+may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/org.antlr-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/org.antlr-LICENSE b/standalone-metastore/binary-package-licenses/org.antlr-LICENSE
new file mode 100644
index 0000000..f6d28b7
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/org.antlr-LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2003-2008, Terence Parr
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.  Redistributions
+in binary form must reproduce the above copyright notice, this list of
+conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.  Neither the name of
+the author nor the names of its contributors may be used to endorse or
+promote products derived from this software without specific prior
+written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/binary-package-licenses/sqlline-LICENSE
----------------------------------------------------------------------
diff --git a/standalone-metastore/binary-package-licenses/sqlline-LICENSE b/standalone-metastore/binary-package-licenses/sqlline-LICENSE
new file mode 100644
index 0000000..47e0391
--- /dev/null
+++ b/standalone-metastore/binary-package-licenses/sqlline-LICENSE
@@ -0,0 +1,33 @@
+Copyright (c) 2002,2003,2004,2005,2006 Marc Prud'hommeaux
+All rights reserved.
+
+
+Redistribution and use in source and binary forms,
+with or without modification, are permitted provided
+that the following conditions are met:
+	
+Redistributions of source code must retain the above
+copyright notice, this list of conditions and the following
+disclaimer.
+Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and/or other materials
+provided with the distribution.
+Neither the name of the <ORGANIZATION> nor the names
+of its contributors may be used to endorse or promote
+products derived from this software without specific
+prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
+AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index d87863e..87efece 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -38,6 +38,7 @@
     <maven.compiler.target>1.8</maven.compiler.target>
     <maven.compiler.useIncrementalCompilation>false</maven.compiler.useIncrementalCompilation>
     <maven.repo.local>${settings.localRepository}</maven.repo.local>
+    <maven.assembly.plugin.version>2.3</maven.assembly.plugin.version>
 
     <!-- Test Properties -->
     <log4j.conf.dir>${project.basedir}/src/test/resources</log4j.conf.dir>
@@ -45,6 +46,8 @@
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>file://</test.warehouse.scheme>
     <test.forkcount>1</test.forkcount>
+    <skipITests>true</skipITests>
+    <itest.jdbc.jars>set-this-to-colon-separated-full-path-list-of-jars-to-run-integration-tests</itest.jdbc.jars>
 
     <!-- Plugin versions -->
     <ant.contrib.version>1.0b3</ant.contrib.version>
@@ -75,6 +78,7 @@
     <log4j2.version>2.8.2</log4j2.version>
     <mockito-all.version>1.10.19</mockito-all.version>
     <protobuf.version>2.5.0</protobuf.version>
+    <sqlline.version>1.3.0</sqlline.version>
     <storage-api.version>3.0.0-SNAPSHOT</storage-api.version>
 
     <!-- Thrift properties -->
@@ -277,10 +281,21 @@
       <version>1.4.0</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>sqlline</groupId>
+      <artifactId>sqlline</artifactId>
+      <version>${sqlline.version}</version>
+    </dependency>
 
     <!-- test scope dependencies -->
 
     <dependency>
+      <groupId>com.microsoft.sqlserver</groupId>
+      <artifactId>mssql-jdbc</artifactId>
+      <version>6.2.1.jre8</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <version>${junit.version}</version>
@@ -292,6 +307,20 @@
       <version>${mockito-all.version}</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+        <!-- Note, this is LGPL.  But we're only using it in a test and not changing it, so I
+        believe we are fine. -->
+      <groupId>org.mariadb.jdbc</groupId>
+      <artifactId>mariadb-java-client</artifactId>
+      <version>2.2.0</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.postgresql</groupId>
+      <artifactId>postgresql</artifactId>
+      <version>9.3-1102-jdbc41</version>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <profiles>
@@ -467,6 +496,21 @@
               <goal>run</goal>
             </goals>
           </execution>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="${test.tmp.dir}/scripts/metastore/upgrade" />
+                <copy todir="${test.tmp.dir}/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
         </executions>
       </plugin>
       <plugin>
@@ -497,11 +541,62 @@
           </execution>
         </executions>
       </plugin>
-      <!-- TODO MS-SPLIT assembly plugin -->
       <!-- TODO MS-SPLIT findbugs plugin -->
       <!-- TODO MS-SPLIT javadoc plugin -->
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>${maven.assembly.plugin.version}</version>
+        <executions>
+          <execution>
+            <id>assemble</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+            <configuration>
+              <finalName>apache-hive-metastore-${project.version}</finalName>
+              <descriptors>
+                <descriptor>src/assembly/bin.xml</descriptor>
+                <descriptor>src/assembly/src.xml</descriptor>
+              </descriptors>
+              <tarLongFileMode>gnu</tarLongFileMode>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+        <version>2.20.1</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>integration-test</goal>
+              <goal>verify</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <redirectTestOutputToFile>true</redirectTestOutputToFile>
+          <reuseForks>false</reuseForks>
+          <argLine>-Xmx2048m</argLine>
+          <failIfNoTests>false</failIfNoTests>
+          <systemPropertyVariables>
+            <log4j.debug>true</log4j.debug>
+            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
+            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
+            <hive.in.test>true</hive.in.test>
+          </systemPropertyVariables>
+          <additionalClasspathElements>
+            <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
+            <additionalClasspathElement>${itest.jdbc.jars}</additionalClasspathElement>
+          </additionalClasspathElements>
+          <skipITs>${skipITests}</skipITs> <!-- set this to false to run these tests -->
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>
         <version>2.16</version>
         <configuration>

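Usage note (not part of the commit, and the jar paths below are illustrative): the failsafe execution above is skipped by default via the skipITests property, and itest.jdbc.jars must point at locally downloaded driver jars. Assuming both properties are overridden on the command line, the new integration tests can be run with something like:

    mvn verify -DskipITests=false -Ditest.jdbc.jars=/path/to/mysql-connector.jar:/path/to/postgresql.jar

Per the property's placeholder text, the jars form a colon-separated list that is appended to the test classpath.
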
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/assembly/bin.xml b/standalone-metastore/src/assembly/bin.xml
new file mode 100644
index 0000000..15b6213
--- /dev/null
+++ b/standalone-metastore/src/assembly/bin.xml
@@ -0,0 +1,126 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+
+  <id>bin</id>
+
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>apache-hive-metastore-${project.version}-bin</baseDirectory>
+
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>lib</outputDirectory>
+      <unpack>false</unpack>
+      <useProjectArtifact>true</useProjectArtifact>
+      <useStrictFiltering>true</useStrictFiltering>
+      <useTransitiveFiltering>true</useTransitiveFiltering>
+      <excludes>
+        <exclude>org.apache.hadoop:*</exclude>
+        <exclude>org.slf4j:*</exclude>
+        <exclude>log4j:*</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <excludes>
+        <exclude>target/**</exclude>
+        <exclude>.classpath</exclude>
+        <exclude>.project</exclude>
+        <exclude>.settings/**</exclude>
+        <exclude>lib/**</exclude>
+      </excludes>
+
+      <includes>
+        <include>README.txt</include>
+        <include>LICENSE</include>
+        <include>NOTICE</include>
+      </includes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/binary-package-licenses</directory>
+      <includes>
+        <include>/*</include>
+      </includes>
+      <excludes>
+        <exclude>/README</exclude>
+      </excludes>
+      <outputDirectory>binary-package-licenses</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <fileMode>755</fileMode>
+      <directory>${project.basedir}/src/main/scripts</directory>
+      <includes>
+        <include>base</include>
+        <include>schematool</include>
+        <include>start-metastore</include>
+        <include>metastore-config.sh</include>
+        <include>ext/**/*</include>
+      </includes>
+      <outputDirectory>bin</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/main/sql</directory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>scripts/metastore/upgrade</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/gen/thrift/gen-php</directory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>lib/php/packages/hive_metastore</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/gen/thrift/gen-py/hive_metastore</directory>
+      <fileMode>755</fileMode>
+       <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>lib/py/hive_metastore</outputDirectory>
+    </fileSet>
+  </fileSets>
+
+  <files>
+    <file>
+      <source>${project.basedir}/src/main/resources/metastore-log4j2.xml</source>
+      <outputDirectory>conf</outputDirectory>
+    </file>
+  </files>
+
+</assembly>
+

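A hedged orientation note on the descriptor above: with the assembly execution bound to the package phase in the pom change earlier in this patch, a plain

    mvn package -DskipTests

should emit apache-hive-metastore-${project.version}-bin both as an exploded directory and as a tar.gz (this bin.xml), plus the -src tar.gz from src.xml, under target/ as is usual for the assembly plugin.
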
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/assembly/src.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/assembly/src.xml b/standalone-metastore/src/assembly/src.xml
new file mode 100644
index 0000000..a240544
--- /dev/null
+++ b/standalone-metastore/src/assembly/src.xml
@@ -0,0 +1,53 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+
+  <id>src</id>
+
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>apache-hive-metastore-${project.version}-src</baseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+
+      <excludes>
+        <exclude>target/**</exclude>
+      </excludes>
+
+      <includes>
+        <include>.checkstyle</include>
+        <include>.gitattributes</include>
+        <include>.gitignore</include>
+        <include>LICENSE</include>
+        <include>NOTICE</include>
+        <include>pom.xml</include>
+        <include>src/**/*</include>
+      </includes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
index be89f9b..ed4a2ef 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
@@ -55,6 +55,13 @@ public interface IMetaStoreSchemaInfo {
   String generateInitFileName(String toVersion) throws HiveMetaException;
 
   /**
+   * Get SQL script that will create the user and database for Metastore to use.
+   * @return filename
+   * @throws HiveMetaException if something goes wrong.
+   */
+  String getCreateUserScript() throws HiveMetaException;
+
+  /**
    * Find the directory of metastore scripts
    *
    * @return the path of directory where the sql scripts are

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
index 0c36855..3bb3643 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
@@ -38,10 +38,11 @@ import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
 
 
 public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo {
-  protected static final String UPGRADE_FILE_PREFIX = "upgrade-";
+  private static final String UPGRADE_FILE_PREFIX = "upgrade-";
   private static final String INIT_FILE_PREFIX = "hive-schema-";
   private static final String VERSION_UPGRADE_LIST = "upgrade.order";
   private static final String PRE_UPGRADE_PREFIX = "pre-";
+  private static final String CREATE_USER_PREFIX = "create-user";
   protected final String dbType;
   private String[] hiveSchemaVersions;
   private final String hiveHome;
@@ -137,6 +138,17 @@ public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo {
     return initScriptName;
   }
 
+  @Override
+  public String getCreateUserScript() throws HiveMetaException {
+    String createScript = CREATE_USER_PREFIX + "." + dbType + SQL_FILE_EXTENSION;
+    // check if the file exists
+    if (!(new File(getMetaStoreScriptDir() + File.separatorChar +
+        createScript).exists())) {
+      throw new HiveMetaException("Unable to find create user file, expected: " + createScript);
+    }
+    return createScript;
+  }
+
   /**
    * Find the directory of metastore scripts
    * @return
@@ -209,7 +221,7 @@ public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo {
       throws HiveMetaException {
     String versionQuery;
     boolean needsQuotedIdentifier =
-        HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType()).needsQuotedIdentifier();
+        HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType(), false).needsQuotedIdentifier();
     if (needsQuotedIdentifier) {
       versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t";
     } else {

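To make the new hook concrete, here is a minimal caller sketch assuming only what the interface exposes (getCreateUserScript() returns a file name located inside getMetaStoreScriptDir()); the helper name is invented:

    import java.io.File;

    // Hypothetical helper: resolve the create-user script for the configured
    // dbType (e.g. "create-user.mysql.sql") to an absolute path. The lookup
    // throws HiveMetaException when the script file does not exist.
    static File resolveCreateUserScript(IMetaStoreSchemaInfo info) throws HiveMetaException {
      String name = info.getCreateUserScript();
      return new File(info.getMetaStoreScriptDir(), name);
    }
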
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
index 08a3af5..23c5df3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/HiveSchemaHelper.java
@@ -22,6 +22,8 @@ import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -34,6 +36,8 @@ import java.util.IllegalFormatException;
 import java.util.List;
 
 public class HiveSchemaHelper {
+  private static final Logger LOG = LoggerFactory.getLogger(HiveSchemaHelper.class);
+
   public static final String DB_DERBY = "derby";
   public static final String DB_HIVE = "hive";
   public static final String DB_MSSQL = "mssql";
@@ -56,14 +60,15 @@ public class HiveSchemaHelper {
       Configuration conf)
       throws HiveMetaException {
     try {
-      url = url == null ? getValidConfVar(
-        MetastoreConf.ConfVars.CONNECTURLKEY, conf) : url;
-      driver = driver == null ? getValidConfVar(
-        MetastoreConf.ConfVars.CONNECTION_DRIVER, conf) : driver;
+      url = url == null ? MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTURLKEY) : url;
+      driver = driver == null ? MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER) : driver;
       if (printInfo) {
-        System.out.println("Metastore connection URL:\t " + url);
-        System.out.println("Metastore Connection Driver :\t " + driver);
-        System.out.println("Metastore connection User:\t " + userName);
+        MetastoreSchemaTool.logAndPrintToError("Metastore connection URL:\t " + url);
+        MetastoreSchemaTool.logAndPrintToError("Metastore Connection Driver :\t " + driver);
+        MetastoreSchemaTool.logAndPrintToError("Metastore connection User:\t " + userName);
+        if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST)) {
+          MetastoreSchemaTool.logAndPrintToError("Metastore connection Password:\t " + password);
+        }
       }
       if ((userName == null) || userName.isEmpty()) {
         throw new HiveMetaException("UserName empty ");
@@ -74,11 +79,11 @@ public class HiveSchemaHelper {
 
       // Connect using the JDBC URL and user/pass from conf
       return DriverManager.getConnection(url, userName, password);
-    } catch (IOException e) {
-      throw new HiveMetaException("Failed to get schema version.", e);
     } catch (SQLException e) {
-      throw new HiveMetaException("Failed to get schema version.", e);
+      LOG.error("Failed to connect", e);
+      throw new HiveMetaException("Failed to connect.", e);
     } catch (ClassNotFoundException e) {
+      LOG.error("Unable to find driver class", e);
       throw new HiveMetaException("Failed to load driver", e);
     }
   }
@@ -201,13 +206,17 @@ public class HiveSchemaHelper {
     private String msUsername;
     private String msPassword;
     private Configuration conf;
+    // Depending on whether we are using beeline or sqlline the line endings have to be handled
+    // differently.
+    private final boolean usingSqlLine;
 
     public AbstractCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
+        Configuration conf, boolean usingSqlLine) {
       setDbOpts(dbOpts);
       this.msUsername = msUsername;
       this.msPassword = msPassword;
       this.conf = conf;
+      this.usingSqlLine = usingSqlLine;
     }
 
     @Override
@@ -300,6 +309,7 @@ public class HiveSchemaHelper {
             // Now we have a complete statement, process it
             // write the line to buffer
             sb.append(currentCommand);
+            if (usingSqlLine) sb.append(";");
             sb.append(System.getProperty("line.separator"));
           }
         }
@@ -339,8 +349,8 @@ public class HiveSchemaHelper {
     private static final String DERBY_NESTING_TOKEN = "RUN";
 
     public DerbyCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
-      super(dbOpts, msUsername, msPassword, conf);
+        Configuration conf, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     }
 
     @Override
@@ -369,9 +379,9 @@ public class HiveSchemaHelper {
     private final NestedScriptParser nestedDbCommandParser;
 
     public HiveCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf, String metaDbType) {
-      super(dbOpts, msUsername, msPassword, conf);
-      nestedDbCommandParser = getDbCommandParser(metaDbType);
+        Configuration conf, String metaDbType, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
+      nestedDbCommandParser = getDbCommandParser(metaDbType, usingSqlLine);
     }
 
     @Override
@@ -405,8 +415,8 @@ public class HiveSchemaHelper {
     private String delimiter = DEFAULT_DELIMITER;
 
     public MySqlCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
-      super(dbOpts, msUsername, msPassword, conf);
+        Configuration conf, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     }
 
     @Override
@@ -471,8 +481,8 @@ public class HiveSchemaHelper {
     public static final String POSTGRES_SKIP_STANDARD_STRINGS_DBOPT = "postgres.filter.81";
 
     public PostgresCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
-      super(dbOpts, msUsername, msPassword, conf);
+        Configuration conf, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     }
 
     @Override
@@ -514,8 +524,8 @@ public class HiveSchemaHelper {
     private static final String ORACLE_NESTING_TOKEN = "@";
 
     public OracleCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
-      super(dbOpts, msUsername, msPassword, conf);
+        Configuration conf, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     }
 
     @Override
@@ -538,8 +548,8 @@ public class HiveSchemaHelper {
     private static final String MSSQL_NESTING_TOKEN = ":r";
 
     public MSSQLCommandParser(String dbOpts, String msUsername, String msPassword,
-        Configuration conf) {
-      super(dbOpts, msUsername, msPassword, conf);
+        Configuration conf, boolean usingSqlLine) {
+      super(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     }
 
     @Override
@@ -557,29 +567,29 @@ public class HiveSchemaHelper {
     }
   }
 
-  public static NestedScriptParser getDbCommandParser(String dbName) {
-    return getDbCommandParser(dbName, null);
+  public static NestedScriptParser getDbCommandParser(String dbName, boolean usingSqlLine) {
+    return getDbCommandParser(dbName, null, usingSqlLine);
   }
 
-  public static NestedScriptParser getDbCommandParser(String dbName, String metaDbName) {
-    return getDbCommandParser(dbName, null, null, null, null, metaDbName);
+  public static NestedScriptParser getDbCommandParser(String dbName, String metaDbName, boolean usingSqlLine) {
+    return getDbCommandParser(dbName, null, null, null, null, metaDbName, usingSqlLine);
   }
 
   public static NestedScriptParser getDbCommandParser(String dbName,
       String dbOpts, String msUsername, String msPassword,
-      Configuration conf, String metaDbType) {
+      Configuration conf, String metaDbType, boolean usingSqlLine) {
     if (dbName.equalsIgnoreCase(DB_DERBY)) {
-      return new DerbyCommandParser(dbOpts, msUsername, msPassword, conf);
+      return new DerbyCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     } else if (dbName.equalsIgnoreCase(DB_HIVE)) {
-      return new HiveCommandParser(dbOpts, msUsername, msPassword, conf, metaDbType);
+      return new HiveCommandParser(dbOpts, msUsername, msPassword, conf, metaDbType, usingSqlLine);
     } else if (dbName.equalsIgnoreCase(DB_MSSQL)) {
-      return new MSSQLCommandParser(dbOpts, msUsername, msPassword, conf);
+      return new MSSQLCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     } else if (dbName.equalsIgnoreCase(DB_MYSQL)) {
-      return new MySqlCommandParser(dbOpts, msUsername, msPassword, conf);
+      return new MySqlCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     } else if (dbName.equalsIgnoreCase(DB_POSTGRACE)) {
-      return new PostgresCommandParser(dbOpts, msUsername, msPassword, conf);
+      return new PostgresCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     } else if (dbName.equalsIgnoreCase(DB_ORACLE)) {
-      return new OracleCommandParser(dbOpts, msUsername, msPassword, conf);
+      return new OracleCommandParser(dbOpts, msUsername, msPassword, conf, usingSqlLine);
     } else {
       throw new IllegalArgumentException("Unknown dbType " + dbName);
     }

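As a hedged illustration of the new boolean (the names are taken from the diff, the scenario is invented): the only behavioral difference is that a sqlline-oriented parser re-appends the ';' terminator in buildCommand(), so the same upgrade script can be replayed through either client.

    // Two parsers for the same MySQL scripts; only the second keeps the ';'
    // statement terminator that beeline-oriented parsing otherwise strips.
    HiveSchemaHelper.NestedScriptParser forBeeline =
        HiveSchemaHelper.getDbCommandParser(HiveSchemaHelper.DB_MYSQL, false);
    HiveSchemaHelper.NestedScriptParser forSqlLine =
        HiveSchemaHelper.getDbCommandParser(HiveSchemaHelper.DB_MYSQL, true);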

[09/50] [abbrv] hive git commit: HIVE-18003 : add explicit jdbc connection string args for mappings (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by ga...@apache.org.
HIVE-18003 : add explicit jdbc connection string args for mappings (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e120bd8b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e120bd8b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e120bd8b

Branch: refs/heads/standalone-metastore
Commit: e120bd8b0b8b74516651f3ae9e4e7d3a170b0d4d
Parents: 89dbf4e
Author: sergey <se...@apache.org>
Authored: Thu Dec 14 15:55:25 2017 -0800
Committer: sergey <se...@apache.org>
Committed: Thu Dec 14 15:55:25 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   6 +-
 .../org/apache/hive/jdbc/HiveConnection.java    |   5 +
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |   1 +
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |  10 +-
 .../hive/ql/exec/tez/UserPoolMapping.java       |  38 +++++-
 .../hive/ql/exec/tez/WorkloadManager.java       |  30 +++--
 .../hive/ql/exec/tez/TestWorkloadManager.java   | 117 ++++++++++++-------
 7 files changed, 147 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 7a81612..711dfbd 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2435,9 +2435,13 @@ public class HiveConf extends Configuration {
     HIVE_SERVER2_TEZ_INTERACTIVE_QUEUE("hive.server2.tez.interactive.queue", "",
         "A single YARN queues to use for Hive Interactive sessions. When this is specified,\n" +
         "workload management is enabled and used for these sessions."),
-    HIVE_SERVER2_TEZ_WM_WORKER_THREADS("hive.server2.tez.wm.worker.threads", 4,
+    HIVE_SERVER2_WM_WORKER_THREADS("hive.server2.wm.worker.threads", 4,
         "Number of worker threads to use to perform the synchronous operations with Tez\n" +
         "sessions for workload management (e.g. opening, closing, etc.)"),
+    HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC("hive.server2.wm.allow.any.pool.via.jdbc", false,
+        "Applies when a user specifies a target WM pool in the JDBC connection string. If\n" +
+        "false, the user can only specify a pool he is mapped to (e.g. make a choice among\n" +
+        "multiple group mappings); if true, the user can specify any existing pool."),
     HIVE_SERVER2_TEZ_WM_AM_REGISTRY_TIMEOUT("hive.server2.tez.wm.am.registry.timeout", "30s",
         new TimeValidator(TimeUnit.SECONDS),
         "The timeout for AM registry registration, after which (on attempting to use the\n" +

http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index fc937e6..45acf13 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -138,6 +138,7 @@ public class HiveConnection implements java.sql.Connection {
   private TProtocolVersion protocol;
   private int fetchSize = HiveStatement.DEFAULT_FETCH_SIZE;
   private String initFile = null;
+  private String wmPool = null;
   private Properties clientInfo;
 
   /**
@@ -178,6 +179,7 @@ public class HiveConnection implements java.sql.Connection {
     if (sessConfMap.containsKey(JdbcConnectionParams.INIT_FILE)) {
       initFile = sessConfMap.get(JdbcConnectionParams.INIT_FILE);
     }
+    wmPool = sessConfMap.get(JdbcConnectionParams.WM_POOL);
 
     // add supported protocols
     supportedProtocols.add(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1);
@@ -680,6 +682,9 @@ public class HiveConnection implements java.sql.Connection {
     // set the fetchSize
     openConf.put("set:hiveconf:hive.server2.thrift.resultset.default.fetch.size",
       Integer.toString(fetchSize));
+    if (wmPool != null) {
+      openConf.put("set:hivevar:wmpool", wmPool);
+    }
 
     // set the session configuration
     Map<String, String> sessVars = connParams.getSessionVars();

http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 855de88..bb13682 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -123,6 +123,7 @@ public class Utils {
     // Set the fetchSize
     static final String FETCH_SIZE = "fetchSize";
     static final String INIT_FILE = "initFile";
+    static final String WM_POOL = "wmPool";
 
     // --------------- Begin 2 way ssl options -------------------------
     // Use two way ssl. This param will take effect only when ssl=true

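Putting the client-side pieces together (hedged; host, port, user and pool name are made up): wmPool rides in the session-variable section of the JDBC URL, and HiveConnection forwards it as the hivevar wmpool, which the TezTask hunk below reads back on the server:

    // Session variables sit in the semicolon-separated part of a hive2 URL.
    java.sql.Connection conn = java.sql.DriverManager.getConnection(
        "jdbc:hive2://hs2host:10000/default;wmPool=etl", "someuser", "");
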
http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 27799a8..1a24b44 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -156,9 +156,13 @@ public class TezTask extends Task<TezWork> {
       // based on Hadoop configuration, as documented at
       // https://hadoop.apache.org/docs/r2.8.0/hadoop-project-dist/hadoop-common/GroupsMapping.html
       String userName = ss.getUserName();
-      MappingInput mi = (userName == null) ? new MappingInput("anonymous", null)
-        : new MappingInput(ss.getUserName(),
-            UserGroupInformation.createRemoteUser(ss.getUserName()).getGroups());
+      List<String> groups = null;
+      if (userName == null) {
+        userName = "anonymous";
+      } else {
+        groups = UserGroupInformation.createRemoteUser(ss.getUserName()).getGroups();
+      }
+      MappingInput mi = new MappingInput(userName, groups, ss.getHiveVariables().get("wmpool"));
 
       WmContext wmContext = ctx.getWmContext();
       // jobConf will hold all the configuration for hadoop, tez, and hive

http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
index 33ee8f7..5919f3f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/UserPoolMapping.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import com.google.common.annotations.VisibleForTesting;
+
+import java.util.Set;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -54,13 +57,25 @@ class UserPoolMapping {
 
   /** Contains all the information necessary to map a query to a pool. */
   public static final class MappingInput {
-    private final String userName;
+    private final String userName, wmPool;
     private final List<String> groups;
     // TODO: we may add app name, etc. later
 
-    public MappingInput(String userName, List<String> groups) {
+    public MappingInput(String userName, List<String> groups, String wmPool) {
       this.userName = userName;
       this.groups = groups;
+      this.wmPool = wmPool;
+    }
+
+    // TODO: move these into tests when there are fewer conflicting patches pending.
+    @VisibleForTesting
+    public MappingInput(String userName) {
+      this(userName, null);
+    }
+
+    @VisibleForTesting
+    public MappingInput(String userName, List<String> groups) {
+      this(userName, groups, null);
     }
 
     public List<String> getGroups() {
@@ -73,7 +88,7 @@ class UserPoolMapping {
 
     @Override
     public String toString() {
-      return getUserName() + "; groups " + groups;
+      return "{" + getUserName() + "; pool " + wmPool + "; groups " + groups + "}";
     }
   }
 
@@ -107,17 +122,32 @@ class UserPoolMapping {
     }
   }
 
-  public String mapSessionToPoolName(MappingInput input) {
+  public String mapSessionToPoolName(MappingInput input, boolean allowAnyPool, Set<String> pools) {
+    if (allowAnyPool && input.wmPool != null) {
+      return (pools == null || pools.contains(input.wmPool)) ? input.wmPool : null;
+    }
     // For equal-priority rules, user rules come first because they are more specific (arbitrary).
     Mapping mapping = userMappings.get(input.getUserName());
+    boolean isExplicitMatch = false;
+    if (mapping != null) {
+      isExplicitMatch = isExplicitPoolMatch(input, mapping);
+      if (isExplicitMatch) return mapping.fullPoolName;
+    }
     for (String group : input.getGroups()) {
       Mapping candidate = groupMappings.get(group);
       if (candidate == null) continue;
+      isExplicitMatch = isExplicitPoolMatch(input, candidate);
+      if (isExplicitMatch) return candidate.fullPoolName;
       if (mapping == null || candidate.priority < mapping.priority) {
         mapping = candidate;
       }
     }
+    if (input.wmPool != null && !isExplicitMatch) return null;
     if (mapping != null) return mapping.fullPoolName;
     return defaultPoolPath;
   }
+
+  private static boolean isExplicitPoolMatch(MappingInput input, Mapping mapping) {
+    return input.wmPool != null && input.wmPool.equals(mapping.fullPoolName);
+  }
 }

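A hedged sketch of the resolution rules introduced above (principal and pool names are invented, and UserPoolMapping is package-private, so this is illustrative rather than client code; "mapping" stands for an existing UserPoolMapping instance):

    // With allowAnyPool == false, the wmPool requested via JDBC is honored only
    // when a user or group rule already maps this input to exactly that pool;
    // otherwise mapSessionToPoolName() returns null and WorkloadManager fails
    // the request with NoPoolMappingException. With allowAnyPool == true, any
    // pool contained in the supplied pool-name set may be requested directly.
    MappingInput mi = new MappingInput("alice", java.util.Arrays.asList("etl"), "root.etl");
    String pool = mapping.mapSessionToPoolName(mi, false, null);
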
http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index 1f4843d..f0481fc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -103,6 +103,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
   private final QueryAllocationManager allocationManager;
   private final String yarnQueue;
   private final int amRegistryTimeoutMs;
+  private final boolean allowAnyPool;
   // Note: it's not clear that we need to track this - unlike PoolManager we don't have non-pool
   //       sessions, so the pool itself could internally track the sessions it gave out, since
   //       calling close on an unopened session is probably harmless.
@@ -204,11 +205,14 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     // Only creates the expiration tracker if expiration is configured.
     expirationTracker = SessionExpirationTracker.create(conf, this);
 
-    workPool = Executors.newFixedThreadPool(HiveConf.getIntVar(conf, ConfVars.HIVE_SERVER2_TEZ_WM_WORKER_THREADS),
-      new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Workload management worker %d").build());
+    workPool = Executors.newFixedThreadPool(HiveConf.getIntVar(
+        conf, ConfVars.HIVE_SERVER2_WM_WORKER_THREADS), new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("Workload management worker %d").build());
 
-    timeoutPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setDaemon(true)
-      .setNameFormat("Workload management timeout thread").build());
+    timeoutPool = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
+      .setDaemon(true).setNameFormat("Workload management timeout thread").build());
+
+    allowAnyPool = HiveConf.getBoolVar(conf, ConfVars.HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC);
 
     wmThread = new Thread(() -> runWmThread(), "Workload management master");
     wmThread.setDaemon(true);
@@ -1047,7 +1051,8 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
   private void queueGetRequestOnMasterThread(
       GetRequest req, HashSet<String> poolsToRedistribute, WmThreadSyncWork syncWork) {
-    String poolName = userPoolMapping.mapSessionToPoolName(req.mappingInput);
+    String poolName = userPoolMapping.mapSessionToPoolName(
+        req.mappingInput, allowAnyPool, allowAnyPool ? pools.keySet() : null);
     if (poolName == null) {
       req.future.setException(new NoPoolMappingException(
           "Cannot find any pool mapping for " + req.mappingInput));
@@ -1319,8 +1324,14 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
   }
 
+  @VisibleForTesting
   public WmTezSession getSession(
-    TezSessionState session, MappingInput input, HiveConf conf, final WmContext wmContext) throws Exception {
+      TezSessionState session, MappingInput input, HiveConf conf) throws Exception {
+    return getSession(session, input, conf, null);
+  }
+
+  public WmTezSession getSession(TezSessionState session, MappingInput input, HiveConf conf,
+      final WmContext wmContext) throws Exception {
     WmEvent wmEvent = new WmEvent(WmEvent.EventType.GET);
     // Note: not actually used for pool sessions; verify some things like doAs are not set.
     validateConfig(conf);
@@ -1936,8 +1947,11 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
   boolean isManaged(MappingInput input) {
     // This is always replaced atomically, so we don't care about concurrency here.
-    if (userPoolMapping != null) {
-      String mappedPool = userPoolMapping.mapSessionToPoolName(input);
+    UserPoolMapping mapping = userPoolMapping;
+    if (mapping != null) {
+      // Don't pass in the pool set - not thread safe; if the user is trying to force us to
+      // use a non-existent pool, we want to fail anyway. We will fail later during get.
+      String mappedPool = mapping.mapSessionToPoolName(input, allowAnyPool, null);
       LOG.info("Mapping input: {} mapped to pool: {}", input, mappedPool);
       return true;
     }
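
One detail worth noting in the isManaged() hunk above: the userPoolMapping field can be replaced atomically by another thread, so the patch reads it once into a local and uses only the local. A minimal sketch of that read-once idiom, with illustrative names (the volatile modifier here is an assumption of the sketch):

import java.util.function.Function;

public class ReadOnceSketch {
  // Replaced wholesale by another thread; volatile for visibility in this sketch.
  private volatile Function<String, String> mapper;

  boolean isManaged(String input) {
    Function<String, String> m = mapper; // read the shared field exactly once
    if (m == null) return false;
    // Safe even if 'mapper' is swapped or nulled concurrently at this point.
    System.out.println("Mapping input: " + input + " mapped to pool: " + m.apply(input));
    return true;
  }

  public static void main(String[] args) {
    ReadOnceSketch s = new ReadOnceSketch();
    System.out.println(s.isManaged("q1")); // false: no mapping installed yet
    s.mapper = q -> "default";
    System.out.println(s.isManaged("q1")); // true
  }
}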

http://git-wip-us.apache.org/repos/asf/hive/blob/e120bd8b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index fc8f66a..98f5c58 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -32,7 +32,6 @@ import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.SettableFuture;
 import java.lang.Thread.State;
@@ -86,7 +85,7 @@ public class TestWorkloadManager {
         cdl.countDown();
       }
       try {
-       session.set((WmTezSession) wm.getSession(old, new MappingInput(userName, null), conf, null));
+       session.set((WmTezSession) wm.getSession(old, new MappingInput(userName), conf));
       } catch (Throwable e) {
         error.compareAndSet(null, e);
       }
@@ -227,17 +226,17 @@ public class TestWorkloadManager {
     TezSessionState nonPool = mock(TezSessionState.class);
     when(nonPool.getConf()).thenReturn(conf);
     doNothing().when(nonPool).close(anyBoolean());
-    TezSessionState session = wm.getSession(nonPool, new MappingInput("user", null), conf, null);
+    TezSessionState session = wm.getSession(nonPool, new MappingInput("user"), conf);
     verify(nonPool).close(anyBoolean());
     assertNotSame(nonPool, session);
     session.returnToSessionManager();
     TezSessionPoolSession diffPool = mock(TezSessionPoolSession.class);
     when(diffPool.getConf()).thenReturn(conf);
     doNothing().when(diffPool).returnToSessionManager();
-    session = wm.getSession(diffPool, new MappingInput("user", null), conf, null);
+    session = wm.getSession(diffPool, new MappingInput("user"), conf);
     verify(diffPool).returnToSessionManager();
     assertNotSame(diffPool, session);
-    TezSessionState session2 = wm.getSession(session, new MappingInput("user", null), conf, null);
+    TezSessionState session2 = wm.getSession(session, new MappingInput("user"), conf);
     assertSame(session, session2);
   }
 
@@ -249,11 +248,11 @@ public class TestWorkloadManager {
     wm.start();
     // The queue should be ignored.
     conf.set(TezConfiguration.TEZ_QUEUE_NAME, "test2");
-    TezSessionState session = wm.getSession(null, new MappingInput("user", null), conf, null);
+    TezSessionState session = wm.getSession(null, new MappingInput("user"), conf);
     assertEquals("test", session.getQueueName());
     assertEquals("test", conf.get(TezConfiguration.TEZ_QUEUE_NAME));
     session.setQueueName("test2");
-    session = wm.getSession(session, new MappingInput("user", null), conf, null);
+    session = wm.getSession(session, new MappingInput("user"), conf);
     assertEquals("test", session.getQueueName());
   }
 
@@ -269,7 +268,7 @@ public class TestWorkloadManager {
     WorkloadManager wm = new WorkloadManagerForTest("test", conf, 1, qam);
     wm.start();
     WmTezSession session = (WmTezSession) wm.getSession(
-        null, new MappingInput("user", null), conf, null);
+        null, new MappingInput("user"), conf);
     assertEquals(1.0, session.getClusterFraction(), EPSILON);
     qam.assertWasCalledAndReset();
     WmTezSession session2 = (WmTezSession) session.reopen();
@@ -287,10 +286,10 @@ public class TestWorkloadManager {
     MockQam qam = new MockQam();
     WorkloadManager wm = new WorkloadManagerForTest("test", conf, 2, qam);
     wm.start();
-    WmTezSession session = (WmTezSession) wm.getSession(null, new MappingInput("user", null), conf, null);
+    WmTezSession session = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     assertEquals(1.0, session.getClusterFraction(), EPSILON);
     qam.assertWasCalledAndReset();
-    WmTezSession session2 = (WmTezSession) wm.getSession(null, new MappingInput("user", null), conf, null);
+    WmTezSession session2 = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     assertEquals(0.5, session.getClusterFraction(), EPSILON);
     assertEquals(0.5, session2.getClusterFraction(), EPSILON);
     qam.assertWasCalledAndReset();
@@ -301,7 +300,7 @@ public class TestWorkloadManager {
     qam.assertWasCalledAndReset();
 
     // We never lose pool session, so we should still be able to get.
-    session = (WmTezSession) wm.getSession(null, new MappingInput("user", null), conf, null);
+    session = (WmTezSession) wm.getSession(null, new MappingInput("user"), conf);
     session.returnToSessionManager();
     assertEquals(1.0, session2.getClusterFraction(), EPSILON);
     assertEquals(0.0, session.getClusterFraction(), EPSILON);
@@ -322,20 +321,20 @@ public class TestWorkloadManager {
     assertEquals(5, wm.getNumSessions());
     // Get all the 5 sessions; validate cluster fractions.
     WmTezSession session05of06 = (WmTezSession) wm.getSession(
-        null, new MappingInput("p1", null), conf, null);
+        null, new MappingInput("p1"), conf);
     assertEquals(0.3, session05of06.getClusterFraction(), EPSILON);
     WmTezSession session03of06 = (WmTezSession) wm.getSession(
-        null, new MappingInput("p2", null), conf, null);
+        null, new MappingInput("p2"), conf);
     assertEquals(0.18, session03of06.getClusterFraction(), EPSILON);
     WmTezSession session03of06_2 = (WmTezSession) wm.getSession(
-        null, new MappingInput("p2", null), conf, null);
+        null, new MappingInput("p2"), conf);
     assertEquals(0.09, session03of06.getClusterFraction(), EPSILON);
     assertEquals(0.09, session03of06_2.getClusterFraction(), EPSILON);
     WmTezSession session02of06 = (WmTezSession) wm.getSession(
-        null,new MappingInput("r1", null), conf, null);
+        null, new MappingInput("r1"), conf);
     assertEquals(0.12, session02of06.getClusterFraction(), EPSILON);
     WmTezSession session04 = (WmTezSession) wm.getSession(
-        null, new MappingInput("r2", null), conf, null);
+        null, new MappingInput("r2"), conf);
     assertEquals(0.4, session04.getClusterFraction(), EPSILON);
     session05of06.returnToSessionManager();
     session03of06.returnToSessionManager();
@@ -347,6 +346,7 @@ public class TestWorkloadManager {
   @Test(timeout = 10000)
   public void testMappings() throws Exception {
     HiveConf conf = createConf();
+    conf.set(ConfVars.HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC.varname, "false");
     MockQam qam = new MockQam();
     WMFullResourcePlan plan = new WMFullResourcePlan(plan(),
         Lists.newArrayList(pool("u0"), pool("g0"), pool("g1"), pool("u2")));
@@ -363,6 +363,31 @@ public class TestWorkloadManager {
     verifyMapping(wm, conf, new MappingInput("u0", groups("g0")), "u0");
     verifyMapping(wm, conf, new MappingInput("u2", groups("g1")), "g1");
     verifyMapping(wm, conf, new MappingInput("u2", groups("g0", "g1")), "g0");
+    // Check explicit pool specifications - valid cases where priority is changed.
+    verifyMapping(wm, conf, new MappingInput("u0", groups("g1"), "g1"), "g1");
+    verifyMapping(wm, conf, new MappingInput("u2", groups("g1"), "u2"), "u2");
+    verifyMapping(wm, conf, new MappingInput("zzz", groups("g0", "g1"), "g1"), "g1");
+    // Explicit pool specification - invalid - there's no mapping that matches.
+    try {
+      TezSessionState r = wm.getSession(
+        null, new MappingInput("u0", groups("g0", "g1"), "u2"), conf);
+      fail("Expected failure, but got " + r);
+    } catch (Exception ex) {
+      // Expected.
+    }
+    // Now allow the users to specify any pools.
+    conf.set(ConfVars.HIVE_SERVER2_WM_ALLOW_ANY_POOL_VIA_JDBC.varname, "true");
+    wm = new WorkloadManagerForTest("test", conf, qam, plan);
+    wm.start();
+    verifyMapping(wm, conf, new MappingInput("u0", groups("g0", "g1"), "u2"), "u2");
+    // Requesting a pool that doesn't exist still shouldn't work.
+    try {
+      TezSessionState r = wm.getSession(
+        null, new MappingInput("u0", groups("g0", "g1"), "zzz"), conf);
+      fail("Expected failure, but got " + r);
+    } catch (Exception ex) {
+      // Expected.
+    }
   }
 
   private static void verifyMapping(
@@ -372,6 +397,9 @@ public class TestWorkloadManager {
     session.returnToSessionManager();
   }
 
+  
+
+
   @Test(timeout=10000)
   public void testQueueing() throws Exception {
     final HiveConf conf = createConf();
@@ -381,9 +409,9 @@ public class TestWorkloadManager {
     plan.setMappings(Lists.newArrayList(mapping("A", "A"), mapping("B", "B")));
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null),
-        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null),
-        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf);
     final AtomicReference<WmTezSession> sessionA3 = new AtomicReference<>(),
         sessionA4 = new AtomicReference<>();
     final AtomicReference<Throwable> error = new AtomicReference<>();
@@ -397,7 +425,7 @@ public class TestWorkloadManager {
     assertNull(sessionA4.get());
     checkError(error);
     // While threads are blocked on A, we should still be able to get and return a B session.
-    WmTezSession sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B", null), conf, null);
+    WmTezSession sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf);
     sessionB1.returnToSessionManager();
     sessionB2.returnToSessionManager();
     assertNull(sessionA3.get());
@@ -425,8 +453,8 @@ public class TestWorkloadManager {
     plan.getPlan().setDefaultPoolPath("A");
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
-    WmTezSession session1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null),
-        session2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession session1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        session2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals(0.5, session1.getClusterFraction(), EPSILON);
     assertEquals(0.5, session2.getClusterFraction(), EPSILON);
     qam.assertWasCalledAndReset();
@@ -448,19 +476,19 @@ public class TestWorkloadManager {
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, 2, qam);
     wm.start();
     WmTezSession session1 = (WmTezSession) wm.getSession(
-        null, new MappingInput("user", null), conf, null);
+        null, new MappingInput("user"), conf);
     // First, try to reuse from the same pool - should "just work".
     WmTezSession session1a = (WmTezSession) wm.getSession(
-        session1, new MappingInput("user", null), conf, null);
+        session1, new MappingInput("user"), conf);
     assertSame(session1, session1a);
     assertEquals(1.0, session1.getClusterFraction(), EPSILON);
     // Should still be able to get the 2nd session.
     WmTezSession session2 = (WmTezSession) wm.getSession(
-        null, new MappingInput("user", null), conf, null);
+        null, new MappingInput("user"), conf);
 
     // Now try to reuse with no other sessions remaining. Should still work.
     WmTezSession session2a = (WmTezSession) wm.getSession(
-        session2, new MappingInput("user", null), conf, null);
+        session2, new MappingInput("user"), conf);
     assertSame(session2, session2a);
     assertEquals(0.5, session1.getClusterFraction(), EPSILON);
     assertEquals(0.5, session2.getClusterFraction(), EPSILON);
@@ -517,19 +545,19 @@ public class TestWorkloadManager {
     plan.setMappings(Lists.newArrayList(mapping("A", "A"), mapping("B", "B")));
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null),
-        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals("A", sessionA1.getPoolName());
     assertEquals(0.3f, sessionA1.getClusterFraction(), EPSILON);
     assertEquals("A", sessionA2.getPoolName());
     assertEquals(0.3f, sessionA2.getClusterFraction(), EPSILON);
-    WmTezSession sessionB1 = (WmTezSession) wm.getSession(sessionA1, new MappingInput("B", null), conf, null);
+    WmTezSession sessionB1 = (WmTezSession) wm.getSession(sessionA1, new MappingInput("B"), conf);
     assertSame(sessionA1, sessionB1);
     assertEquals("B", sessionB1.getPoolName());
     assertEquals(0.4f, sessionB1.getClusterFraction(), EPSILON);
     assertEquals(0.6f, sessionA2.getClusterFraction(), EPSILON); // A1 removed from A.
     // Make sure that we can still get a session from A.
-    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals("A", sessionA3.getPoolName());
     assertEquals(0.3f, sessionA3.getClusterFraction(), EPSILON);
     assertEquals(0.3f, sessionA3.getClusterFraction(), EPSILON);
@@ -549,7 +577,7 @@ public class TestWorkloadManager {
     wm.start();
  
     // One session will be running, the other will be queued in "A"
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("U", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("U"), conf);
     assertEquals("A", sessionA1.getPoolName());
     assertEquals(0.5f, sessionA1.getClusterFraction(), EPSILON);
     final AtomicReference<WmTezSession> sessionA2 = new AtomicReference<>();
@@ -574,7 +602,7 @@ public class TestWorkloadManager {
     assertEquals(0.4f, sessionA2.get().getClusterFraction(), EPSILON);
     // The new session will also go to B now.
     sessionA2.get().returnToSessionManager();
-    WmTezSession sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("U", null), conf, null);
+    WmTezSession sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("U"), conf);
     assertEquals("B", sessionB1.getPoolName());
     assertEquals(0.4f, sessionB1.getClusterFraction(), EPSILON);
     sessionA1.returnToSessionManager();
@@ -598,11 +626,11 @@ public class TestWorkloadManager {
  
     // A: 1/1 running, 1 queued; B: 2/2 running, C: 1/2 running, D: 1/1 running, 1 queued.
     // Total: 5/6 running.
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null),
-        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B", null), conf, null),
-        sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B", null), conf, null),
-        sessionC1 = (WmTezSession) wm.getSession(null, new MappingInput("C", null), conf, null),
-        sessionD1 = (WmTezSession) wm.getSession(null, new MappingInput("D", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf),
+        sessionB1 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf),
+        sessionB2 = (WmTezSession) wm.getSession(null, new MappingInput("B"), conf),
+        sessionC1 = (WmTezSession) wm.getSession(null, new MappingInput("C"), conf),
+        sessionD1 = (WmTezSession) wm.getSession(null, new MappingInput("D"), conf);
     final AtomicReference<WmTezSession> sessionA2 = new AtomicReference<>(),
         sessionD2 = new AtomicReference<>();
     final AtomicReference<Throwable> error = new AtomicReference<>();
@@ -738,7 +766,7 @@ public class TestWorkloadManager {
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
 
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
 
     // [A: 1, B: 0]
     Map<String, SessionTriggerProvider> allSessionProviders = wm.getAllSessionTriggerProviders();
@@ -762,7 +790,7 @@ public class TestWorkloadManager {
     assertEquals(0.4f, sessionA1.getClusterFraction(), EPSILON);
     assertEquals("B", sessionA1.getPoolName());
 
-    WmTezSession sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     // [A: 1, B: 1]
     allSessionProviders = wm.getAllSessionTriggerProviders();
     assertEquals(1, allSessionProviders.get("A").getSessions().size());
@@ -789,7 +817,7 @@ public class TestWorkloadManager {
     assertEquals("B", sessionA2.getPoolName());
     assertEquals("B", sessionA1.getPoolName());
 
-    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA3 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     // [A: 1, B: 2]
     allSessionProviders = wm.getAllSessionTriggerProviders();
     assertEquals(1, allSessionProviders.get("A").getSessions().size());
@@ -829,7 +857,7 @@ public class TestWorkloadManager {
     final WorkloadManager wm = new WorkloadManagerForTest("test", conf, qam, plan);
     wm.start();
 
-    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA1 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
 
     // [A: 1, B: 0, B.x: 0, B.y: 0, C: 0]
     Map<String, SessionTriggerProvider> allSessionProviders = wm.getAllSessionTriggerProviders();
@@ -887,7 +915,8 @@ public class TestWorkloadManager {
     assertTrue(allSessionProviders.get("B.x").getSessions().contains(sessionA1));
     assertEquals("B.x", sessionA1.getPoolName());
 
-    WmTezSession sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession sessionA2 = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
+
     // [A: 1, B: 0, B.x: 1, B.y: 0, C: 0]
     allSessionProviders = wm.getAllSessionTriggerProviders();
     assertEquals(1, allSessionProviders.get("A").getSessions().size());
@@ -986,7 +1015,7 @@ public class TestWorkloadManager {
     failedWait.setException(new Exception("foo"));
     theOnlySession.setWaitForAmRegistryFuture(failedWait);
     try {
-      TezSessionState r = wm.getSession(null, new MappingInput("A", null), conf, null);
+      TezSessionState r = wm.getSession(null, new MappingInput("A"), conf);
       fail("Expected an error but got " + r);
     } catch (Exception ex) {
       // Expected.
@@ -1037,7 +1066,7 @@ public class TestWorkloadManager {
     assertEquals(0f, oldSession.getClusterFraction(), EPSILON);
     pool.returnSession(theOnlySession);
     // Make sure we can actually get a session still - parallelism/etc. should not be affected.
-    WmTezSession result = (WmTezSession) wm.getSession(null, new MappingInput("A", null), conf, null);
+    WmTezSession result = (WmTezSession) wm.getSession(null, new MappingInput("A"), conf);
     assertEquals(sessionPoolName, result.getPoolName());
     assertEquals(1f, result.getClusterFraction(), EPSILON);
     result.returnToSessionManager();
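
The new negative cases in testMappings() above use the usual try/fail/catch pattern: a requested pool with no matching mapping (or, with the toggle on, no such pool at all) must make getSession() throw rather than fall back to the default pool. A tiny standalone sketch of that pattern, with hypothetical names and the exception message borrowed from the WorkloadManager hunk:

public class ExpectedFailureSketch {
  // Hypothetical stand-in for WorkloadManager.getSession(): only "u2" is mappable.
  static String getSessionPool(String requestedPool) throws Exception {
    if (!"u2".equals(requestedPool)) {
      throw new Exception("Cannot find any pool mapping for " + requestedPool);
    }
    return requestedPool;
  }

  public static void main(String[] args) {
    try {
      String r = getSessionPool("zzz");
      // AssertionError is an Error, so the catch below does not swallow it.
      throw new AssertionError("Expected failure, but got " + r);
    } catch (Exception ex) {
      System.out.println("Rejected as expected: " + ex.getMessage());
    }
  }
}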


[24/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
deleted file mode 100644
index f344c47..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ /dev/null
@@ -1,3515 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.reflect.Field;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import junit.framework.TestCase;
-
-import org.datanucleus.api.jdo.JDOPersistenceManager;
-import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
-import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
-import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
-import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
-import org.apache.hadoop.hive.metastore.api.FunctionType;
-import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
-import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.ResourceType;
-import org.apache.hadoop.hive.metastore.api.ResourceUri;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SkewedInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.Type;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.thrift.TException;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-
-public abstract class TestHiveMetaStore extends TestCase {
-  private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStore.class);
-  protected static HiveMetaStoreClient client;
-  protected static HiveConf hiveConf;
-  protected static Warehouse warehouse;
-  protected static boolean isThriftClient = false;
-
-  private static final String TEST_DB1_NAME = "testdb1";
-  private static final String TEST_DB2_NAME = "testdb2";
-
-  private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
-
-  protected abstract HiveMetaStoreClient createClient() throws Exception;
-
-  @Override
-  protected void setUp() throws Exception {
-    hiveConf = new HiveConf(this.getClass());
-    warehouse = new Warehouse(hiveConf);
-
-    // set some values to use for getting conf. vars
-    hiveConf.set("hive.metastore.metrics.enabled","true");
-    hiveConf.set("hive.key1", "value1");
-    hiveConf.set("hive.key2", "http://www.example.com");
-    hiveConf.set("hive.key3", "");
-    hiveConf.set("hive.key4", "0");
-    hiveConf.set("datanucleus.autoCreateTables", "false");
-
-    hiveConf.setIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX, 2);
-    hiveConf.setIntVar(ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST);
-  }
-
-  public void testNameMethods() {
-    Map<String, String> spec = new LinkedHashMap<String, String>();
-    spec.put("ds", "2008-07-01 14:13:12");
-    spec.put("hr", "14");
-    List<String> vals = new ArrayList<String>();
-    for(String v : spec.values()) {
-      vals.add(v);
-    }
-    String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
-
-    try {
-      List<String> testVals = client.partitionNameToVals(partName);
-      assertTrue("Values from name are incorrect", vals.equals(testVals));
-
-      Map<String, String> testSpec = client.partitionNameToSpec(partName);
-      assertTrue("Spec from name is incorrect", spec.equals(testSpec));
-
-      List<String> emptyVals = client.partitionNameToVals("");
-      assertTrue("Values should be empty", emptyVals.size() == 0);
-
-      Map<String, String> emptySpec =  client.partitionNameToSpec("");
-      assertTrue("Spec should be empty", emptySpec.size() == 0);
-    } catch (Exception e) {
-      fail("partition name methods threw: " + e);
-    }
-  }
-
-  /**
-   * Tests creating a table and a partition, then tries to drop the table without
-   * dropping the partition.
-   *
-   * @throws Exception
-   */
-  public void testPartition() throws Exception {
-    partitionTester(client, hiveConf);
-  }
-
-  public static void partitionTester(HiveMetaStoreClient client, HiveConf hiveConf)
-    throws Exception {
-    try {
-      String dbName = "compdb";
-      String tblName = "comptbl";
-      String typeName = "Person";
-      List<String> vals = makeVals("2008-07-01 14:13:12", "14");
-      List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
-      List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
-      List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
-
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-      db = client.getDatabase(dbName);
-      Path dbPath = new Path(db.getLocationUri());
-      FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
-
-      client.dropType(typeName);
-      Type typ1 = new Type();
-      typ1.setName(typeName);
-      typ1.setFields(new ArrayList<FieldSchema>(2));
-      typ1.getFields().add(
-          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      typ1.getFields().add(
-          new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-      client.createType(typ1);
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(typ1.getFields());
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters()
-          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-      sd.setSortCols(new ArrayList<Order>());
-      sd.setStoredAsSubDirectories(false);
-      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-
-      //skewed information
-      SkewedInfo skewInfor = new SkewedInfo();
-      skewInfor.setSkewedColNames(Arrays.asList("name"));
-      List<String> skv = Arrays.asList("1");
-      skewInfor.setSkewedColValues(Arrays.asList(skv));
-      Map<List<String>, String> scvlm = new HashMap<List<String>, String>();
-      scvlm.put(skv, "location1");
-      skewInfor.setSkewedColValueLocationMaps(scvlm);
-      sd.setSkewedInfo(skewInfor);
-
-      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, ""));
-
-      client.createTable(tbl);
-
-      if (isThriftClient) {
-        // the createTable() above does not update the location in the 'tbl'
-        // object when the client is a thrift client and the code below relies
-        // on the location being present in the 'tbl' object - so get the table
-        // from the metastore
-        tbl = client.getTable(dbName, tblName);
-      }
-
-      Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
-      Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
-      Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
-      Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
-
-      // check if the partition exists (it shouldn't)
-      boolean exceptionThrown = false;
-      try {
-        Partition p = client.getPartition(dbName, tblName, vals);
-      } catch(Exception e) {
-        assertEquals("partition should not have existed",
-            NoSuchObjectException.class, e.getClass());
-        exceptionThrown = true;
-      }
-      assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
-      Partition retp = client.add_partition(part);
-      assertNotNull("Unable to create partition " + part, retp);
-      Partition retp2 = client.add_partition(part2);
-      assertNotNull("Unable to create partition " + part2, retp2);
-      Partition retp3 = client.add_partition(part3);
-      assertNotNull("Unable to create partition " + part3, retp3);
-      Partition retp4 = client.add_partition(part4);
-      assertNotNull("Unable to create partition " + part4, retp4);
-
-      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
-      if(isThriftClient) {
-        // since we are using thrift, 'part' will not have the create time and
-        // last DDL time set since it does not get updated in the add_partition()
-        // call - likewise part2 and part3 - set it correctly so that equals check
-        // doesn't fail
-        adjust(client, part, dbName, tblName);
-        adjust(client, part2, dbName, tblName);
-        adjust(client, part3, dbName, tblName);
-      }
-      assertTrue("Partitions are not same", part.equals(part_get));
-
-      // check null cols schemas for a partition
-      List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
-      Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
-      part6.getSd().setCols(null);
-      LOG.info("Creating partition will null field schema");
-      client.add_partition(part6);
-      LOG.info("Listing all partitions for table " + dbName + "." + tblName);
-      final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
-      boolean foundPart = false;
-      for (Partition p : partitions) {
-        if (p.getValues().equals(vals6)) {
-          assertNull(p.getSd().getCols());
-          LOG.info("Found partition " + p + " having null field schema");
-          foundPart = true;
-        }
-      }
-      assertTrue(foundPart);
-
-      String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
-      String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
-      String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
-      String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
-
-      part_get = client.getPartition(dbName, tblName, partName);
-      assertTrue("Partitions are not the same", part.equals(part_get));
-
-      // Test partition listing with a partial spec - ds is specified but hr is not
-      List<String> partialVals = new ArrayList<String>();
-      partialVals.add(vals.get(0));
-      Set<Partition> parts = new HashSet<Partition>();
-      parts.add(part);
-      parts.add(part2);
-
-      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
-          (short) -1);
-      assertTrue("Should have returned 2 partitions", partial.size() == 2);
-      assertTrue("Not all parts returned", partial.containsAll(parts));
-
-      Set<String> partNames = new HashSet<String>();
-      partNames.add(partName);
-      partNames.add(part2Name);
-      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
-          (short) -1);
-      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
-      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
-
-      partNames.add(part3Name);
-      partNames.add(part4Name);
-      partialVals.clear();
-      partialVals.add("");
-      partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
-      assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
-      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
-
-      // Test partition listing with a partial spec - hr is specified but ds is not
-      parts.clear();
-      parts.add(part2);
-      parts.add(part3);
-
-      partialVals.clear();
-      partialVals.add("");
-      partialVals.add(vals2.get(1));
-
-      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
-      assertEquals("Should have returned 2 partitions", 2, partial.size());
-      assertTrue("Not all parts returned", partial.containsAll(parts));
-
-      partNames.clear();
-      partNames.add(part2Name);
-      partNames.add(part3Name);
-      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
-          (short) -1);
-      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
-      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
-
-      // Verify escaped partition names don't return partitions
-      exceptionThrown = false;
-      try {
-        String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
-        client.getPartition(dbName, tblName, badPartName);
-      } catch(NoSuchObjectException e) {
-        exceptionThrown = true;
-      }
-      assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
-
-      Path partPath = new Path(part.getSd().getLocation());
-
-
-      assertTrue(fs.exists(partPath));
-      client.dropPartition(dbName, tblName, part.getValues(), true);
-      assertFalse(fs.exists(partPath));
-
-      // Test append_partition_by_name
-      client.appendPartition(dbName, tblName, partName);
-      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
-      assertTrue("Append partition by name failed", part5.getValues().equals(vals));;
-      Path part5Path = new Path(part5.getSd().getLocation());
-      assertTrue(fs.exists(part5Path));
-
-      // Test drop_partition_by_name
-      assertTrue("Drop partition by name failed",
-          client.dropPartition(dbName, tblName, partName, true));
-      assertFalse(fs.exists(part5Path));
-
-      // add the partition again so that drop table with a partition can be
-      // tested
-      retp = client.add_partition(part);
-      assertNotNull("Unable to create partition " + part, retp);
-
-      // test add_partitions
-
-      List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
-      List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
-      List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
-      List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
-      List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
-
-      Exception savedException;
-
-      // add_partitions(empty list) : ok, normal operation
-      client.add_partitions(new ArrayList<Partition>());
-
-      // add_partitions(1,2,3) : ok, normal operation
-      Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
-      Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
-      Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
-      client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));
-
-      if(isThriftClient) {
-        // do DDL time munging if thrift mode
-        adjust(client, mpart1, dbName, tblName);
-        adjust(client, mpart2, dbName, tblName);
-        adjust(client, mpart3, dbName, tblName);
-      }
-      verifyPartitionsPublished(client, dbName, tblName,
-          Arrays.asList(mvals1.get(0)),
-          Arrays.asList(mpart1,mpart2,mpart3));
-
-      Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
-      Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
-
-      // create dir for /mpart5
-      Path mp5Path = new Path(mpart5.getSd().getLocation());
-      warehouse.mkdirs(mp5Path);
-      assertTrue(fs.exists(mp5Path));
-
-      // add_partitions(5,4) : err = duplicate keyvals on mpart4
-      savedException = null;
-      try {
-        client.add_partitions(Arrays.asList(mpart5,mpart4));
-      } catch (Exception e) {
-        savedException = e;
-      } finally {
-        assertNotNull(savedException);
-      }
-
-      // check that /mpart4 does not exist, but /mpart5 still does.
-      assertTrue(fs.exists(mp5Path));
-      assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
-
-      // add_partitions(5) : ok
-      client.add_partitions(Arrays.asList(mpart5));
-
-      if(isThriftClient) {
-        // do DDL time munging if thrift mode
-        adjust(client, mpart5, dbName, tblName);
-      }
-
-      verifyPartitionsPublished(client, dbName, tblName,
-          Arrays.asList(mvals1.get(0)),
-          Arrays.asList(mpart1,mpart2,mpart3,mpart5));
-
-      //// end add_partitions tests
-
-      client.dropTable(dbName, tblName);
-
-      client.dropType(typeName);
-
-      // recreate table as external, drop partition and it should
-      // still exist
-      tbl.setParameters(new HashMap<String, String>());
-      tbl.getParameters().put("EXTERNAL", "TRUE");
-      client.createTable(tbl);
-      retp = client.add_partition(part);
-      assertTrue(fs.exists(partPath));
-      client.dropPartition(dbName, tblName, part.getValues(), true);
-      assertTrue(fs.exists(partPath));
-
-      for (String tableName : client.getTables(dbName, "*")) {
-        client.dropTable(dbName, tableName);
-      }
-
-      client.dropDatabase(dbName);
-
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testPartition() failed.");
-      throw e;
-    }
-  }
-
-  private static void verifyPartitionsPublished(HiveMetaStoreClient client,
-      String dbName, String tblName, List<String> partialSpec,
-      List<Partition> expectedPartitions)
-          throws NoSuchObjectException, MetaException, TException {
-    // Test partition listing with a partial spec
-
-    List<Partition> mpartial = client.listPartitions(dbName, tblName, partialSpec,
-        (short) -1);
-    assertEquals("Should have returned "+expectedPartitions.size()+
-        " partitions, returned " + mpartial.size(),
-        expectedPartitions.size(), mpartial.size());
-    assertTrue("Not all parts returned", mpartial.containsAll(expectedPartitions));
-  }
-
-  private static List<String> makeVals(String ds, String id) {
-    List<String> vals4 = new ArrayList<String>(2);
-    vals4.add(ds);
-    vals4.add(id);
-    return vals4;
-  }
-
-  private static Partition makePartitionObject(String dbName, String tblName,
-      List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
-    Partition part4 = new Partition();
-    part4.setDbName(dbName);
-    part4.setTableName(tblName);
-    part4.setValues(ptnVals);
-    part4.setParameters(new HashMap<String, String>());
-    part4.setSd(tbl.getSd().deepCopy());
-    part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
-    part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
-    MetaStoreUtils.updatePartitionStatsFast(part4, warehouse, null);
-    return part4;
-  }
-
-  public void testListPartitions() throws Throwable {
-    // create a table with multiple partitions
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    cleanUp(dbName, tblName, typeName);
-
-    List<List<String>> values = new ArrayList<List<String>>();
-    values.add(makeVals("2008-07-01 14:13:12", "14"));
-    values.add(makeVals("2008-07-01 14:13:12", "15"));
-    values.add(makeVals("2008-07-02 14:13:12", "15"));
-    values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-    List<Partition> partitions = client.listPartitions(dbName, tblName, (short)-1);
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() +
-      " partitions", values.size(), partitions.size());
-
-    partitions = client.listPartitions(dbName, tblName, (short)(values.size()/2));
-
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() / 2 +
-      " partitions",values.size() / 2, partitions.size());
-
-
-    partitions = client.listPartitions(dbName, tblName, (short) (values.size() * 2));
-
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() +
-      " partitions",values.size(), partitions.size());
-
-    cleanUp(dbName, tblName, typeName);
-
-  }
-
-  public void testListPartitionsWithLimitEnabled() throws Throwable {
-    // create a table with multiple partitions
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    cleanUp(dbName, tblName, typeName);
-
-    // Create too many partitions, just enough to validate over limit requests
-    List<List<String>> values = new ArrayList<List<String>>();
-    for (int i=0; i<DEFAULT_LIMIT_PARTITION_REQUEST + 1; i++) {
-      values.add(makeVals("2008-07-01 14:13:12", Integer.toString(i)));
-    }
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-    List<Partition> partitions;
-    short maxParts;
-
-    // Requesting more partitions than allowed should throw an exception
-    try {
-      maxParts = -1;
-      partitions = client.listPartitions(dbName, tblName, maxParts);
-      fail("should have thrown MetaException about partition limit");
-    } catch (MetaException e) {
-      // Expected.
-    }
-
-    // Requesting more partitions than allowed should throw an exception
-    try {
-      maxParts = DEFAULT_LIMIT_PARTITION_REQUEST + 1;
-      partitions = client.listPartitions(dbName, tblName, maxParts);
-      fail("should have thrown MetaException about partition limit");
-    } catch (MetaException e) {
-      // Expected.
-    }
-
-    // Requesting less partitions than allowed should work
-    maxParts = DEFAULT_LIMIT_PARTITION_REQUEST / 2;
-    partitions = client.listPartitions(dbName, tblName, maxParts);
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned 50 partitions", maxParts, partitions.size());
-  }
-
-  public void testAlterTableCascade() throws Throwable {
-    // create a table with multiple partitions
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    cleanUp(dbName, tblName, typeName);
-
-    List<List<String>> values = new ArrayList<List<String>>();
-    values.add(makeVals("2008-07-01 14:13:12", "14"));
-    values.add(makeVals("2008-07-01 14:13:12", "15"));
-    values.add(makeVals("2008-07-02 14:13:12", "15"));
-    values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-    Table tbl = client.getTable(dbName, tblName);
-    List<FieldSchema> cols = tbl.getSd().getCols();
-    cols.add(new FieldSchema("new_col", serdeConstants.STRING_TYPE_NAME, ""));
-    tbl.getSd().setCols(cols);
-    //add new column with cascade option
-    client.alter_table(dbName, tblName, tbl, true);
-    //
-    Table tbl2 = client.getTable(dbName, tblName);
-    Assert.assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size());
-    Assert.assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName());
-    //get a partition
-    List<String> pvalues = new ArrayList<>(2);
-    pvalues.add("2008-07-01 14:13:12");
-    pvalues.add("14");
-    Partition partition = client.getPartition(dbName, tblName, pvalues);
-    Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
-    Assert.assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName());
-
-    //add another column
-    cols = tbl.getSd().getCols();
-    cols.add(new FieldSchema("new_col2", serdeConstants.STRING_TYPE_NAME, ""));
-    tbl.getSd().setCols(cols);
-    //add new column with no cascade option
-    client.alter_table(dbName, tblName, tbl, false);
-    tbl2 = client.getTable(dbName, tblName);
-    Assert.assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size());
-    Assert.assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName());
-    //get partition, this partition should not have the newly added column since cascade option
-    //was false
-    partition = client.getPartition(dbName, tblName, pvalues);
-    Assert.assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());  
-  }
-
-
-  public void testListPartitionNames() throws Throwable {
-    // create a table with multiple partitions
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    cleanUp(dbName, tblName, typeName);
-
-    List<List<String>> values = new ArrayList<List<String>>();
-    values.add(makeVals("2008-07-01 14:13:12", "14"));
-    values.add(makeVals("2008-07-01 14:13:12", "15"));
-    values.add(makeVals("2008-07-02 14:13:12", "15"));
-    values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-    List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() +
-      " partitions", values.size(), partitions.size());
-
-    partitions = client.listPartitionNames(dbName, tblName, (short)(values.size()/2));
-
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() / 2 +
-      " partitions",values.size() / 2, partitions.size());
-
-
-    partitions = client.listPartitionNames(dbName, tblName, (short) (values.size() * 2));
-
-    assertNotNull("should have returned partitions", partitions);
-    assertEquals(" should have returned " + values.size() +
-      " partitions",values.size(), partitions.size());
-
-    cleanUp(dbName, tblName, typeName);
-
-  }
-
-
-  public void testDropTable() throws Throwable {
-    // create a table with multiple partitions
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    cleanUp(dbName, tblName, typeName);
-
-    List<List<String>> values = new ArrayList<List<String>>();
-    values.add(makeVals("2008-07-01 14:13:12", "14"));
-    values.add(makeVals("2008-07-01 14:13:12", "15"));
-    values.add(makeVals("2008-07-02 14:13:12", "15"));
-    values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-    client.dropTable(dbName, tblName);
-    client.dropType(typeName);
-
-    boolean exceptionThrown = false;
-    try {
-      client.getTable(dbName, tblName);
-    } catch(Exception e) {
-      assertEquals("table should not have existed",
-          NoSuchObjectException.class, e.getClass());
-      exceptionThrown = true;
-    }
-    assertTrue("Table " + tblName + " should have been dropped ", exceptionThrown);
-
-  }
-
-  public void testAlterViewPartition() throws Throwable {
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String viewName = "compView";
-
-    client.dropTable(dbName, tblName);
-    silentDropDatabase(dbName);
-    Database db = new Database();
-    db.setName(dbName);
-    db.setDescription("Alter Partition Test database");
-    client.createDatabase(db);
-
-    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-    cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-    cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    tbl.setSd(sd);
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setParameters(new HashMap<String, String>());
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tbl.getTableName());
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters()
-        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-    sd.setInputFormat(HiveInputFormat.class.getName());
-    sd.setOutputFormat(HiveOutputFormat.class.getName());
-    sd.setSortCols(new ArrayList<Order>());
-
-    client.createTable(tbl);
-
-    if (isThriftClient) {
-      // the createTable() above does not update the location in the 'tbl'
-      // object when the client is a thrift client and the code below relies
-      // on the location being present in the 'tbl' object - so get the table
-      // from the metastore
-      tbl = client.getTable(dbName, tblName);
-    }
-
-    ArrayList<FieldSchema> viewCols = new ArrayList<FieldSchema>(1);
-    viewCols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-    ArrayList<FieldSchema> viewPartitionCols = new ArrayList<FieldSchema>(1);
-    viewPartitionCols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-
-    Table view = new Table();
-    view.setDbName(dbName);
-    view.setTableName(viewName);
-    view.setTableType(TableType.VIRTUAL_VIEW.name());
-    view.setPartitionKeys(viewPartitionCols);
-    view.setViewOriginalText("SELECT income, name FROM " + tblName);
-    view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName +
-        "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
-    view.setRewriteEnabled(false);
-    StorageDescriptor viewSd = new StorageDescriptor();
-    view.setSd(viewSd);
-    viewSd.setCols(viewCols);
-    viewSd.setCompressed(false);
-    viewSd.setParameters(new HashMap<String, String>());
-    viewSd.setSerdeInfo(new SerDeInfo());
-    viewSd.getSerdeInfo().setParameters(new HashMap<String, String>());
-
-    client.createTable(view);
-
-    if (isThriftClient) {
-      // the createTable() above does not update the location in the 'tbl'
-      // object when the client is a thrift client and the code below relies
-      // on the location being present in the 'tbl' object - so get the table
-      // from the metastore
-      view = client.getTable(dbName, viewName);
-    }
-
-    List<String> vals = new ArrayList<String>(1);
-    vals.add("abc");
-
-    Partition part = new Partition();
-    part.setDbName(dbName);
-    part.setTableName(viewName);
-    part.setValues(vals);
-    part.setParameters(new HashMap<String, String>());
-
-    client.add_partition(part);
-
-    Partition part2 = client.getPartition(dbName, viewName, part.getValues());
-
-    part2.getParameters().put("a", "b");
-
-    client.alter_partition(dbName, viewName, part2, null);
-
-    Partition part3 = client.getPartition(dbName, viewName, part.getValues());
-    assertEquals("couldn't view alter partition", part3.getParameters().get(
-        "a"), "b");
-
-    client.dropTable(dbName, viewName);
-
-    client.dropTable(dbName, tblName);
-
-    client.dropDatabase(dbName);
-  }
-
-  public void testAlterPartition() throws Throwable {
-
-    try {
-      String dbName = "compdb";
-      String tblName = "comptbl";
-      List<String> vals = new ArrayList<String>(2);
-      vals.add("2008-07-01");
-      vals.add("14");
-
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-      Database db = new Database();
-      db.setName(dbName);
-      db.setDescription("Alter Partition Test database");
-      client.createDatabase(db);
-
-      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(cols);
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters()
-          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-      sd.setSortCols(new ArrayList<Order>());
-
-      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, ""));
-
-      client.createTable(tbl);
-
-      if (isThriftClient) {
-        // the createTable() above does not update the location in the 'tbl'
-        // object when the client is a thrift client and the code below relies
-        // on the location being present in the 'tbl' object - so get the table
-        // from the metastore
-        tbl = client.getTable(dbName, tblName);
-      }
-
-      Partition part = new Partition();
-      part.setDbName(dbName);
-      part.setTableName(tblName);
-      part.setValues(vals);
-      part.setParameters(new HashMap<String, String>());
-      part.setSd(tbl.getSd());
-      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
-      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
-
-      client.add_partition(part);
-
-      Partition part2 = client.getPartition(dbName, tblName, part.getValues());
-
-      part2.getParameters().put("retention", "10");
-      part2.getSd().setNumBuckets(12);
-      part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
-      client.alter_partition(dbName, tblName, part2, null);
-
-      Partition part3 = client.getPartition(dbName, tblName, part.getValues());
-      assertEquals("couldn't alter partition", part3.getParameters().get(
-          "retention"), "10");
-      assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
-          .getParameters().get("abc"), "1");
-      assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
-          12);
-
-      client.dropTable(dbName, tblName);
-
-      client.dropDatabase(dbName);
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testPartition() failed.");
-      throw e;
-    }
-  }
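
Condensed, the alter flow exercised above is read-modify-write; the trailing
null is the optional EnvironmentContext argument of this client API. A sketch,
reusing the names from the test:

    // fetch the current partition, change metadata, write it back
    Partition p = client.getPartition(dbName, tblName, vals);
    p.getParameters().put("retention", "10");
    client.alter_partition(dbName, tblName, p, null); // null EnvironmentContext
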
-
-  public void testRenamePartition() throws Throwable {
-
-    try {
-      String dbName = "compdb1";
-      String tblName = "comptbl1";
-      List<String> vals = new ArrayList<String>(2);
-      vals.add("2011-07-11");
-      vals.add("8");
-      String part_path = "/ds=2011-07-11/hr=8";
-      List<String> tmp_vals = new ArrayList<String>(2);
-      tmp_vals.add("tmp_2011-07-11");
-      tmp_vals.add("-8");
-      String part2_path = "/ds=tmp_2011-07-11/hr=-8";
-
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-      Database db = new Database();
-      db.setName(dbName);
-      db.setDescription("Rename Partition Test database");
-      client.createDatabase(db);
-
-      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(cols);
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters()
-          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-      sd.setSortCols(new ArrayList<Order>());
-
-      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, ""));
-
-      client.createTable(tbl);
-
-      if (isThriftClient) {
-        // the createTable() above does not update the location in the 'tbl'
-        // object when the client is a thrift client and the code below relies
-        // on the location being present in the 'tbl' object - so get the table
-        // from the metastore
-        tbl = client.getTable(dbName, tblName);
-      }
-
-      Partition part = new Partition();
-      part.setDbName(dbName);
-      part.setTableName(tblName);
-      part.setValues(vals);
-      part.setParameters(new HashMap<String, String>());
-      part.setSd(tbl.getSd().deepCopy());
-      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
-      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
-      part.getParameters().put("retention", "10");
-      part.getSd().setNumBuckets(12);
-      part.getSd().getSerdeInfo().getParameters().put("abc", "1");
-
-      client.add_partition(part);
-
-      part.setValues(tmp_vals);
-      client.renamePartition(dbName, tblName, vals, part);
-
-      boolean exceptionThrown = false;
-      try {
-        client.getPartition(dbName, tblName, vals);
-      } catch(Exception e) {
-        assertEquals("partition should not have existed",
-            NoSuchObjectException.class, e.getClass());
-        exceptionThrown = true;
-      }
-      assertTrue("Expected NoSuchObjectException", exceptionThrown);
-
-      Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
-      assertEquals("couldn't rename partition", part3.getParameters().get(
-          "retention"), "10");
-      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
-          .getParameters().get("abc"), "1");
-      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
-          12);
-      assertEquals("new partition sd matches", part3.getSd().getLocation(),
-          tbl.getSd().getLocation() + part2_path);
-
-      part.setValues(vals);
-      client.renamePartition(dbName, tblName, tmp_vals, part);
-
-      exceptionThrown = false;
-      try {
-        client.getPartition(dbName, tblName, tmp_vals);
-      } catch(Exception e) {
-        assertEquals("partition should not have existed",
-            NoSuchObjectException.class, e.getClass());
-        exceptionThrown = true;
-      }
-      assertTrue("Expected NoSuchObjectException", exceptionThrown);
-
-      part3 = client.getPartition(dbName, tblName, vals);
-      assertEquals("couldn't rename partition", part3.getParameters().get(
-          "retention"), "10");
-      assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
-          .getParameters().get("abc"), "1");
-      assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
-          12);
-      assertEquals("new partition sd matches", part3.getSd().getLocation(),
-          tbl.getSd().getLocation() + part_path);
-
-      client.dropTable(dbName, tblName);
-
-      client.dropDatabase(dbName);
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testRenamePartition() failed.");
-      throw e;
-    }
-  }
-
-  public void testDatabase() throws Throwable {
-    try {
-      // clear up any existing databases
-      silentDropDatabase(TEST_DB1_NAME);
-      silentDropDatabase(TEST_DB2_NAME);
-
-      Database db = new Database();
-      db.setName(TEST_DB1_NAME);
-      db.setOwnerName(SessionState.getUserFromAuthenticator());
-      db.setOwnerType(PrincipalType.USER);
-      client.createDatabase(db);
-
-      db = client.getDatabase(TEST_DB1_NAME);
-
-      assertEquals("name of returned db is different from that of inserted db",
-          TEST_DB1_NAME, db.getName());
-      assertEquals("location of the returned db is different from that of inserted db",
-          warehouse.getDatabasePath(db).toString(), db.getLocationUri());
-      assertEquals(db.getOwnerName(), SessionState.getUserFromAuthenticator());
-      assertEquals(db.getOwnerType(), PrincipalType.USER);
-      Database db2 = new Database();
-      db2.setName(TEST_DB2_NAME);
-      client.createDatabase(db2);
-
-      db2 = client.getDatabase(TEST_DB2_NAME);
-
-      assertEquals("name of returned db is different from that of inserted db",
-          TEST_DB2_NAME, db2.getName());
-      assertEquals("location of the returned db is different from that of inserted db",
-          warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
-
-      List<String> dbs = client.getDatabases(".*");
-
-      assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
-      assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));
-
-      client.dropDatabase(TEST_DB1_NAME);
-      client.dropDatabase(TEST_DB2_NAME);
-      silentDropDatabase(TEST_DB1_NAME);
-      silentDropDatabase(TEST_DB2_NAME);
-    } catch (Throwable e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testDatabase() failed.");
-      throw e;
-    }
-  }
-
-  public void testDatabaseLocationWithPermissionProblems() throws Exception {
-
-    // Note: The following test will fail if you are running this test as root. Setting
-    // permission to '0' on the database folder will not preclude root from being able
-    // to create the necessary files.
-
-    if (System.getProperty("user.name").equals("root")) {
-      System.err.println("Skipping test because you are running as root!");
-      return;
-    }
-
-    silentDropDatabase(TEST_DB1_NAME);
-
-    Database db = new Database();
-    db.setName(TEST_DB1_NAME);
-    String dbLocation =
-      HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test/_testDB_create_";
-    FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
-    fs.mkdirs(
-              new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"),
-              new FsPermission((short) 0));
-    db.setLocationUri(dbLocation);
-
-
-    boolean createFailed = false;
-    try {
-      client.createDatabase(db);
-    } catch (MetaException cantCreateDB) {
-      createFailed = true;
-    } finally {
-      // Cleanup
-      if (!createFailed) {
-        try {
-          client.dropDatabase(TEST_DB1_NAME);
-        } catch(Exception e) {
-          System.err.println("Failed to remove database in cleanup: " + e.getMessage());
-        }
-      }
-
-      fs.setPermission(new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"),
-                       new FsPermission((short) 0755)); // octal: rwxr-xr-x
-      fs.delete(new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/test"), true);
-    }
-
-    assertTrue("Database creation succeeded even with permission problem", createFailed);
-  }
-
-  public void testDatabaseLocation() throws Throwable {
-    try {
-      // clear up any existing databases
-      silentDropDatabase(TEST_DB1_NAME);
-
-      Database db = new Database();
-      db.setName(TEST_DB1_NAME);
-      String dbLocation =
-          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_create_";
-      db.setLocationUri(dbLocation);
-      client.createDatabase(db);
-
-      db = client.getDatabase(TEST_DB1_NAME);
-
-      assertEquals("name of returned db is different from that of inserted db",
-          TEST_DB1_NAME, db.getName());
-      assertEquals("location of the returned db is different from that of inserted db",
-          warehouse.getDnsPath(new Path(dbLocation)).toString(), db.getLocationUri());
-
-      client.dropDatabase(TEST_DB1_NAME);
-      silentDropDatabase(TEST_DB1_NAME);
-
-      boolean objectNotExist = false;
-      try {
-        client.getDatabase(TEST_DB1_NAME);
-      } catch (NoSuchObjectException e) {
-        objectNotExist = true;
-      }
-      assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist);
-
-      db = new Database();
-      db.setName(TEST_DB1_NAME);
-      dbLocation =
-          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "/_testDB_file_";
-      FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), hiveConf);
-      fs.createNewFile(new Path(dbLocation));
-      fs.deleteOnExit(new Path(dbLocation));
-      db.setLocationUri(dbLocation);
-
-      boolean createFailed = false;
-      try {
-        client.createDatabase(db);
-      } catch (MetaException cantCreateDB) {
-        System.err.println(cantCreateDB.getMessage());
-        createFailed = true;
-      }
-      assertTrue("Database creation succeeded even location exists and is a file", createFailed);
-
-      objectNotExist = false;
-      try {
-        client.getDatabase(TEST_DB1_NAME);
-      } catch (NoSuchObjectException e) {
-        objectNotExist = true;
-      }
-      assertTrue("Database " + TEST_DB1_NAME + " exists when location is specified and is a file",
-          objectNotExist);
-
-    } catch (Throwable e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testDatabaseLocation() failed.");
-      throw e;
-    }
-  }
-
-
-  public void testSimpleTypeApi() throws Exception {
-    try {
-      client.dropType(serdeConstants.INT_TYPE_NAME);
-
-      Type typ1 = new Type();
-      typ1.setName(serdeConstants.INT_TYPE_NAME);
-      boolean ret = client.createType(typ1);
-      assertTrue("Unable to create type", ret);
-
-      Type typ1_2 = client.getType(serdeConstants.INT_TYPE_NAME);
-      assertNotNull(typ1_2);
-      assertEquals(typ1.getName(), typ1_2.getName());
-
-      ret = client.dropType(serdeConstants.INT_TYPE_NAME);
-      assertTrue("unable to drop type integer", ret);
-
-      boolean exceptionThrown = false;
-      try {
-        client.getType(serdeConstants.INT_TYPE_NAME);
-      } catch (NoSuchObjectException e) {
-        exceptionThrown = true;
-      }
-      assertTrue("Expected NoSuchObjectException", exceptionThrown);
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testSimpleTypeApi() failed.");
-      throw e;
-    }
-  }
-
-  // TODO:pc need to enhance this with complex fields and getType_all function
-  public void testComplexTypeApi() throws Exception {
-    try {
-      client.dropType("Person");
-
-      Type typ1 = new Type();
-      typ1.setName("Person");
-      typ1.setFields(new ArrayList<FieldSchema>(2));
-      typ1.getFields().add(
-          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      typ1.getFields().add(
-          new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-      boolean ret = client.createType(typ1);
-      assertTrue("Unable to create type", ret);
-
-      Type typ1_2 = client.getType("Person");
-      assertNotNull("type Person not found", typ1_2);
-      assertEquals(typ1.getName(), typ1_2.getName());
-      assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
-      assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
-      assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
-
-      client.dropType("Family");
-
-      Type fam = new Type();
-      fam.setName("Family");
-      fam.setFields(new ArrayList<FieldSchema>(2));
-      fam.getFields().add(
-          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      fam.getFields().add(
-          new FieldSchema("members",
-              ColumnType.getListType(typ1.getName()), ""));
-
-      ret = client.createType(fam);
-      assertTrue("Unable to create type " + fam.getName(), ret);
-
-      Type fam2 = client.getType("Family");
-      assertNotNull("type Person not found", fam2);
-      assertEquals(fam.getName(), fam2.getName());
-      assertEquals(fam.getFields().size(), fam2.getFields().size());
-      assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
-      assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
-
-      ret = client.dropType("Family");
-      assertTrue("unable to drop type Family", ret);
-
-      ret = client.dropType("Person");
-      assertTrue("unable to drop type Person", ret);
-
-      boolean exceptionThrown = false;
-      try {
-        client.getType("Person");
-      } catch (NoSuchObjectException e) {
-        exceptionThrown = true;
-      }
-      assertTrue("Expected NoSuchObjectException", exceptionThrown);
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testComplexTypeApi() failed.");
-      throw e;
-    }
-  }
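
The "members" field above composes its type name with ColumnType.getListType.
Illustratively (assuming, as the usage suggests, that the helper simply wraps
the element type name):

    String memberType = ColumnType.getListType("Person");
    // expected to produce the type name "list<Person>"
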
-
-  public void testSimpleTable() throws Exception {
-    try {
-      String dbName = "simpdb";
-      String tblName = "simptbl";
-      String tblName2 = "simptbl2";
-      String typeName = "Person";
-
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-
-      client.dropType(typeName);
-      Type typ1 = new Type();
-      typ1.setName(typeName);
-      typ1.setFields(new ArrayList<FieldSchema>(2));
-      typ1.getFields().add(
-          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      typ1.getFields().add(
-          new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-      client.createType(typ1);
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(typ1.getFields());
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(
-          org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
-      sd.getSerdeInfo().setSerializationLib(
-          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-
-      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
-
-      client.createTable(tbl);
-
-      if (isThriftClient) {
-        // the createTable() above does not update the location in the 'tbl'
-        // object when the client is a thrift client and the code below relies
-        // on the location being present in the 'tbl' object - so get the table
-        // from the metastore
-        tbl = client.getTable(dbName, tblName);
-      }
-
-      Table tbl2 = client.getTable(dbName, tblName);
-      assertNotNull(tbl2);
-      assertEquals(tbl2.getDbName(), dbName);
-      assertEquals(tbl2.getTableName(), tblName);
-      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
-      assertEquals(tbl2.getSd().isCompressed(), false);
-      assertEquals(tbl2.getSd().getNumBuckets(), 1);
-      assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
-      assertNotNull(tbl2.getSd().getSerdeInfo());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(
-          org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
-
-      tbl2.setTableName(tblName2);
-      tbl2.setParameters(new HashMap<String, String>());
-      tbl2.getParameters().put("EXTERNAL", "TRUE");
-      tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
-
-      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
-      assertNotNull(fieldSchemas);
-      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
-      for (FieldSchema fs : tbl.getSd().getCols()) {
-        assertTrue(fieldSchemas.contains(fs));
-      }
-
-      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
-      assertNotNull(fieldSchemasFull);
-      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
-          + tbl.getPartitionKeys().size());
-      for (FieldSchema fs : tbl.getSd().getCols()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-      for (FieldSchema fs : tbl.getPartitionKeys()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-
-      client.createTable(tbl2);
-      if (isThriftClient) {
-        tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
-      }
-
-      Table tbl3 = client.getTable(dbName, tblName2);
-      assertNotNull(tbl3);
-      assertEquals(tbl3.getDbName(), dbName);
-      assertEquals(tbl3.getTableName(), tblName2);
-      assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
-      assertEquals(tbl3.getSd().isCompressed(), false);
-      assertEquals(tbl3.getSd().getNumBuckets(), 1);
-      assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
-      assertEquals(tbl3.getParameters(), tbl2.getParameters());
-
-      fieldSchemas = client.getFields(dbName, tblName2);
-      assertNotNull(fieldSchemas);
-      assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
-      for (FieldSchema fs : tbl2.getSd().getCols()) {
-        assertTrue(fieldSchemas.contains(fs));
-      }
-
-      fieldSchemasFull = client.getSchema(dbName, tblName2);
-      assertNotNull(fieldSchemasFull);
-      assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
-          + tbl2.getPartitionKeys().size());
-      for (FieldSchema fs : tbl2.getSd().getCols()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-      for (FieldSchema fs : tbl2.getPartitionKeys()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-
-      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
-          .get("test_param_1"));
-      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
-      assertTrue("Partition key list is not empty",
-          (tbl2.getPartitionKeys() == null)
-              || (tbl2.getPartitionKeys().size() == 0));
-
-      //test get_table_objects_by_name functionality
-      ArrayList<String> tableNames = new ArrayList<String>();
-      tableNames.add(tblName2);
-      tableNames.add(tblName);
-      tableNames.add(tblName2);
-      List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
-
-      assertEquals(2, foundTables.size());
-      for (Table t: foundTables) {
-        if (t.getTableName().equals(tblName2)) {
-          assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
-        } else {
-          assertEquals(t.getTableName(), tblName);
-          assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
-        }
-        assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
-        assertEquals(t.getSd().isCompressed(), false);
-        assertEquals(t.getSd().getNumBuckets(), 1);
-        assertNotNull(t.getSd().getSerdeInfo());
-        assertEquals(t.getDbName(), dbName);
-      }
-
-      tableNames.add(1, "table_that_doesnt_exist");
-      foundTables = client.getTableObjectsByName(dbName, tableNames);
-      assertEquals(foundTables.size(), 2);
-
-      InvalidOperationException ioe = null;
-      try {
-        foundTables = client.getTableObjectsByName(dbName, null);
-      } catch (InvalidOperationException e) {
-        ioe = e;
-      }
-      assertNotNull(ioe);
-      assertTrue("Table not found", ioe.getMessage().contains("null tables"));
-
-      UnknownDBException udbe = null;
-      try {
-        foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
-      } catch (UnknownDBException e) {
-        udbe = e;
-      }
-      assertNotNull(udbe);
-      assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist"));
-
-      udbe = null;
-      try {
-        foundTables = client.getTableObjectsByName("", tableNames);
-      } catch (UnknownDBException e) {
-        udbe = e;
-      }
-      assertNotNull(udbe);
-      assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
-
-      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
-      client.dropTable(dbName, tblName);
-      assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
-
-      client.dropTable(dbName, tblName2);
-      assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
-
-      client.dropType(typeName);
-      client.dropDatabase(dbName);
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testSimpleTable() failed.");
-      throw e;
-    }
-  }
-
-  // Tests that in the absence of stats for partitions, and/or absence of columns
-  // to get stats for, the metastore does not break. See HIVE-12083 for motivation.
-  public void testStatsFastTrivial() throws Throwable {
-    String dbName = "tstatsfast";
-    String tblName = "t1";
-    String tblOwner = "statstester";
-    String typeName = "Person";
-    int lastAccessed = 12083;
-
-    cleanUp(dbName,tblName,typeName);
-
-    List<List<String>> values = new ArrayList<List<String>>();
-    values.add(makeVals("2008-07-01 14:13:12", "14"));
-    values.add(makeVals("2008-07-01 14:13:12", "15"));
-    values.add(makeVals("2008-07-02 14:13:12", "15"));
-    values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-    createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-    List<String> emptyColNames = new ArrayList<String>();
-    List<String> emptyPartNames = new ArrayList<String>();
-
-    List<String> colNames = new ArrayList<String>();
-    colNames.add("name");
-    colNames.add("income");
-    List<String> partNames = client.listPartitionNames(dbName,tblName,(short)-1);
-
-    assertEquals(0,emptyColNames.size());
-    assertEquals(0,emptyPartNames.size());
-    assertEquals(2,colNames.size());
-    assertEquals(4,partNames.size());
-
-    // Test for both colNames and partNames being empty:
-    AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames);
-    assertNotNull(aggrStatsEmpty); // short-circuited on client-side, verifying that it's an empty object, not null
-    assertEquals(0,aggrStatsEmpty.getPartsFound());
-    assertNotNull(aggrStatsEmpty.getColStats());
-    assertTrue(aggrStatsEmpty.getColStats().isEmpty()); // JUnit assert; the 'assert' keyword is a no-op without -ea
-
-    // Test for only colNames being empty
-    AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames);
-    assertNotNull(aggrStatsOnlyParts); // short-circuited on client-side, verifying that it's an empty object, not null
-    assertEquals(0,aggrStatsOnlyParts.getPartsFound());
-    assertNotNull(aggrStatsOnlyParts.getColStats());
-    assertTrue(aggrStatsOnlyParts.getColStats().isEmpty());
-
-    // Test for only partNames being empty
-    AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames);
-    assertNotNull(aggrStatsOnlyCols); // short-circuited on client-side, verifying that it's an empty object, not null
-    assertEquals(0,aggrStatsOnlyCols.getPartsFound());
-    assertNotNull(aggrStatsOnlyCols.getColStats());
-    assertTrue(aggrStatsOnlyCols.getColStats().isEmpty());
-
-    // Test for valid values for both.
-    AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames);
-    assertNotNull(aggrStatsFull);
-    assertEquals(0,aggrStatsFull.getPartsFound()); // would still be empty, because no stats are actually populated.
-    assertNotNull(aggrStatsFull.getColStats());
-    assertTrue(aggrStatsFull.getColStats().isEmpty());
-
-  }
-
-  public void testColumnStatistics() throws Throwable {
-
-    String dbName = "columnstatstestdb";
-    String tblName = "tbl";
-    String typeName = "Person";
-    String tblOwner = "testowner";
-    int lastAccessed = 6796;
-
-    try {
-      cleanUp(dbName, tblName, typeName);
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-      createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true);
-
-      // Create a ColumnStatistics Obj
-      String[] colName = new String[]{"income", "name"};
-      double lowValue = 50000.21;
-      double highValue = 1200000.4525;
-      long numNulls = 3;
-      long numDVs = 22;
-      double avgColLen = 50.30;
-      long maxColLen = 102;
-      String[] colType = new String[] {"double", "string"};
-      boolean isTblLevel = true;
-      String partName = null;
-      List<ColumnStatisticsObj> statsObjs = new ArrayList<ColumnStatisticsObj>();
-
-      ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
-      statsDesc.setDbName(dbName);
-      statsDesc.setTableName(tblName);
-      statsDesc.setIsTblLevel(isTblLevel);
-      statsDesc.setPartName(partName);
-
-      ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
-      statsObj.setColName(colName[0]);
-      statsObj.setColType(colType[0]);
-
-      ColumnStatisticsData statsData = new ColumnStatisticsData();
-      DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
-      statsData.setDoubleStats(numericStats);
-
-      statsData.getDoubleStats().setHighValue(highValue);
-      statsData.getDoubleStats().setLowValue(lowValue);
-      statsData.getDoubleStats().setNumDVs(numDVs);
-      statsData.getDoubleStats().setNumNulls(numNulls);
-
-      statsObj.setStatsData(statsData);
-      statsObjs.add(statsObj);
-
-      statsObj = new ColumnStatisticsObj();
-      statsObj.setColName(colName[1]);
-      statsObj.setColType(colType[1]);
-
-      statsData = new ColumnStatisticsData();
-      StringColumnStatsData stringStats = new StringColumnStatsData();
-      statsData.setStringStats(stringStats);
-      statsData.getStringStats().setAvgColLen(avgColLen);
-      statsData.getStringStats().setMaxColLen(maxColLen);
-      statsData.getStringStats().setNumDVs(numDVs);
-      statsData.getStringStats().setNumNulls(numNulls);
-
-      statsObj.setStatsData(statsData);
-      statsObjs.add(statsObj);
-
-      ColumnStatistics colStats = new ColumnStatistics();
-      colStats.setStatsDesc(statsDesc);
-      colStats.setStatsObj(statsObjs);
-
-      // write stats objs persistently
-      client.updateTableColumnStatistics(colStats);
-
-      // retrieve the stats obj that was just written
-      ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
-
-      // compare stats obj to ensure what we get is what we wrote
-      assertNotNull(colStats2);
-      assertEquals(colStats2.getColName(), colName[0]);
-      assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue);
-      assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue);
-      assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
-      assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
-
-      // test delete column stats; if no column name is passed, all column stats
-      // associated with the table are deleted
-      boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
-      assertTrue(status);
-      // try to query stats for a column for which no stats exist
-      assertTrue(client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
-
-      colStats.setStatsDesc(statsDesc);
-      colStats.setStatsObj(statsObjs);
-
-      // update table level column stats
-      client.updateTableColumnStatistics(colStats);
-
-      // query column stats for column whose stats were updated in the previous call
-      colStats2 = client.getTableColumnStatistics(
-          dbName, tblName, Lists.newArrayList(colName[0])).get(0);
-
-      // partition level column statistics test
-      // create a table with multiple partitions
-      cleanUp(dbName, tblName, typeName);
-
-      List<List<String>> values = new ArrayList<List<String>>();
-      values.add(makeVals("2008-07-01 14:13:12", "14"));
-      values.add(makeVals("2008-07-01 14:13:12", "15"));
-      values.add(makeVals("2008-07-02 14:13:12", "15"));
-      values.add(makeVals("2008-07-03 14:13:12", "151"));
-
-      createMultiPartitionTableSchema(dbName, tblName, typeName, values);
-
-      List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
-
-      partName = partitions.get(0);
-      isTblLevel = false;
-
-      // create a new columnstatistics desc to represent partition level column stats
-      statsDesc = new ColumnStatisticsDesc();
-      statsDesc.setDbName(dbName);
-      statsDesc.setTableName(tblName);
-      statsDesc.setPartName(partName);
-      statsDesc.setIsTblLevel(isTblLevel);
-
-      colStats = new ColumnStatistics();
-      colStats.setStatsDesc(statsDesc);
-      colStats.setStatsObj(statsObjs);
-
-      client.updatePartitionColumnStatistics(colStats);
-
-      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
-
-      // compare stats obj to ensure what we get is what we wrote
-      assertNotNull(colStats2);
-      assertEquals(colStats.getStatsDesc().getPartName(), partName);
-      assertEquals(colStats2.getColName(), colName[1]);
-      assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
-      assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen);
-      assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
-      assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
-
-      // test stats deletion at partition level
-      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
-
-      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
-
-      // test get stats on a column for which no stats exist
-      assertTrue(client.getPartitionColumnStatistics(dbName, tblName,
-          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testColumnStatistics() failed.");
-      throw e;
-    } finally {
-      cleanUp(dbName, tblName, typeName);
-    }
-  }
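
Condensed, the client-side round trip this test exercises (a sketch; all
three calls appear above, with 'colStats' and the name lists as built there):

    client.updateTableColumnStatistics(colStats);               // persist table-level stats
    client.getTableColumnStatistics(dbName, tblName,
        Lists.newArrayList("income"));                          // read them back
    client.deleteTableColumnStatistics(dbName, tblName, null);  // null column drops all table stats
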
-
-  static class ClassNotFoundSerde extends LazySimpleSerDe {
-
-    public ClassNotFoundSerde() throws Exception {
-    }
-
-    @Override
-    public void initialize(Configuration job, Properties tbl) throws SerDeException {
-      super.initialize(job, tbl);
-      throw new NoClassDefFoundError();
-    }
-
-  }
-
-  public void testGetSchemaWithNoClassDefFoundError() throws Exception {
-    try {
-      String dbName = "testDb";
-      String tblName = "testTable";
-
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-
-      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(1);
-      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(cols);
-      SerDeInfo serdeInfo = new SerDeInfo();
-      sd.setSerdeInfo(serdeInfo);
-      serdeInfo.setSerializationLib(ClassNotFoundSerde.class.getName());
-
-      client.createTable(tbl);
-
-      boolean metaExceptionCaught = false;
-      try {
-        client.getSchema(dbName, tblName);
-      } catch (MetaException me) {
-        metaExceptionCaught = true;
-      }
-      assertTrue("MetaException is expected when the serde throws NoClassDefFoundError", metaExceptionCaught);
-    } catch (Throwable e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testGetSchemaWithNoClassDefFoundError() failed.");
-      throw e;
-    }
-  }
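
The expectation encoded above is that the metastore surfaces class-loading
failures from a serde as a checked MetaException rather than letting the
NoClassDefFoundError propagate across Thrift. A sketch of that wrapping
pattern (illustrative only; initializeSerde is a placeholder, not the actual
server code):

    try {
      // instantiate and initialize the table's serde;
      // a missing dependency surfaces as NoClassDefFoundError
      initializeSerde(tbl);
    } catch (NoClassDefFoundError e) {
      throw new MetaException("Unable to load serde: " + e.getMessage());
    }
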
-
-  public void testAlterTable() throws Exception {
-    String dbName = "alterdb";
-    String invTblName = "alter-tbl";
-    String tblName = "altertbl";
-
-    try {
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-
-      ArrayList<FieldSchema> invCols = new ArrayList<FieldSchema>(2);
-      invCols.add(new FieldSchema("n-ame", serdeConstants.STRING_TYPE_NAME, ""));
-      invCols.add(new FieldSchema("in.come", serdeConstants.INT_TYPE_NAME, ""));
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(invTblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(invCols);
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(
-          org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
-      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-      
-      boolean failed = false;
-      try {
-        client.createTable(tbl);
-      } catch (InvalidObjectException ex) {
-        failed = true;
-      }
-      if (!failed) {
-        fail("Able to create table with invalid name: " + invTblName);
-      }
-
-      // create an invalid table which has wrong column type
-      ArrayList<FieldSchema> invColsInvType = new ArrayList<FieldSchema>(2);
-      invColsInvType.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      invColsInvType.add(new FieldSchema("income", "xyz", ""));
-      tbl.setTableName(tblName);
-      tbl.getSd().setCols(invColsInvType);
-      boolean failChecker = false;
-      try {
-        client.createTable(tbl);
-      } catch (InvalidObjectException ex) {
-        failChecker = true;
-      }
-      if (!failChecker) {
-        fail("Able to create table with invalid column type: " + tblName);
-      }
-
-      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-      // create a valid table
-      tbl.setTableName(tblName);
-      tbl.getSd().setCols(cols);
-      client.createTable(tbl);
-
-      if (isThriftClient) {
-        tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
-      }
-
-      // now try an invalid alter table
-      Table tbl2 = client.getTable(dbName, tblName);
-      failed = false;
-      try {
-        tbl2.setTableName(invTblName);
-        tbl2.getSd().setCols(invCols);
-        client.alter_table(dbName, tblName, tbl2);
-      } catch (InvalidOperationException ex) {
-        failed = true;
-      }
-      if (!failed) {
-        fail("Able to rename table with invalid name: " + invTblName);
-      }
-
-      // try an invalid alter table: change a partition key name
-      Table tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
-      List<FieldSchema> partitionKeys = tbl_pk.getPartitionKeys();
-      for (FieldSchema fs : partitionKeys) {
-        fs.setName("invalid_to_change_name");
-        fs.setComment("can_change_comment");
-      }
-      tbl_pk.setPartitionKeys(partitionKeys);
-      failed = false;
-      try {
-        client.alter_table(dbName, tblName, tbl_pk);
-      } catch (InvalidOperationException ex) {
-        failed = true;
-      }
-      assertTrue("Should not have succeeded in altering partition key name", failed);
-
-      // try a valid alter table partition key comment
-      failed = false;
-      tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
-      partitionKeys = tbl_pk.getPartitionKeys();
-      for (FieldSchema fs : partitionKeys) {
-        fs.setComment("can_change_comment");
-      }
-      tbl_pk.setPartitionKeys(partitionKeys);
-      try {
-        client.alter_table(dbName, tblName, tbl_pk);
-      } catch (InvalidOperationException ex) {
-        failed = true;
-      }
-      assertFalse("Should not have failed alter table partition comment", failed);
-      Table newT = client.getTable(tbl.getDbName(), tbl.getTableName());
-      assertEquals(partitionKeys, newT.getPartitionKeys());
-
-      // try a valid alter table
-      tbl2.setTableName(tblName + "_renamed");
-      tbl2.getSd().setCols(cols);
-      tbl2.getSd().setNumBuckets(32);
-      client.alter_table(dbName, tblName, tbl2);
-      Table tbl3 = client.getTable(dbName, tbl2.getTableName());
-      assertEquals("Alter table didn't succeed. Num buckets is different ",
-          tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
-      // check that data has moved
-      FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), hiveConf);
-      assertFalse("old table location still exists", fs.exists(new Path(tbl
-          .getSd().getLocation())));
-      assertTrue("data did not move to new location", fs.exists(new Path(tbl3
-          .getSd().getLocation())));
-
-      if (!isThriftClient) {
-        assertEquals("alter table didn't move data correct location", tbl3
-            .getSd().getLocation(), tbl2.getSd().getLocation());
-      }
-
-      // alter table with invalid column type
-      tbl_pk.getSd().setCols(invColsInvType);
-      failed = false;
-      try {
-        client.alter_table(dbName, tbl2.getTableName(), tbl_pk);
-      } catch (InvalidOperationException ex) {
-        failed = true;
-      }
-      assertTrue("Should not have succeeded in altering column", failed);
-
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testSimpleTable() failed.");
-      throw e;
-    } finally {
-      silentDropDatabase(dbName);
-    }
-  }
-
-  public void testComplexTable() throws Exception {
-
-    String dbName = "compdb";
-    String tblName = "comptbl";
-    String typeName = "Person";
-
-    try {
-      client.dropTable(dbName, tblName);
-      silentDropDatabase(dbName);
-      Database db = new Database();
-      db.setName(dbName);
-      client.createDatabase(db);
-
-      client.dropType(typeName);
-      Type typ1 = new Type();
-      typ1.setName(typeName);
-      typ1.setFields(new ArrayList<FieldSchema>(2));
-      typ1.getFields().add(
-          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      typ1.getFields().add(
-          new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-      client.createType(typ1);
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName);
-      StorageDescriptor sd = new StorageDescriptor();
-      tbl.setSd(sd);
-      sd.setCols(typ1.getFields());
-      sd.setCompressed(false);
-      sd.setNumBuckets(1);
-      sd.setParameters(new HashMap<String, String>());
-      sd.getParameters().put("test_param_1", "Use this for comments etc");
-      sd.setBucketCols(new ArrayList<String>(2));
-      sd.getBucketCols().add("name");
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(
-          org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "9");
-      sd.getSerdeInfo().setSerializationLib(
-          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-      
-      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("ds",
-              org.apache.hadoop.hive.serde.serdeConstants.DATE_TYPE_NAME, ""));
-      tbl.getPartitionKeys().add(
-          new FieldSchema("hr",
-              org.apache.hadoop.hive.serde.serdeConstants.INT_TYPE_NAME, ""));
-
-      client.createTable(tbl);
-
-      Table tbl2 = client.getTable(dbName, tblName);
-      assertEquals(tbl2.getDbName(), dbName);
-      assertEquals(tbl2.getTableName(), tblName);
-      assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
-      assertFalse(tbl2.getSd().isCompressed());
-      assertFalse(tbl2.getSd().isStoredAsSubDirectories());
-      assertEquals(tbl2.getSd().getNumBuckets(), 1);
-
-      assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
-          .get("test_param_1"));
-      assertEquals("name", tbl2.getSd().getBucketCols().get(0));
-
-      assertNotNull(tbl2.getPartitionKeys());
-      assertEquals(2, tbl2.getPartitionKeys().size());
-      assertEquals(serdeConstants.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
-          .getType());
-      assertEquals(serdeConstants.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
-          .getType());
-      assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
-      assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());
-
-      List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
-      assertNotNull(fieldSchemas);
-      assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
-      for (FieldSchema fs : tbl.getSd().getCols()) {
-        assertTrue(fieldSchemas.contains(fs));
-      }
-
-      List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
-      assertNotNull(fieldSchemasFull);
-      assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
-          + tbl.getPartitionKeys().size());
-      for (FieldSchema fs : tbl.getSd().getCols()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-      for (FieldSchema fs : tbl.getPartitionKeys()) {
-        assertTrue(fieldSchemasFull.contains(fs));
-      }
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testComplexTable() failed.");
-      throw e;
-    } finally {
-      client.dropTable(dbName, tblName);
-      boolean ret = client.dropType(typeName);
-      assertTrue("Unable to drop type " + typeName, ret);
-      client.dropDatabase(dbName);
-    }
-  }
-
-  public void testTableDatabase() throws Exception {
-    String dbName = "testDb";
-    String tblName_1 = "testTbl_1";
-    String tblName_2 = "testTbl_2";
-
-    try {
-      silentDropDatabase(dbName);
-
-      Database db = new Database();
-      db.setName(dbName);
-      String dbLocation =
-          HiveConf.getVar(hiveConf, HiveConf.ConfVars.METASTOREWAREHOUSE) + "_testDB_table_create_";
-      db.setLocationUri(dbLocation);
-      client.createDatabase(db);
-      db = client.getDatabase(dbName);
-
-      Table tbl = new Table();
-      tbl.setDbName(dbName);
-      tbl.setTableName(tblName_1);
-
-      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
-      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
-
-      StorageDescriptor sd = new StorageDescriptor();
-      sd.setSerdeInfo(new SerDeInfo());
-      sd.getSerdeInfo().setName(tbl.getTableName());
-      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-      sd.setParameters(new HashMap<String, String>());
-      sd.getSerdeInfo().getParameters().put(
-          org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "9");
-      sd.getSerdeInfo().setSerializationLib(
-          org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
-      sd.setInputFormat(HiveInputFormat.class.getName());
-      sd.setOutputFormat(HiveOutputFormat.class.getName());
-
-      tbl.setSd(sd);
-      tbl.getSd().setCols(cols);
-      client.createTable(tbl);
-      tbl = client.getTable(dbName, tblName_1);
-
-      Path path = new Path(tbl.getSd().getLocation());
-      System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
-      assertEquals("Table location is not a subset of the database location",
-          path.getParent().toString(), db.getLocationUri());
-
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testTableDatabase() failed.");
-      throw e;
-    } finally {
-      silentDropDatabase(dbName);
-    }
-  }
-
-
-  public void testGetConfigValue() {
-
-    String val = "value";
-
-    if (!isThriftClient) {
-      try {
-        assertEquals(client.getConfigValue("hive.key1", val), "value1");
-        assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
-        assertEquals(client.getConfigValue("hive.key3", val), "");
-        assertEquals(client.getConfigValue("hive.key4", val), "0");
-        assertEquals(client.getConfigValue("hive.key5", val), val);
-        assertEquals(client.getConfigValue(null, val), val);
-      } catch (ConfigValSecurityException e) {
-        e.printStackTrace();
-        fail("unexpected ConfigValSecurityException");
-      } catch (TException e) {
-        e.printStackTrace();
-        fail("unexpected TException");
-      }
-    }
-
-    boolean threwException = false;
-    try {
-      // Attempting to get the password should throw an exception
-      client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
-    } catch (ConfigValSecurityException e) {
-      threwException = true;
-    } catch (TException e) {
-      e.printStackTrace();
-      fail("unexpected TException");
-    }
-    assertTrue("expected ConfigValSecurityException", threwException);
-  }
-
-  private static void adjust(HiveMetaStoreClient client, Partition part,
-      String dbName, String tblName)
-  throws NoSuchObjectException, MetaException, TException {
-    Partition part_get = client.getPartition(dbName, tblName, part.getValues());
-    part.setCreateTime(part_get.getCreateTime());
-    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
-  }
-
-  private static void silentDropDatabase(String dbName) throws MetaException, TException {
-    try {
-      for (String tableName : client.getTables(dbName, "*")) {
-        client.dropTable(dbName, tableName);
-      }
-      client.dropDatabase(dbName);
-    } catch (NoSuchObjectException e) {
-      // database did not exist; nothing to clean up
-    } catch (InvalidOperationException e) {
-      // best-effort cleanup; ignore
-    }
-  }
-
-  /**
-   * Tests for list partition by filter functionality.
-   * @throws Exception
-   */
-  public void testPartitionFilter() throws Exception {
-    String dbName = "filterdb";
-    String tblName = "filtertbl";
-
-    silentDropDatabase(dbName);
-
-    Database db = new Database();
-    db.setName(dbName);
-    client.createDatabase(db);
-
-    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-    cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
-    cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, ""));
-
-    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(3);
-    partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, ""));
-    partCols.add(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, ""));
-    partCols.add(new FieldSchema("p3", serdeConstants.INT_TYPE_NAME, ""));
-
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    StorageDescriptor sd = new StorageDescriptor();
-    tbl.setSd(sd);
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setNumBuckets(1);
-    sd.setParameters(new HashMap<String, String>());
-    sd.setBucketCols(new ArrayList<String>());
-    sd.setSerdeInfo(new SerDeInfo());
-  

<TRUNCATED>
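
The truncated testPartitionFilter above follows the same construction pattern as the
other metastore tests in this series: FieldSchema lists for columns and partition
columns, a StorageDescriptor wired to a SerDeInfo, and a Table pointing at both,
after which partitions can be listed with a filter expression. A minimal sketch of
that flow, assuming an initialized HiveMetaStoreClient named 'client' (the filter
string and partition values are illustrative, not the test's actual data):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.SerDeInfo;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.serde.serdeConstants;

    // Build the minimal table skeleton the tests above use.
    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(1);
    cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
    ArrayList<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
    partCols.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, ""));

    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setNumBuckets(1);
    sd.setParameters(new HashMap<String, String>());
    sd.setBucketCols(new ArrayList<String>());
    sd.setSerdeInfo(new SerDeInfo());

    Table tbl = new Table();
    tbl.setDbName("filterdb");
    tbl.setTableName("filtertbl");
    tbl.setSd(sd);
    tbl.setPartitionKeys(partCols);
    client.createTable(tbl);

    // List partitions matching a filter on the partition column.
    List<Partition> parts =
        client.listPartitionsByFilter("filterdb", "filtertbl", "p1 = \"x\"", (short) -1);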

[16/50] [abbrv] hive git commit: HIVE-13567: addendum to fix HIVE-18208

Posted by ga...@apache.org.
HIVE-13567: addendum to fix HIVE-18208


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/82590226
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/82590226
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/82590226

Branch: refs/heads/standalone-metastore
Commit: 82590226a89eeac7aa0ace8c311a8d4f4794c5bc
Parents: 8ced3bc
Author: Zoltan Haindrich <ki...@rxd.hu>
Authored: Mon Dec 18 13:53:38 2017 +0100
Committer: Zoltan Haindrich <ki...@rxd.hu>
Committed: Mon Dec 18 13:54:00 2017 +0100

----------------------------------------------------------------------
 .../results/clientpositive/llap/quotedid_smb.q.out  | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/82590226/ql/src/test/results/clientpositive/llap/quotedid_smb.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/quotedid_smb.q.out b/ql/src/test/results/clientpositive/llap/quotedid_smb.q.out
index 97721d0..67ab4fe 100644
--- a/ql/src/test/results/clientpositive/llap/quotedid_smb.q.out
+++ b/ql/src/test/results/clientpositive/llap/quotedid_smb.q.out
@@ -63,25 +63,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 500 Data size: 175168 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: !@#$%^&*()_q is not null (type: boolean)
-                    Statistics: Num rows: 475 Data size: 166409 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: x+1 (type: string), !@#$%^&*()_q (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 475 Data size: 166409 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
             Map Operator Tree:
                 TableScan
                   alias: a
-                  Statistics: Num rows: 500 Data size: 175168 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: (!@#$%^&*()_q is not null and (x+1 < '11')) (type: boolean)
-                    Statistics: Num rows: 141 Data size: 49397 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: x+1 (type: string), !@#$%^&*()_q (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 141 Data size: 49397 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -89,10 +89,10 @@ STAGE PLANS:
                           0 _col1 (type: string)
                           1 _col1 (type: string)
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Statistics: Num rows: 522 Data size: 183049 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 268 Data size: 95408 Basic stats: COMPLETE Column stats: COMPLETE
                         File Output Operator
                           compressed: false
-                          Statistics: Num rows: 522 Data size: 183049 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 268 Data size: 95408 Basic stats: COMPLETE Column stats: COMPLETE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat


[17/50] [abbrv] hive git commit: HIVE-18286 - java.lang.ClassCastException: org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector cannot be cast to org.apache.hadoop.hive.ql.exec.vector.LongColumnVector (Eugene Koifman, reviewed by Alan Gates)

Posted by ga...@apache.org.
HIVE-18286 - java.lang.ClassCastException: org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector cannot be cast to org.apache.hadoop.hive.ql.exec.vector.LongColumnVector (Eugene Koifman, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad106f0c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad106f0c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad106f0c

Branch: refs/heads/standalone-metastore
Commit: ad106f0c4378b10e9c6ef11116e1620bae689ba8
Parents: 8259022
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Mon Dec 18 14:13:02 2017 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Mon Dec 18 14:13:02 2017 -0800

----------------------------------------------------------------------
 .../io/orc/VectorizedOrcAcidRowBatchReader.java  | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ad106f0c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
index d571bd0..990e0cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java
@@ -415,7 +415,9 @@ public class VectorizedOrcAcidRowBatchReader
        * If there are deletes and reading original file, we must produce synthetic ROW_IDs in order
        * to see if any deletes apply
        */
-      if(needSyntheticRowIds(true, !deleteEventRegistry.isEmpty(), rowIdProjected)) {
+      boolean needSyntheticRowId =
+          needSyntheticRowIds(true, !deleteEventRegistry.isEmpty(), rowIdProjected);
+      if(needSyntheticRowId) {
         assert syntheticProps != null && syntheticProps.rowIdOffset >= 0 : "" + syntheticProps;
         assert syntheticProps != null && syntheticProps.bucketProperty >= 0 : "" + syntheticProps;
         if(innerReader == null) {
@@ -459,8 +461,19 @@ public class VectorizedOrcAcidRowBatchReader
         // txnid:0 which is always committed so there is no need to check wrt invalid transactions
        //But originals written by Load Data, for example, can be in base_x or delta_x_x, so we must
        //check if 'x' is committed or not even if ROW_ID is not needed in the Operator pipeline.
-        findRecordsWithInvalidTransactionIds(innerRecordIdColumnVector,
-          vectorizedRowBatchBase.size, selectedBitSet);
+        if (needSyntheticRowId) {
+          findRecordsWithInvalidTransactionIds(innerRecordIdColumnVector,
+              vectorizedRowBatchBase.size, selectedBitSet);
+        } else {
+          /* Since ROW_IDs are not needed, we didn't create the ColumnVectors to hold them, but we
+           * still have to check whether the data being read is committed as far as the current
+           * reader (transactions) is concerned.  Since we are reading an 'original' schema file here,
+           * all rows in it have been created by the same txn, namely 'syntheticProps.syntheticTxnId'.
+           */
+          if (!validTxnList.isTxnValid(syntheticProps.syntheticTxnId)) {
+            selectedBitSet.clear(0, vectorizedRowBatchBase.size);
+          }
+        }
       }
     }
     else {
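
The key mechanism in the hunk above is the selection bitset: a vectorized row batch
carries a BitSet of selected rows, and deselecting every row is how the reader drops
data written by a transaction the current reader must not see. When ROW_IDs are not
projected there is no per-row transaction column to scan, but all rows of an
'original' file share one synthetic txn id, so a single validity check can clear the
whole batch. A minimal sketch of that pattern with plain java.util.BitSet (the batch
size and validity flag are stand-ins, not Hive's classes):

    import java.util.BitSet;

    public class SelectedBitSetSketch {
      public static void main(String[] args) {
        int batchSize = 1024;                  // stand-in for vectorizedRowBatchBase.size
        BitSet selectedBitSet = new BitSet(batchSize);
        selectedBitSet.set(0, batchSize);      // start with every row selected

        boolean txnValid = false;              // stand-in for validTxnList.isTxnValid(...)
        if (!txnValid) {
          // One invalid synthetic txn id deselects the entire batch in a single call.
          selectedBitSet.clear(0, batchSize);
        }
        System.out.println("selected rows: " + selectedBitSet.cardinality()); // prints 0
      }
    }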


[50/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
HIVE-17990 Add Thrift and DB storage for Schema Registry objects


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b3cb8526
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b3cb8526
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b3cb8526

Branch: refs/heads/standalone-metastore
Commit: b3cb8526b4bee028d9cbd65e23309531263dad32
Parents: b9526a7
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu Oct 19 16:49:38 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Dec 18 14:56:37 2017 -0800

----------------------------------------------------------------------
 .../listener/DummyRawStoreFailEvent.java        |    73 +
 standalone-metastore/pom.xml                    |     3 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 28222 ++++++++++-------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  2427 +-
 .../ThriftHiveMetastore_server.skeleton.cpp     |    70 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  5044 +--
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   465 +-
 .../metastore/api/FindSchemasByColsResp.java    |   449 +
 .../api/FindSchemasByColsRespEntry.java         |   497 +
 .../metastore/api/FindSchemasByColsRqst.java    |   605 +
 .../hadoop/hive/metastore/api/ISchema.java      |  1162 +
 .../hive/metastore/api/SchemaCompatibility.java |    51 +
 .../hadoop/hive/metastore/api/SchemaType.java   |    45 +
 .../hive/metastore/api/SchemaValidation.java    |    45 +
 .../hive/metastore/api/SchemaVersion.java       |  1407 +
 .../hive/metastore/api/SchemaVersionState.java  |    63 +
 .../hadoop/hive/metastore/api/SerDeInfo.java    |   443 +-
 .../hadoop/hive/metastore/api/SerdeType.java    |    45 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 19230 +++++++++--
 .../gen-php/metastore/ThriftHiveMetastore.php   | 27808 +++++++++-------
 .../src/gen/thrift/gen-php/metastore/Types.php  |  1026 +
 .../hive_metastore/ThriftHiveMetastore-remote   |    98 +
 .../hive_metastore/ThriftHiveMetastore.py       |  5280 ++-
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |   739 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   186 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   932 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   476 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    74 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |   158 +
 .../hive/metastore/MetaStoreEventListener.java  |    26 +
 .../metastore/MetaStoreListenerNotifier.java    |    42 +
 .../hadoop/hive/metastore/ObjectStore.java      |   408 +-
 .../apache/hadoop/hive/metastore/RawStore.java  |   135 +
 .../hive/metastore/cache/CachedStore.java       |    75 +
 .../client/builder/DatabaseBuilder.java         |     2 +-
 .../client/builder/ISchemaBuilder.java          |    93 +
 .../client/builder/SchemaVersionBuilder.java    |    90 +
 .../client/builder/SerdeAndColsBuilder.java     |   124 +
 .../builder/StorageDescriptorBuilder.java       |    57 +-
 .../metastore/events/AddSchemaVersionEvent.java |    40 +
 .../metastore/events/AlterISchemaEvent.java     |    45 +
 .../events/AlterSchemaVersionEvent.java         |    46 +
 .../metastore/events/CreateISchemaEvent.java    |    39 +
 .../hive/metastore/events/DropISchemaEvent.java |    39 +
 .../events/DropSchemaVersionEvent.java          |    40 +
 .../events/PreAddSchemaVersionEvent.java        |    39 +
 .../metastore/events/PreAlterISchemaEvent.java  |    44 +
 .../events/PreAlterSchemaVersionEvent.java      |    45 +
 .../metastore/events/PreCreateISchemaEvent.java |    39 +
 .../metastore/events/PreDropISchemaEvent.java   |    39 +
 .../events/PreDropSchemaVersionEvent.java       |    39 +
 .../hive/metastore/events/PreEventContext.java  |    10 +-
 .../metastore/events/PreReadISchemaEvent.java   |    39 +
 .../events/PreReadhSchemaVersionEvent.java      |    36 +
 .../hive/metastore/messaging/EventMessage.java  |     8 +-
 .../metastore/messaging/MessageFactory.java     |     7 +
 .../hadoop/hive/metastore/model/MISchema.java   |   107 +
 .../hive/metastore/model/MSchemaVersion.java    |   127 +
 .../hadoop/hive/metastore/model/MSerDeInfo.java |    48 +-
 .../main/resources/datanucleus-log4j.properties |    17 +
 .../src/main/resources/package.jdo              |    77 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |    30 +-
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |    34 +
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |    33 +-
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |    33 +
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |    38 +
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |    38 +
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |    33 +-
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |    34 +
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |    34 +-
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |    34 +
 .../src/main/thrift/hive_metastore.thrift       |   112 +-
 .../DummyRawStoreControlledCommit.java          |    73 +
 .../DummyRawStoreForJdoConnection.java          |    72 +
 .../TestHiveMetaStoreSchemaMethods.java         |   887 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    32 -
 .../metastore/TestObjectStoreSchemaMethods.java |   550 +
 .../hive/metastore/cache/TestCachedStore.java   |     1 -
 78 files changed, 72974 insertions(+), 28239 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 62c9172..a2cf55c 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -60,6 +61,8 @@ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.metastore.api.Role;
@@ -1089,4 +1092,74 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
   }
+
+  public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+      NoSuchObjectException {
+    objectStore.createISchema(schema);
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema) throws NoSuchObjectException,
+      MetaException {
+    objectStore.alterISchema(schemaName, newSchema);
+  }
+
+  @Override
+  public ISchema getISchema(String schemaName) throws MetaException {
+    return objectStore.getISchema(schemaName);
+  }
+
+  @Override
+  public void dropISchema(String schemaName) throws NoSuchObjectException, MetaException {
+    objectStore.dropISchema(schemaName);
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion) throws AlreadyExistsException,
+      InvalidObjectException, NoSuchObjectException, MetaException {
+    objectStore.addSchemaVersion(schemaVersion);
+  }
+
+  @Override
+  public void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion) throws
+      NoSuchObjectException, MetaException {
+    objectStore.alterSchemaVersion(schemaName, version, newVersion);
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException {
+    return objectStore.getSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException {
+    return objectStore.getLatestSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException {
+    return objectStore.getAllSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                        String type) throws MetaException {
+    return objectStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException,
+      MetaException {
+    objectStore.dropSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+    return objectStore.getSerDeInfo(serDeName);
+  }
+
+  @Override
+  public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+    objectStore.addSerde(serde);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 87efece..85c3222 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -682,7 +682,8 @@
         <version>4.0.5</version>
         <configuration>
           <api>JDO</api>
-          <verbose>true</verbose>
+          <verbose>false</verbose>
+          <log4jConfiguration>${basedir}/src/main/resources/datanucleus-log4j.properties</log4jConfiguration>
           <metadataIncludes>**/*.jdo</metadataIncludes>
           <fork>false</fork>
         </configuration>
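
The DummyRawStoreFailEvent delegation above shows the complete new RawStore surface
for schema registry objects: create/alter/get/drop for ISchema, versioned
add/alter/get/drop for SchemaVersion, lookups by column, and standalone SerDe
storage. A sketch of how that surface chains together for one schema lifecycle; the
'store' parameter is assumed to be an initialized RawStore (e.g. an ObjectStore),
and the setName call assumes the usual Thrift-generated setter:

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.ISchema;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;

    void schemaLifecycle(RawStore store) throws Exception {
      ISchema schema = new ISchema();
      schema.setName("weblogs");                    // assumed Thrift setter
      store.createISchema(schema);

      SchemaVersion v1 = new SchemaVersion();       // fields elided; the patch also
      store.addSchemaVersion(v1);                   // adds a SchemaVersionBuilder

      SchemaVersion latest = store.getLatestSchemaVersion("weblogs");
      store.dropSchemaVersion("weblogs", 1);
      store.dropISchema("weblogs");
    }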


[11/50] [abbrv] hive git commit: HIVE-18258: Vectorization: Reduce-Side GROUP BY MERGEPARTIAL with duplicate columns is broken (Matt McCline, reviewed by Teddy Choi)

Posted by ga...@apache.org.
HIVE-18258: Vectorization: Reduce-Side GROUP BY MERGEPARTIAL with duplicate columns is broken (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f52e8b4b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f52e8b4b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f52e8b4b

Branch: refs/heads/standalone-metastore
Commit: f52e8b4ba38f2a1141650d99efb12c923cee7cd0
Parents: 856d88d
Author: Matt McCline <mm...@hortonworks.com>
Authored: Fri Dec 15 11:14:20 2017 -0600
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Fri Dec 15 11:14:20 2017 -0600

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../ql/exec/vector/VectorGroupKeyHelper.java    |  54 +++--
 .../vector_reduce_groupby_duplicate_cols.q      |  29 +++
 .../vector_reduce_groupby_duplicate_cols.q.out  | 211 +++++++++++++++++++
 .../vector_reduce_groupby_duplicate_cols.q.out  | 180 ++++++++++++++++
 5 files changed, 454 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f52e8b4b/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 37079b7..2bf64dc 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -381,6 +381,7 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\
   vector_reduce2.q,\
   vector_reduce3.q,\
   vector_reduce_groupby_decimal.q,\
+  vector_reduce_groupby_duplicate_cols.q,\
   vector_row__id.q,\
   vector_string_concat.q,\
   vector_struct_in.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/f52e8b4b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
index 13a929b..02b0e5c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.io.DataOutputBuffer;
  */
 public class VectorGroupKeyHelper extends VectorColumnSetInfo {
 
-  private int[] outputColumnNums;
+  private int[] inputColumnNums;
 
   public VectorGroupKeyHelper(int keyCount) {
     super(keyCount);
@@ -44,13 +44,18 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
     // case, we use the keyCount passed to the constructor and not keyExpressions.length.
 
     // Inspect the output type of each key expression.  And, remember the output columns.
-    outputColumnNums = new int[keyCount];
+    inputColumnNums = new int[keyCount];
     for(int i = 0; i < keyCount; ++i) {
       VectorExpression keyExpression = keyExpressions[i];
+
       TypeInfo typeInfo = keyExpression.getOutputTypeInfo();
       Type columnVectorType = VectorizationContext.getColumnVectorTypeFromTypeInfo(typeInfo);
       addKey(columnVectorType);
-      outputColumnNums[i] = keyExpression.getOutputColumnNum();
+
+      // The output of the key expression is the input column.
+      final int inputColumnNum = keyExpression.getOutputColumnNum();
+
+      inputColumnNums[i] = inputColumnNum;
     }
     finishAdding();
   }
@@ -64,10 +69,12 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
    */
   public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outputBatch,
           DataOutputBuffer buffer) throws HiveException {
+
     for(int i = 0; i< longIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[longIndices[i]];
-      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[columnIndex];
-      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = longIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[inputColumnNum];
+      LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[outputColumnNum];
 
       // This vectorized code pattern says: 
       //    If the input batch has no nulls at all (noNulls is true) OR
@@ -91,9 +98,10 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<doubleIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[doubleIndices[i]];
-      DoubleColumnVector inputColumnVector = (DoubleColumnVector) inputBatch.cols[columnIndex];
-      DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = doubleIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      DoubleColumnVector inputColumnVector = (DoubleColumnVector) inputBatch.cols[inputColumnNum];
+      DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[outputColumnNum];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0];
       } else {
@@ -102,9 +110,10 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<stringIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[stringIndices[i]];
-      BytesColumnVector inputColumnVector = (BytesColumnVector) inputBatch.cols[columnIndex];
-      BytesColumnVector outputColumnVector = (BytesColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = stringIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      BytesColumnVector inputColumnVector = (BytesColumnVector) inputBatch.cols[inputColumnNum];
+      BytesColumnVector outputColumnVector = (BytesColumnVector) outputBatch.cols[outputColumnNum];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
         // Copy bytes into scratch buffer.
         int start = buffer.getLength();
@@ -121,9 +130,10 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<decimalIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[decimalIndices[i]];
-      DecimalColumnVector inputColumnVector = (DecimalColumnVector) inputBatch.cols[columnIndex];
-      DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = decimalIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      DecimalColumnVector inputColumnVector = (DecimalColumnVector) inputBatch.cols[inputColumnNum];
+      DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[outputColumnNum];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         // Since we store references to HiveDecimalWritable instances, we must use the update method instead
@@ -135,9 +145,10 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<timestampIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[timestampIndices[i]];
-      TimestampColumnVector inputColumnVector = (TimestampColumnVector) inputBatch.cols[columnIndex];
-      TimestampColumnVector outputColumnVector = (TimestampColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = timestampIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      TimestampColumnVector inputColumnVector = (TimestampColumnVector) inputBatch.cols[inputColumnNum];
+      TimestampColumnVector outputColumnVector = (TimestampColumnVector) outputBatch.cols[outputColumnNum];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         outputColumnVector.setElement(outputBatch.size, 0, inputColumnVector);
@@ -147,9 +158,10 @@ public class VectorGroupKeyHelper extends VectorColumnSetInfo {
       }
     }
     for(int i=0;i<intervalDayTimeIndices.length; ++i) {
-      final int columnIndex = outputColumnNums[intervalDayTimeIndices[i]];
-      IntervalDayTimeColumnVector inputColumnVector = (IntervalDayTimeColumnVector) inputBatch.cols[columnIndex];
-      IntervalDayTimeColumnVector outputColumnVector = (IntervalDayTimeColumnVector) outputBatch.cols[columnIndex];
+      final int outputColumnNum = intervalDayTimeIndices[i];
+      final int inputColumnNum = inputColumnNums[outputColumnNum];
+      IntervalDayTimeColumnVector inputColumnVector = (IntervalDayTimeColumnVector) inputBatch.cols[inputColumnNum];
+      IntervalDayTimeColumnVector outputColumnVector = (IntervalDayTimeColumnVector) outputBatch.cols[outputColumnNum];
       if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) {
 
         outputColumnVector.setElement(outputBatch.size, 0, inputColumnVector);

http://git-wip-us.apache.org/repos/asf/hive/blob/f52e8b4b/ql/src/test/queries/clientpositive/vector_reduce_groupby_duplicate_cols.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_reduce_groupby_duplicate_cols.q b/ql/src/test/queries/clientpositive/vector_reduce_groupby_duplicate_cols.q
new file mode 100644
index 0000000..c82c960
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_reduce_groupby_duplicate_cols.q
@@ -0,0 +1,29 @@
+set hive.cli.print.header=true;
+set hive.explain.user=false;
+set hive.vectorized.execution.enabled=true;
+set hive.vectorized.execution.reduce.enabled=true;
+set hive.vectorized.execution.reducesink.new.enabled=false;
+SET hive.auto.convert.join=true;
+SET hive.auto.convert.join.noconditionaltask=true;
+SET hive.auto.convert.join.noconditionaltask.size=1000000000;
+set hive.fetch.task.conversion=none;
+set hive.strict.checks.cartesian.product=false;
+set hive.cbo.enable=false;
+
+-- HIVE-18258
+
+create table demo (one int, two int);
+insert into table demo values (1, 2);
+
+explain vectorization detail
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one;
+
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/f52e8b4b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
new file mode 100644
index 0000000..afca3df
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_reduce_groupby_duplicate_cols.q.out
@@ -0,0 +1,211 @@
+PREHOOK: query: create table demo (one int, two int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@demo
+POSTHOOK: query: create table demo (one int, two int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@demo
+PREHOOK: query: insert into table demo values (1, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@demo
+POSTHOOK: query: insert into table demo values (1, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@demo
+POSTHOOK: Lineage: demo.one EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: demo.two EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+_col0	_col1
+PREHOOK: query: explain vectorization detail
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+POSTHOOK: type: QUERY
+Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: _dummy_table
+                  Row Limit Per Split: 1
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    key expressions: 1 (type: int), 2 (type: int)
+                    sort order: ++
+                    Map-reduce partition columns: 1 (type: int), 2 (type: int)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: false
+#### A masked pattern was here ####
+        Map 2 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:one:int, 1:two:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0:int), SelectColumnIsNotNull(col 1:int))
+                    predicate: (one is not null and two is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Map Join Operator
+                      condition map:
+                           Inner Join 0 to 1
+                      keys:
+                        0 one (type: int), two (type: int)
+                        1 1 (type: int), 2 (type: int)
+                      Map Join Vectorization:
+                          bigTableKeyColumnNums: [0, 1]
+                          bigTableRetainedColumnNums: [0, 1]
+                          bigTableValueColumnNums: [0, 1]
+                          className: VectorMapJoinInnerBigOnlyMultiKeyOperator
+                          native: true
+                          nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
+                          projectedOutputColumnNums: [0, 1]
+                      outputColumnNames: _col0, _col1
+                      input vertices:
+                        1 Map 1
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        Group By Vectorization:
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 0:int, col 1:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: []
+                        keys: _col0 (type: int), _col1 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int), _col1 (type: int)
+                          sort order: ++
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkOperator
+                              native: false
+                              nativeConditionsMet: hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                              nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized, llap
+            LLAP IO: no inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+                inputFormatFeatureSupport: [DECIMAL_64]
+                vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled]
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    includeColumns: [0, 1]
+                    dataColumns: one:int, two:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                reduceColumnNullOrder: aa
+                reduceColumnSortOrder: ++
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 2
+                    dataColumns: KEY._col0:int, KEY._col1:int
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: []
+            Reduce Operator Tree:
+              Group By Operator
+                Group By Vectorization:
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int, col 1:int, col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: []
+                keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: int), _col1 (type: int), _col2 (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [2, 1, 2]
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    File Sink Vectorization:
+                        className: VectorFileSinkOperator
+                        native: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Input: default@demo
+#### A masked pattern was here ####
+POSTHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Input: default@demo
+#### A masked pattern was here ####
+one_0	two	one_1
+1	2	1

http://git-wip-us.apache.org/repos/asf/hive/blob/f52e8b4b/ql/src/test/results/clientpositive/vector_reduce_groupby_duplicate_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_duplicate_cols.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_duplicate_cols.q.out
new file mode 100644
index 0000000..eaa4031
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_duplicate_cols.q.out
@@ -0,0 +1,180 @@
+PREHOOK: query: create table demo (one int, two int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@demo
+POSTHOOK: query: create table demo (one int, two int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@demo
+PREHOOK: query: insert into table demo values (1, 2)
+PREHOOK: type: QUERY
+PREHOOK: Output: default@demo
+POSTHOOK: query: insert into table demo values (1, 2)
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@demo
+POSTHOOK: Lineage: demo.one EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: demo.two EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+_col0	_col1
+PREHOOK: query: explain vectorization detail
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization detail
+select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+POSTHOOK: type: QUERY
+Explain
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-5 is a root stage
+  Stage-2 depends on stages: Stage-5
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-5
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        b:_dummy_table 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        b:_dummy_table 
+          TableScan
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              HashTable Sink Operator
+                keys:
+                  0 one (type: int), two (type: int)
+                  1 1 (type: int), 2 (type: int)
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+                vectorizationSchemaColumns: [0:one:int, 1:two:int, 2:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterExprAndExpr(children: SelectColumnIsNotNull(col 0:int), SelectColumnIsNotNull(col 1:int))
+              predicate: (one is not null and two is not null) (type: boolean)
+              Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+              Map Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 one (type: int), two (type: int)
+                  1 1 (type: int), 2 (type: int)
+                Map Join Vectorization:
+                    bigTableKeyExpressions: col 0:int, col 1:int
+                    bigTableValueExpressions: col 0:int, col 1:int
+                    className: VectorMapJoinOperator
+                    native: false
+                    nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true
+                    nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  Group By Vectorization:
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 0:int, col 1:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: []
+                  keys: _col0 (type: int), _col1 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int), _col1 (type: int)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.vectorized.execution.reducesink.new.enabled IS false, hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
+          inputFormatFeatureSupport: [DECIMAL_64]
+          featureSupportInUse: [DECIMAL_64]
+          inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+          rowBatchContext:
+              dataColumnCount: 2
+              includeColumns: [0, 1]
+              dataColumns: one:int, two:int
+              partitionColumnCount: 0
+              scratchColumnTypeNames: []
+      Local Work:
+        Map Reduce Local Work
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: int), _col1 (type: int), _col2 (type: int)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Input: default@demo
+#### A masked pattern was here ####
+POSTHOOK: query: select one as one_0, two, one as one_1
+from demo a
+join (select 1 as one, 2 as two) b
+on a.one = b.one and a.two = b.two
+group by a.one, a.two, a.one
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Input: default@demo
+#### A masked pattern was here ####
+one_0	two	one_1
+1	2	1
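
The VectorGroupKeyHelper change earlier in this commit is the heart of the fix: with
duplicate keys such as "group by a.one, a.two, a.one", output key slot i can no
longer index the batch directly, because two output slots may map to the same input
column. The copy loop therefore goes through inputColumnNums[i]. A minimal sketch of
that indirection with plain arrays (a stand-in for the ColumnVector copies, not
Hive's actual code):

    public class DuplicateKeyCopySketch {
      public static void main(String[] args) {
        long[][] inputCols = { {1L}, {2L} };   // input batch: col 0 = one, col 1 = two

        // For "group by one, two, one": output slot -> input column it projects.
        int[] inputColumnNums = { 0, 1, 0 };

        long[] outputKeys = new long[inputColumnNums.length];
        for (int out = 0; out < inputColumnNums.length; ++out) {
          // The broken code used the same index on both sides; the fix maps each
          // output slot back to the input column the key expression read from.
          outputKeys[out] = inputCols[inputColumnNums[out]][0];
        }
        System.out.println(java.util.Arrays.toString(outputKeys)); // [1, 2, 1]
      }
    }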


[22/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
deleted file mode 100644
index dfd80bc..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-
-/**
- * TestRetryingHMSHandler. Test case for
- * {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}
- */
-public class TestRetryingHMSHandler extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.pre.event.listeners",
-        AlternateFailurePreListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.setIntVar(HiveConf.ConfVars.HMSHANDLERATTEMPTS, 2);
-    hiveConf.setTimeVar(HiveConf.ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, false);
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  // Create a database and a table in that database.  Because the AlternateFailurePreListener is
-  // being used, each attempt to create something should require two calls by the RetryingHMSHandler.
-  public void testRetryingHMSHandler() throws Exception {
-    String dbName = "hive4159";
-    String tblName = "tmptbl";
-
-    Database db = new Database();
-    db.setName(dbName);
-    msc.createDatabase(db);
-
-    Assert.assertEquals(2, AlternateFailurePreListener.getCallCount());
-
-    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
-    cols.add(new FieldSchema("c1", serdeConstants.STRING_TYPE_NAME, ""));
-    cols.add(new FieldSchema("c2", serdeConstants.INT_TYPE_NAME, ""));
-
-    Map<String, String> params = new HashMap<String, String>();
-    params.put("test_param_1", "Use this for comments etc");
-
-    Map<String, String> serdParams = new HashMap<String, String>();
-    serdParams.put(serdeConstants.SERIALIZATION_FORMAT, "1");
-
-    StorageDescriptor sd = new StorageDescriptor();
-
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setNumBuckets(1);
-    sd.setParameters(params);
-    sd.setBucketCols(new ArrayList<String>(2));
-    sd.getBucketCols().add("name");
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tblName);
-    sd.getSerdeInfo().setParameters(serdParams);
-    sd.getSerdeInfo().getParameters()
-        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-    sd.setInputFormat(HiveInputFormat.class.getName());
-    sd.setOutputFormat(HiveOutputFormat.class.getName());
-    sd.setSortCols(new ArrayList<Order>());
-
-    Table tbl = new Table();
-    tbl.setDbName(dbName);
-    tbl.setTableName(tblName);
-    tbl.setSd(sd);
-    tbl.setLastAccessTime(0);
-
-    msc.createTable(tbl);
-
-    Assert.assertEquals(4, AlternateFailurePreListener.getCallCount());
-  }
-
-}
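
The comment in the deleted test explains the expectation: AlternateFailurePreListener
fails every other call, and RetryingHMSHandler retries each HMSHandler method up to
HMSHANDLERATTEMPTS times (set to 2 in the deleted setUp above), so every successful
create costs exactly two calls. A minimal sketch of the generic dynamic-proxy retry
pattern this is built on (the interface and retry policy here are illustrative, not
Hive's actual implementation):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Proxy;

    public class RetryingProxySketch {
      interface Handler { void createDatabase(String name) throws Exception; }

      // Wrap a Handler so each invocation is attempted up to 'attempts' times.
      static Handler retrying(Handler base, int attempts) {
        InvocationHandler ih = (proxy, method, args) -> {
          Throwable last = null;
          for (int i = 0; i < attempts; i++) {
            try {
              return method.invoke(base, args);
            } catch (Exception e) {
              last = e;  // a real handler would unwrap InvocationTargetException
            }
          }
          throw last;
        };
        return (Handler) Proxy.newProxyInstance(
            Handler.class.getClassLoader(), new Class<?>[] { Handler.class }, ih);
      }
    }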

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
deleted file mode 100644
index 98708a6..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
-public class TestSetUGIOnBothClientServer extends TestRemoteHiveMetaStore{
-
-  public TestSetUGIOnBothClientServer() {
-    super();
-    isThriftClient = true;
-    // This will turn on setugi on both client and server processes of the test.
-    System.setProperty(ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
deleted file mode 100644
index 1a9abc9..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-
-public class TestSetUGIOnOnlyClient extends TestRemoteHiveMetaStore{
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true);
-    return new HiveMetaStoreClient(hiveConf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
deleted file mode 100644
index b45fd01..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-
-public class TestSetUGIOnOnlyServer extends TestSetUGIOnBothClientServer {
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, false);
-    return new HiveMetaStoreClient(hiveConf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index cce3282..d87863e 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -44,6 +44,7 @@
     <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>file://</test.warehouse.scheme>
+    <test.forkcount>1</test.forkcount>
 
     <!-- Plugin versions -->
     <ant.contrib.version>1.0b3</ant.contrib.version>
@@ -506,6 +507,7 @@
         <configuration>
           <redirectTestOutputToFile>true</redirectTestOutputToFile>
           <reuseForks>false</reuseForks>
+          <forkCount>${test.forkcount}</forkCount>
           <argLine>-Xmx2048m</argLine>
           <failIfNoTests>false</failIfNoTests>
           <systemPropertyVariables>

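Exposing the fork count as a property presumably lets individual runs parallelize surefire without editing the pom, e.g. mvn test -Dtest.forkcount=2 (surefire also accepts multiplier values such as 1C, one fork per core).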
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
index 6c8b1d8..50fc186 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
@@ -73,6 +73,11 @@ public class IndexBuilder extends StorageDescriptorBuilder<IndexBuilder> {
     return this;
   }
 
+  public IndexBuilder addIndexParam(String key, String value) {
+    indexParams.put(key, value);
+    return this;
+  }
+
   public IndexBuilder setIndexName(String indexName) {
     this.indexName = indexName;
     return this;

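A hypothetical use of the new addIndexParam method, chained with the builder's existing setters (all values illustrative):

    Index index = new IndexBuilder()
        .setDbAndTableName(table)
        .setIndexName("idx1")
        .setDeferredRebuild(true)
        .addIndexParam("creator", "test")   // the method added above
        .addCol("id", "int")
        .build();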
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
index 265625f..38e5a8f 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
@@ -53,9 +53,10 @@ public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder>
     return this;
   }
 
-  public PartitionBuilder setDbAndTableName(Table table) {
+  public PartitionBuilder fromTable(Table table) {
     this.dbName = table.getDbName();
     this.tableName = table.getTableName();
+    setCols(table.getSd().getCols());
     return this;
   }
 

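With the rename, the builder also copies the table's column layout, so a caller only needs to supply the partition values. TestFilterHooks later in this mail uses exactly this pattern:

    Partition part = new PartitionBuilder()
        .fromTable(tab2)        // copies db name, table name, and now the columns
        .addValue("value1")
        .build();
    msc.add_partition(part);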
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
index 1d457a6..69acf3c 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hive.metastore.client.builder;
 
+import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -46,6 +48,8 @@ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
     tableParams = new HashMap<>();
     createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
     retention = 0;
+    partCols = new ArrayList<>();
+    type = TableType.MANAGED_TABLE.name();
     super.setChild(this);
   }
 
@@ -90,7 +94,6 @@ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
   }
 
   public TableBuilder addPartCol(String name, String type, String comment) {
-    if (partCols == null) partCols = new ArrayList<>();
     partCols.add(new FieldSchema(name, type, comment));
     return this;
   }
@@ -135,6 +138,13 @@ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
     return this;
   }
 
+  public TableBuilder fromIndex(Index index) {
+    dbName = index.getDbName();
+    tableName = index.getIndexTableName();
+    setCols(index.getSd().getCols());
+    return this;
+  }
+
   public Table build() throws MetaException {
     if (dbName == null || tableName == null) {
       throw new MetaException("You must set the database and table name");

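The new fromIndex method mirrors fromTable: it seeds the builder from an Index so the backing index table can be built in one expression, as TestFilterHooks below does:

    msc.createIndex(index, new TableBuilder().fromIndex(index).build());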
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index b46cc38..57692d3 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -482,7 +482,7 @@ public class MetastoreConf {
         "hive.metastore.hbase.file.metadata.threads", 1,
         "Number of threads to use to read file metadata in background to cache it."),
     FILTER_HOOK("metastore.filter.hook", "hive.metastore.filter.hook",
-        "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
+        org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl.class.getName(),
         "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
             + "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
     FS_HANDLER_CLS("metastore.fs.handler.class", "hive.metastore.fs.handler.class",

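Switching the default from a string literal to a class literal gives a compile-time check that the hook class exists. Overrides follow the same typed pattern, as in TestFilterHooks below:

    MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK,
        DummyMetaStoreFilterHookImpl.class, MetaStoreFilterHook.class);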
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
new file mode 100644
index 0000000..cc0bd77
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.lang.Override;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.CallableStatement;
+import java.sql.Clob;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.SQLClientInfoException;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Struct;
+import java.util.Map;
+import java.util.concurrent.Executor;
+import java.util.logging.Logger;
+import java.util.Properties;
+
+/**
+ * Fake Derby driver - companion class that enables testing by TestObjectStoreInitRetry
+ */
+public class FakeDerby extends org.apache.derby.jdbc.EmbeddedDriver {
+
+  public class Connection implements java.sql.Connection {
+
+    private java.sql.Connection _baseConn;
+
+    public Connection(java.sql.Connection connection) {
+      TestObjectStoreInitRetry.debugTrace();
+      this._baseConn = connection;
+    }
+
+    @Override
+    public Statement createStatement() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createStatement();
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql);
+    }
+
+    @Override
+    public CallableStatement prepareCall(String sql) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareCall(sql);
+    }
+
+    @Override
+    public String nativeSQL(String sql) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.nativeSQL(sql);
+    }
+
+    @Override
+    public void setAutoCommit(boolean autoCommit) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      TestObjectStoreInitRetry.misbehave();
+      _baseConn.setAutoCommit(autoCommit);
+    }
+
+    @Override
+    public boolean getAutoCommit() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getAutoCommit();
+    }
+
+    @Override
+    public void commit() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.commit();
+    }
+
+    @Override
+    public void rollback() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.rollback();
+    }
+
+    @Override
+    public void close() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.close();
+    }
+
+    @Override
+    public boolean isClosed() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.isClosed();
+    }
+
+    @Override
+    public DatabaseMetaData getMetaData() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getMetaData();
+    }
+
+    @Override
+    public void setReadOnly(boolean readOnly) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setReadOnly(readOnly);
+    }
+
+    @Override
+    public boolean isReadOnly() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.isReadOnly();
+    }
+
+    @Override
+    public void setCatalog(String catalog) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setCatalog(catalog);
+    }
+
+    @Override
+    public String getCatalog() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getCatalog();
+    }
+
+    @Override
+    public void setTransactionIsolation(int level) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setTransactionIsolation(level);
+    }
+
+    @Override
+    public int getTransactionIsolation() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getTransactionIsolation();
+    }
+
+    @Override
+    public SQLWarning getWarnings() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getWarnings();
+    }
+
+    @Override
+    public void clearWarnings() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.clearWarnings();
+    }
+
+    @Override
+    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createStatement(resultSetType, resultSetConcurrency);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency);
+    }
+
+    @Override
+    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency);
+    }
+
+    @Override
+    public Map<String, Class<?>> getTypeMap() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getTypeMap();
+    }
+
+    @Override
+    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setTypeMap(map);
+    }
+
+    @Override
+    public void setHoldability(int holdability) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setHoldability(holdability);
+    }
+
+    @Override
+    public int getHoldability() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getHoldability();
+    }
+
+    @Override
+    public Savepoint setSavepoint() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.setSavepoint();
+    }
+
+    @Override
+    public Savepoint setSavepoint(String name) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.setSavepoint(name);
+    }
+
+    @Override
+    public void rollback(Savepoint savepoint) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.rollback(savepoint);
+    }
+
+    @Override
+    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.releaseSavepoint(savepoint);
+    }
+
+    @Override
+    public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+    }
+
+    @Override
+    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql, autoGeneratedKeys);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql, columnIndexes);
+    }
+
+    @Override
+    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.prepareStatement(sql, columnNames);
+    }
+
+    @Override
+    public Clob createClob() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createClob();
+    }
+
+    @Override
+    public Blob createBlob() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createBlob();
+    }
+
+    @Override
+    public NClob createNClob() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createNClob();
+    }
+
+    @Override
+    public SQLXML createSQLXML() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createSQLXML();
+    }
+
+    @Override
+    public boolean isValid(int timeout) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.isValid(timeout);
+    }
+
+    @Override
+    public void setClientInfo(String name, String value) throws SQLClientInfoException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setClientInfo(name, value);
+    }
+
+    @Override
+    public void setClientInfo(Properties properties) throws SQLClientInfoException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setClientInfo(properties);
+    }
+
+    @Override
+    public String getClientInfo(String name) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getClientInfo(name);
+    }
+
+    @Override
+    public Properties getClientInfo() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getClientInfo();
+    }
+
+    @Override
+    public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createArrayOf(typeName, elements);
+    }
+
+    @Override
+    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.createStruct(typeName, attributes);
+    }
+
+    @Override
+    public void setSchema(String schema) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setSchema(schema);
+    }
+
+    @Override
+    public String getSchema() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getSchema();
+    }
+
+    @Override
+    public void abort(Executor executor) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.abort(executor);
+    }
+
+    @Override
+    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      _baseConn.setNetworkTimeout(executor, milliseconds);
+    }
+
+    @Override
+    public int getNetworkTimeout() throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.getNetworkTimeout();
+    }
+
+    @Override
+    public <T> T unwrap(Class<T> iface) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.unwrap(iface);
+    }
+
+    @Override
+    public boolean isWrapperFor(Class<?> iface) throws SQLException {
+      TestObjectStoreInitRetry.debugTrace();
+      return _baseConn.isWrapperFor(iface);
+    }
+  }
+
+  public FakeDerby(){
+  }
+
+  @Override
+  public boolean acceptsURL(String url) throws SQLException {
+    url = url.replace("fderby","derby");
+    return super.acceptsURL(url);
+  }
+
+  @Override
+  public Connection connect(java.lang.String url, java.util.Properties info) throws SQLException {
+    TestObjectStoreInitRetry.misbehave();
+    url = url.replace("fderby","derby");
+    return new FakeDerby.Connection(super.connect(url, info));
+  }
+
+  @Override
+  public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+    throw new SQLFeatureNotSupportedException(); // hope this is respected properly
+  }
+
+}

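A hypothetical sketch of how a test can route JDBC traffic through this wrapper; the URL and database name are illustrative, and the real wiring lives in TestObjectStoreInitRetry, which is not part of this mail:

    package org.apache.hadoop.hive.metastore;

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class FakeDerbySketch {
      public static void main(String[] args) throws Exception {
        // Register the wrapper, then connect with the "fderby" scheme;
        // acceptsURL()/connect() rewrite it to a real embedded Derby URL.
        DriverManager.registerDriver(new FakeDerby());
        try (Connection conn = DriverManager.getConnection(
            "jdbc:fderby:memory:fakedb;create=true")) {
          // connect() and setAutoCommit() call misbehave(), which can be armed
          // to throw and thereby exercise ObjectStore's initialization retry.
          conn.setAutoCommit(true);
        }
      }
    }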
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
index 380f3a1..e7146c4 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java
@@ -26,6 +26,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.slf4j.Logger;
@@ -91,19 +92,22 @@ public class MetaStoreTestUtils {
 
   public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge, Configuration conf)
       throws Exception {
-    Exception metaStoreException = null;
-    int metaStorePort = 0;
+    int metaStorePort = findFreePort();
+    startMetaStoreWithRetry(metaStorePort, bridge, conf);
+    return metaStorePort;
+  }
 
+  public static void startMetaStoreWithRetry(int port, HadoopThriftAuthBridge bridge,
+                                             Configuration conf) throws Exception {
+    Exception metaStoreException = null;
     for (int tryCount = 0; tryCount < MetaStoreTestUtils.RETRY_COUNT; tryCount++) {
       try {
-        metaStorePort = MetaStoreTestUtils.findFreePort();
-        MetaStoreTestUtils.startMetaStore(metaStorePort, bridge, conf);
-        return metaStorePort;
+        MetaStoreTestUtils.startMetaStore(port, bridge, conf);
+        return;
       } catch (ConnectException ce) {
         metaStoreException = ce;
       }
     }
-
     throw metaStoreException;
   }
 
@@ -198,11 +202,20 @@ public class MetaStoreTestUtils {
   /**
   * Set up a configuration file for standalone mode.  There are a few config variables that have
    * defaults that require parts of Hive that aren't present in standalone mode.  This method
-   * sets them to something that will work without the rest of Hive.
+   * sets them to something that will work without the rest of Hive.  It only changes them if
+   * they have not already been set, to avoid clobbering intentional changes.
    * @param conf Configuration object
    */
   public static void setConfForStandloneMode(Configuration conf) {
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.TASK_THREADS_ALWAYS,
-        EventCleanerTask.class.getName());
+    if (MetastoreConf.getVar(conf, ConfVars.TASK_THREADS_ALWAYS).equals(
+        ConfVars.TASK_THREADS_ALWAYS.getDefaultVal())) {
+      MetastoreConf.setVar(conf, ConfVars.TASK_THREADS_ALWAYS,
+          EventCleanerTask.class.getName());
+    }
+    if (MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS).equals(
+        ConfVars.EXPRESSION_PROXY_CLASS.getDefaultVal())) {
+      MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+          DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
+    }
   }
 }

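The split into a port-taking variant lets a test pin the chosen port into its configuration before the server starts, which is how TestFilterHooks below wires itself up:

    int port = MetaStoreTestUtils.findFreePort();
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);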
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
new file mode 100644
index 0000000..3b541d2
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestAdminUser {
+
+  @Test
+  public void testCreateAdminNAddUser() throws MetaException, NoSuchObjectException {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.USERS_IN_ADMIN_ROLE, "adminuser");
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    RawStore rawStore = new HMSHandler("testcreateroot", conf).getMS();
+    Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN);
+    Assert.assertEquals(HiveMetaStore.ADMIN, adminRole.getOwnerName());
+    Assert.assertEquals("All", rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN,
+        PrincipalType.ROLE).get(0).getGrantInfo().getPrivilege());
+    Assert.assertEquals(HiveMetaStore.ADMIN,
+        rawStore.listRoles("adminuser", PrincipalType.USER).get(0).getRoleName());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
new file mode 100644
index 0000000..72758df
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+
+public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
+
+  @Before
+  public void openWarehouse() throws Exception {
+    warehouse = new Warehouse(conf);
+    client = createClient();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    client.close();
+  }
+
+  @Override
+  protected HiveMetaStoreClient createClient() throws Exception {
+    try {
+      return new HiveMetaStoreClient(conf);
+    } catch (Throwable e) {
+      System.err.println("Unable to open the metastore");
+      System.err.println(StringUtils.stringifyException(e));
+      throw new Exception(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
new file mode 100644
index 0000000..56afe33
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestFilterHooks {
+  private static final Logger LOG = LoggerFactory.getLogger(TestFilterHooks.class);
+
+  public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
+    private static boolean blockResults = false;
+
+    public DummyMetaStoreFilterHookImpl(Configuration conf) {
+      super(conf);
+    }
+
+    @Override
+    public List<String> filterDatabases(List<String> dbList) throws MetaException  {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterDatabases(dbList);
+    }
+
+    @Override
+    public Database filterDatabase(Database dataBase) throws NoSuchObjectException {
+      if (blockResults) {
+        throw new NoSuchObjectException("Blocked access");
+      }
+      return super.filterDatabase(dataBase);
+    }
+
+    @Override
+    public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterTableNames(dbName, tableList);
+    }
+
+    @Override
+    public Table filterTable(Table table) throws NoSuchObjectException {
+      if (blockResults) {
+        throw new NoSuchObjectException("Blocked access");
+      }
+      return super.filterTable(table);
+    }
+
+    @Override
+    public List<Table> filterTables(List<Table> tableList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterTables(tableList);
+    }
+
+    @Override
+    public List<Partition> filterPartitions(List<Partition> partitionList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterPartitions(partitionList);
+    }
+
+    @Override
+    public List<PartitionSpec> filterPartitionSpecs(
+        List<PartitionSpec> partitionSpecList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterPartitionSpecs(partitionSpecList);
+    }
+
+    @Override
+    public Partition filterPartition(Partition partition) throws NoSuchObjectException {
+      if (blockResults) {
+        throw new NoSuchObjectException("Blocked access");
+      }
+      return super.filterPartition(partition);
+    }
+
+    @Override
+    public List<String> filterPartitionNames(String dbName, String tblName,
+        List<String> partitionNames) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterPartitionNames(dbName, tblName, partitionNames);
+    }
+
+    @Override
+    public Index filterIndex(Index index) throws NoSuchObjectException {
+      if (blockResults) {
+        throw new NoSuchObjectException("Blocked access");
+      }
+      return super.filterIndex(index);
+    }
+
+    @Override
+    public List<String> filterIndexNames(String dbName, String tblName,
+        List<String> indexList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterIndexNames(dbName, tblName, indexList);
+    }
+
+    @Override
+    public List<Index> filterIndexes(List<Index> indexList) throws MetaException {
+      if (blockResults) {
+        return new ArrayList<>();
+      }
+      return super.filterIndexes(indexList);
+    }
+  }
+
+  private static final String DBNAME1 = "testdb1";
+  private static final String DBNAME2 = "testdb2";
+  private static final String TAB1 = "tab1";
+  private static final String TAB2 = "tab2";
+  private static final String INDEX1 = "idx1";
+  private static Configuration conf;
+  private static HiveMetaStoreClient msc;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    DummyMetaStoreFilterHookImpl.blockResults = false;
+
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class,
+        MetaStoreFilterHook.class);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    int port = MetaStoreTestUtils.findFreePort();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+    msc = new HiveMetaStoreClient(conf);
+
+    msc.dropDatabase(DBNAME1, true, true, true);
+    msc.dropDatabase(DBNAME2, true, true, true);
+    Database db1 = new DatabaseBuilder()
+        .setName(DBNAME1)
+        .build();
+    msc.createDatabase(db1);
+    Database db2 = new DatabaseBuilder()
+        .setName(DBNAME2)
+        .build();
+    msc.createDatabase(db2);
+    Table tab1 = new TableBuilder()
+        .setDbName(DBNAME1)
+        .setTableName(TAB1)
+        .addCol("id", "int")
+        .addCol("name", "string")
+        .build();
+    msc.createTable(tab1);
+    Table tab2 = new TableBuilder()
+        .setDbName(DBNAME1)
+        .setTableName(TAB2)
+        .addCol("id", "int")
+        .addPartCol("name", "string")
+        .build();
+    msc.createTable(tab2);
+    Partition part1 = new PartitionBuilder()
+        .fromTable(tab2)
+        .addValue("value1")
+        .build();
+    msc.add_partition(part1);
+    Partition part2 = new PartitionBuilder()
+        .fromTable(tab2)
+        .addValue("value2")
+        .build();
+    msc.add_partition(part2);
+    Index index = new IndexBuilder()
+        .setDbAndTableName(tab1)
+        .setIndexName(INDEX1)
+        .setDeferredRebuild(true)
+        .addCol("id", "int")
+        .build();
+    msc.createIndex(index, new TableBuilder().fromIndex(index).build());
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    msc.close();
+  }
+
+  @Test
+  public void testDefaultFilter() throws Exception {
+    assertNotNull(msc.getTable(DBNAME1, TAB1));
+    assertEquals(3, msc.getTables(DBNAME1, "*").size());
+    assertEquals(3, msc.getAllTables(DBNAME1).size());
+    assertEquals(1, msc.getTables(DBNAME1, TAB2).size());
+    assertEquals(0, msc.getAllTables(DBNAME2).size());
+
+    assertNotNull(msc.getDatabase(DBNAME1));
+    assertEquals(3, msc.getDatabases("*").size());
+    assertEquals(3, msc.getAllDatabases().size());
+    assertEquals(1, msc.getDatabases(DBNAME1).size());
+
+    assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
+    assertEquals(1, msc.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size());
+
+    assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
+  }
+
+  @Test
+  public void testDummyFilterForTables() throws Exception {
+    DummyMetaStoreFilterHookImpl.blockResults = true;
+    try {
+      msc.getTable(DBNAME1, TAB1);
+      fail("getTable() should fail with blocking mode");
+    } catch (NoSuchObjectException e) {
+      // Expected
+    }
+    assertEquals(0, msc.getTables(DBNAME1, "*").size());
+    assertEquals(0, msc.getAllTables(DBNAME1).size());
+    assertEquals(0, msc.getTables(DBNAME1, TAB2).size());
+  }
+
+  @Test
+  public void testDummyFilterForDb() throws Exception {
+    DummyMetaStoreFilterHookImpl.blockResults = true;
+    try {
+      assertNotNull(msc.getDatabase(DBNAME1));
+      fail("getDatabase() should fail with blocking mode");
+    } catch (NoSuchObjectException e) {
+      // Expected
+    }
+    assertEquals(0, msc.getDatabases("*").size());
+    assertEquals(0, msc.getAllDatabases().size());
+    assertEquals(0, msc.getDatabases(DBNAME1).size());
+  }
+
+  @Test
+  public void testDummyFilterForPartition() throws Exception {
+    DummyMetaStoreFilterHookImpl.blockResults = true;
+    try {
+      assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
+      fail("getPartition() should fail with blocking mode");
+    } catch (NoSuchObjectException e) {
+      // Expected
+    }
+    assertEquals(0, msc.getPartitionsByNames(DBNAME1, TAB2,
+        Lists.newArrayList("name=value1")).size());
+  }
+
+  @Test
+  public void testDummyFilterForIndex() throws Exception {
+    DummyMetaStoreFilterHookImpl.blockResults = true;
+    try {
+      assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
+      fail("getPartition() should fail with blocking mode");
+    } catch (NoSuchObjectException e) {
+      // Expected
+    }
+  }
+
+}


[33/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java
new file mode 100644
index 0000000..882e3be
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/MetastoreSchemaTool.java
@@ -0,0 +1,1309 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.tools;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.io.output.NullOutputStream;
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
+import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
+import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableMap;
+import sqlline.SqlLine;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.URI;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class MetastoreSchemaTool {
+  private static final Logger LOG = LoggerFactory.getLogger(MetastoreSchemaTool.class);
+  private static final String PASSWD_MASK = "[passwd stripped]";
+
+  @VisibleForTesting
+  public static String homeDir;
+
+  private String userName = null;
+  private String passWord = null;
+  private boolean dryRun = false;
+  private boolean verbose = false;
+  private String dbOpts = null;
+  private String url = null;
+  private String driver = null;
+  private URI[] validationServers = null; // The servers on which database/partition/table locations may reside
+  private String hiveUser; // Hive username, for use when creating the user, not for connecting
+  private String hivePasswd; // Hive password, for use when creating the user, not for connecting
+  private String hiveDb; // Hive database, for use when creating the user, not for connecting
+  private final Configuration conf;
+  private final String dbType;
+  private final IMetaStoreSchemaInfo metaStoreSchemaInfo;
+  private boolean needsQuotedIdentifier;
+
+  private static String findHomeDir() {
+    // If METASTORE_HOME is set, use it, else use HIVE_HOME for backwards compatibility.
+    homeDir = homeDir == null ? System.getenv("METASTORE_HOME") : homeDir;
+    return homeDir == null ? System.getenv("HIVE_HOME") : homeDir;
+  }
+
+  private MetastoreSchemaTool(String dbType) throws HiveMetaException {
+    this(findHomeDir(), MetastoreConf.newMetastoreConf(), dbType);
+  }
+
+  MetastoreSchemaTool(String metastoreHome, Configuration conf, String dbType)
+      throws HiveMetaException {
+    if (metastoreHome == null || metastoreHome.isEmpty()) {
+      throw new HiveMetaException("No Metastore home directory provided");
+    }
+    this.conf = conf;
+    this.dbType = dbType;
+    this.needsQuotedIdentifier = getDbCommandParser(dbType).needsQuotedIdentifier();
+    this.metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, metastoreHome, dbType);
+  }
+
+  Configuration getConf() {
+    return conf;
+  }
+
+  void setUrl(String url) {
+    this.url = url;
+  }
+
+  void setDriver(String driver) {
+    this.driver = driver;
+  }
+
+  void setUserName(String userName) {
+    this.userName = userName;
+  }
+
+  void setPassWord(String passWord) {
+    this.passWord = passWord;
+  }
+
+  void setDryRun(boolean dryRun) {
+    this.dryRun = dryRun;
+  }
+
+  void setVerbose(boolean verbose) {
+    this.verbose = verbose;
+  }
+
+  private void setDbOpts(String dbOpts) {
+    this.dbOpts = dbOpts;
+  }
+
+  private void setValidationServers(String servers) {
+    if (StringUtils.isNotEmpty(servers)) {
+      String[] strServers = servers.split(",");
+      this.validationServers = new URI[strServers.length];
+      for (int i = 0; i < validationServers.length; i++) {
+        validationServers[i] = new Path(strServers[i]).toUri();
+      }
+    }
+  }
+
+  private void setHiveUser(String hiveUser) {
+    this.hiveUser = hiveUser;
+  }
+
+  private void setHivePasswd(String hivePasswd) {
+    this.hivePasswd = hivePasswd;
+  }
+
+  private void setHiveDb(String hiveDb) {
+    this.hiveDb = hiveDb;
+  }
+
+  private static int usage(Options cmdLineOptions) {
+    HelpFormatter formatter = new HelpFormatter();
+    formatter.printHelp("schemaTool", cmdLineOptions);
+    return 1;
+  }
+
+  Connection getConnectionToMetastore(boolean printInfo)
+      throws HiveMetaException {
+    return HiveSchemaHelper.getConnectionToMetastore(userName,
+        passWord, url, driver, printInfo, conf);
+  }
+
+  private NestedScriptParser getDbCommandParser(String dbType) {
+    return HiveSchemaHelper.getDbCommandParser(dbType, dbOpts, userName,
+        passWord, conf, null, true);
+  }
+
+  /**
+   * Print Hive version and schema version
+   */
+  private void showInfo() throws HiveMetaException {
+    String hiveVersion = metaStoreSchemaInfo.getHiveSchemaVersion();
+    String dbVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(true));
+    System.out.println("Hive distribution version:\t " + hiveVersion);
+    System.out.println("Metastore schema version:\t " + dbVersion);
+    assertCompatibleVersion(hiveVersion, dbVersion);
+  }
+
+  boolean validateLocations(Connection conn, URI[] defaultServers) throws HiveMetaException {
+    System.out.println("Validating DFS locations");
+    boolean rtn;
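+    // Each check appears as the left operand of &&, so all four validations
+    // always run; a failure in one does not short-circuit the rest.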
+    rtn = checkMetaStoreDBLocation(conn, defaultServers);
+    rtn = checkMetaStoreTableLocation(conn, defaultServers) && rtn;
+    rtn = checkMetaStorePartitionLocation(conn, defaultServers) && rtn;
+    rtn = checkMetaStoreSkewedColumnsLocation(conn, defaultServers) && rtn;
+    System.out.println((rtn ? "Succeeded" : "Failed") + " in DFS location validation.");
+    return rtn;
+  }
+
+  private String getNameOrID(ResultSet res, int nameInx, int idInx) throws SQLException {
+    String itemName = res.getString(nameInx);
+    return (itemName == null || itemName.isEmpty()) ? "ID: " + res.getString(idInx) : "Name: " + itemName;
+  }
+
+  private boolean checkMetaStoreDBLocation(Connection conn, URI[] defaultServers)
+      throws HiveMetaException {
+    String dbLoc;
+    boolean isValid = true;
+    int numOfInvalid = 0;
+    if (needsQuotedIdentifier) {
+      dbLoc = "select dbt.\"DB_ID\", dbt.\"NAME\", dbt.\"DB_LOCATION_URI\" from \"DBS\" dbt order by dbt.\"DB_ID\" ";
+    } else {
+      dbLoc = "select dbt.DB_ID, dbt.NAME, dbt.DB_LOCATION_URI from DBS dbt order by dbt.DB_ID";
+    }
+
+    try (Statement stmt = conn.createStatement();
+        ResultSet res = stmt.executeQuery(dbLoc)) {
+      while (res.next()) {
+        String locValue = res.getString(3);
+        String dbName = getNameOrID(res,2,1);
+        if (!checkLocation("Database " + dbName, locValue, defaultServers)) {
+          numOfInvalid++;
+        }
+      }
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to get DB Location Info.", e);
+    }
+    if (numOfInvalid > 0) {
+      isValid = false;
+    }
+    return isValid;
+  }
+
+  private boolean checkMetaStoreTableLocation(Connection conn, URI[] defaultServers)
+      throws HiveMetaException {
+    String tabLoc, tabIDRange;
+    boolean isValid = true;
+    int numOfInvalid = 0;
+    if (needsQuotedIdentifier) {
+      tabIDRange = "select max(\"TBL_ID\"), min(\"TBL_ID\") from \"TBLS\" ";
+    } else {
+      tabIDRange = "select max(TBL_ID), min(TBL_ID) from TBLS";
+    }
+
+    if (needsQuotedIdentifier) {
+      tabLoc = "select tbl.\"TBL_ID\", tbl.\"TBL_NAME\", sd.\"LOCATION\", dbt.\"DB_ID\", dbt.\"NAME\" from \"TBLS\" tbl inner join " +
+    "\"SDS\" sd on tbl.\"SD_ID\" = sd.\"SD_ID\" and tbl.\"TBL_TYPE\" != '" + TableType.VIRTUAL_VIEW +
+    "' and tbl.\"TBL_ID\" >= ? and tbl.\"TBL_ID\"<= ? " + "inner join \"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" ";
+    } else {
+      tabLoc = "select tbl.TBL_ID, tbl.TBL_NAME, sd.LOCATION, dbt.DB_ID, dbt.NAME from TBLS tbl join SDS sd on tbl.SD_ID = sd.SD_ID and tbl.TBL_TYPE !='"
+      + TableType.VIRTUAL_VIEW + "' and tbl.TBL_ID >= ? and tbl.TBL_ID <= ?  inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID";
+    }
+
+    long maxID = 0, minID = 0;
+    long rtnSize = 2000;
+
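+    // Page through TBL_IDs in windows of [minID, minID + rtnSize]; the cursor
+    // then advances by rtnSize + 1, so consecutive windows never overlap.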
+    try {
+      Statement stmt = conn.createStatement();
+      ResultSet res = stmt.executeQuery(tabIDRange);
+      if (res.next()) {
+        maxID = res.getLong(1);
+        minID = res.getLong(2);
+      }
+      res.close();
+      stmt.close();
+      PreparedStatement pStmt = conn.prepareStatement(tabLoc);
+      while (minID <= maxID) {
+        pStmt.setLong(1, minID);
+        pStmt.setLong(2, minID + rtnSize);
+        res = pStmt.executeQuery();
+        while (res.next()) {
+          String locValue = res.getString(3);
+          String entity = "Database " + getNameOrID(res, 5, 4) +
+              ", Table "  + getNameOrID(res,2,1);
+          if (!checkLocation(entity, locValue, defaultServers)) {
+            numOfInvalid++;
+          }
+        }
+        res.close();
+        minID += rtnSize + 1;
+
+      }
+      pStmt.close();
+
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to get Table Location Info.", e);
+    }
+    if (numOfInvalid > 0) {
+      isValid = false;
+    }
+    return isValid;
+  }
+
+  private boolean checkMetaStorePartitionLocation(Connection conn, URI[] defaultServers)
+      throws HiveMetaException {
+    String partLoc, partIDRange;
+    boolean isValid = true;
+    int numOfInvalid = 0;
+    if (needsQuotedIdentifier) {
+      partIDRange = "select max(\"PART_ID\"), min(\"PART_ID\") from \"PARTITIONS\" ";
+    } else {
+      partIDRange = "select max(PART_ID), min(PART_ID) from PARTITIONS";
+    }
+
+    if (needsQuotedIdentifier) {
+      partLoc = "select pt.\"PART_ID\", pt.\"PART_NAME\", sd.\"LOCATION\", tbl.\"TBL_ID\", tbl.\"TBL_NAME\",dbt.\"DB_ID\", dbt.\"NAME\" from \"PARTITIONS\" pt "
+           + "inner join \"SDS\" sd on pt.\"SD_ID\" = sd.\"SD_ID\" and pt.\"PART_ID\" >= ? and pt.\"PART_ID\"<= ? "
+           + " inner join \"TBLS\" tbl on pt.\"TBL_ID\" = tbl.\"TBL_ID\" inner join "
+           + "\"DBS\" dbt on tbl.\"DB_ID\" = dbt.\"DB_ID\" order by tbl.\"TBL_ID\" ";
+    } else {
+      partLoc = "select pt.PART_ID, pt.PART_NAME, sd.LOCATION, tbl.TBL_ID, tbl.TBL_NAME, dbt.DB_ID, dbt.NAME from PARTITIONS pt "
+          + "inner join SDS sd on pt.SD_ID = sd.SD_ID and pt.PART_ID >= ? and pt.PART_ID <= ?  "
+          + "inner join TBLS tbl on tbl.TBL_ID = pt.TBL_ID inner join DBS dbt on tbl.DB_ID = dbt.DB_ID order by tbl.TBL_ID ";
+    }
+
+    long maxID = 0, minID = 0;
+    long rtnSize = 2000;
+
+    try {
+      Statement stmt = conn.createStatement();
+      ResultSet res = stmt.executeQuery(partIDRange);
+      if (res.next()) {
+        maxID = res.getLong(1);
+        minID = res.getLong(2);
+      }
+      res.close();
+      stmt.close();
+      PreparedStatement pStmt = conn.prepareStatement(partLoc);
+      while (minID <= maxID) {
+        pStmt.setLong(1, minID);
+        pStmt.setLong(2, minID + rtnSize);
+        res = pStmt.executeQuery();
+        while (res.next()) {
+          String locValue = res.getString(3);
+          String entity = "Database " + getNameOrID(res,7,6) +
+              ", Table "  + getNameOrID(res,5,4) +
+              ", Partition " + getNameOrID(res,2,1);
+          if (!checkLocation(entity, locValue, defaultServers)) {
+            numOfInvalid++;
+          }
+        }
+        res.close();
+        minID += rtnSize + 1;
+      }
+      pStmt.close();
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to get Partiton Location Info.", e);
+    }
+    if (numOfInvalid > 0) {
+      isValid = false;
+    }
+    return isValid;
+  }
+
+  private boolean checkMetaStoreSkewedColumnsLocation(Connection conn, URI[] defaultServers)
+      throws HiveMetaException {
+    String skewedColLoc, skewedColIDRange;
+    boolean isValid = true;
+    int numOfInvalid = 0;
+    if (needsQuotedIdentifier) {
+      skewedColIDRange = "select max(\"STRING_LIST_ID_KID\"), min(\"STRING_LIST_ID_KID\") from \"SKEWED_COL_VALUE_LOC_MAP\" ";
+    } else {
+      skewedColIDRange = "select max(STRING_LIST_ID_KID), min(STRING_LIST_ID_KID) from SKEWED_COL_VALUE_LOC_MAP";
+    }
+
+    if (needsQuotedIdentifier) {
+      skewedColLoc = "select t.\"TBL_NAME\", t.\"TBL_ID\", sk.\"STRING_LIST_ID_KID\", sk.\"LOCATION\", db.\"NAME\", db.\"DB_ID\" "
+           + " from \"TBLS\" t, \"SDS\" s, \"DBS\" db, \"SKEWED_COL_VALUE_LOC_MAP\" sk "
+           + "where sk.\"SD_ID\" = s.\"SD_ID\" and s.\"SD_ID\" = t.\"SD_ID\" and t.\"DB_ID\" = db.\"DB_ID\" and "
+           + "sk.\"STRING_LIST_ID_KID\" >= ? and sk.\"STRING_LIST_ID_KID\" <= ? order by t.\"TBL_ID\" ";
+    } else {
+      skewedColLoc = "select t.TBL_NAME, t.TBL_ID, sk.STRING_LIST_ID_KID, sk.LOCATION, db.NAME, db.DB_ID from TBLS t, SDS s, DBS db, SKEWED_COL_VALUE_LOC_MAP sk "
+           + "where sk.SD_ID = s.SD_ID and s.SD_ID = t.SD_ID and t.DB_ID = db.DB_ID and sk.STRING_LIST_ID_KID >= ? and sk.STRING_LIST_ID_KID <= ? order by t.TBL_ID ";
+    }
+
+    long maxID = 0, minID = 0;
+    long rtnSize = 2000;
+
+    try {
+      Statement stmt = conn.createStatement();
+      ResultSet res = stmt.executeQuery(skewedColIDRange);
+      if (res.next()) {
+        maxID = res.getLong(1);
+        minID = res.getLong(2);
+      }
+      res.close();
+      stmt.close();
+      PreparedStatement pStmt = conn.prepareStatement(skewedColLoc);
+      while (minID <= maxID) {
+        pStmt.setLong(1, minID);
+        pStmt.setLong(2, minID + rtnSize);
+        res = pStmt.executeQuery();
+        while (res.next()) {
+          String locValue = res.getString(4);
+          String entity = "Database " + getNameOrID(res,5,6) +
+              ", Table " + getNameOrID(res,1,2) +
+              ", String list " + res.getString(3);
+          if (!checkLocation(entity, locValue, defaultServers)) {
+            numOfInvalid++;
+          }
+        }
+        res.close();
+        minID += rtnSize + 1;
+      }
+      pStmt.close();
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to get skewed columns location info.", e);
+    }
+    if (numOfInvalid > 0) {
+      isValid = false;
+    }
+    return isValid;
+  }
+
+  /**
+   * Check if the location is valid for the given entity
+   * @param entity          the entity to represent a database, partition or table
+   * @param entityLocation  the location
+   * @param defaultServers  a list of the servers that the location needs to match.
+   *                        The location host needs to match one of the given servers.
+   *                        If empty, then no check against such list.
+   * @return true if the location is valid
+   */
+  private boolean checkLocation(
+      String entity,
+      String entityLocation,
+      URI[] defaultServers) {
+    boolean isValid = true;
+    if (entityLocation == null) {
+      logAndPrintToError(entity + ", Error: empty location");
+      isValid = false;
+    } else {
+      try {
+        URI currentUri = new Path(entityLocation).toUri();
+        String scheme = currentUri.getScheme();
+        String path   = currentUri.getPath();
+        if (StringUtils.isEmpty(scheme)) {
+          logAndPrintToError(entity + ", Location: "+ entityLocation + ", Error: missing location scheme.");
+          isValid = false;
+        } else if (StringUtils.isEmpty(path)) {
+          logAndPrintToError(entity + ", Location: "+ entityLocation + ", Error: missing location path.");
+          isValid = false;
+        } else if (ArrayUtils.isNotEmpty(defaultServers) && currentUri.getAuthority() != null) {
+          String authority = currentUri.getAuthority();
+          boolean matchServer = false;
+          for(URI server : defaultServers) {
+            if (StringUtils.equalsIgnoreCase(server.getScheme(), scheme) &&
+                StringUtils.equalsIgnoreCase(server.getAuthority(), authority)) {
+              matchServer = true;
+              break;
+            }
+          }
+          if (!matchServer) {
+            logAndPrintToError(entity + ", Location: " + entityLocation + ", Error: mismatched server.");
+            isValid = false;
+          }
+        }
+
+        // if there is no path element other than "/", report it but do not fail
+        if (isValid && StringUtils.containsOnly(path, "/")) {
+          logAndPrintToError(entity + ", Location: "+ entityLocation + ", Warn: location set to root, not a recommended config.");
+        }
+      } catch (Exception pe) {
+        logAndPrintToError(entity + ", Error: invalid location - " + pe.getMessage());
+        isValid = false;
+      }
+    }
+
+    return isValid;
+  }
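
A minimal standalone sketch of the same checks against plain java.net.URI
(the method above goes through Hadoop's Path first; the URIs here are made up):

import java.net.URI;

public class LocationCheckDemo {
  public static void main(String[] args) {
    URI loc = URI.create("hdfs://nn1.example.com:8020/warehouse/db1.db");
    URI server = URI.create("hdfs://nn1.example.com:8020"); // one entry of defaultServers
    boolean schemeOk = loc.getScheme() != null;              // missing scheme -> invalid
    boolean pathOk = loc.getPath() != null && !loc.getPath().isEmpty(); // missing path -> invalid
    boolean serverOk = server.getScheme().equalsIgnoreCase(loc.getScheme())
        && server.getAuthority().equalsIgnoreCase(loc.getAuthority()); // mismatch -> invalid
    boolean rootOnly = "/".equals(loc.getPath());            // warned about, but not fatal
    System.out.println(schemeOk && pathOk && serverOk && !rootOnly);
  }
}
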
+
+  // test the connection to the metastore using the configured properties
+  private void testConnectionToMetastore() throws HiveMetaException {
+    Connection conn = getConnectionToMetastore(true);
+    try {
+      conn.close();
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to close metastore connection", e);
+    }
+  }
+
+  /**
+   * check if the current schema version in metastore matches the Hive version
+   */
+  @VisibleForTesting
+  void verifySchemaVersion() throws HiveMetaException {
+    // don't check the version if it's a dry run
+    if (dryRun) {
+      return;
+    }
+    String newSchemaVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false));
+    // verify that the new version is added to schema
+    assertCompatibleVersion(metaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion);
+  }
+
+  private void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVersion)
+      throws HiveMetaException {
+    if (!metaStoreSchemaInfo.isVersionCompatible(hiveSchemaVersion, dbSchemaVersion)) {
+      throw new HiveMetaException("Metastore schema version is not compatible. Hive Version: "
+          + hiveSchemaVersion + ", Database Schema Version: " + dbSchemaVersion);
+    }
+  }
+
+  /**
+   * Perform metastore schema upgrade. extract the current schema version from metastore
+   */
+  void doUpgrade() throws HiveMetaException {
+    String fromVersion =
+      metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false));
+    if (fromVersion == null || fromVersion.isEmpty()) {
+      throw new HiveMetaException("Schema version not stored in the metastore. " +
+          "Metastore schema is too old or corrupt. Try specifying the version manually");
+    }
+    doUpgrade(fromVersion);
+  }
+
+  private MetaStoreConnectionInfo getConnectionInfo(boolean printInfo) {
+    return new MetaStoreConnectionInfo(userName, passWord, url, driver, printInfo, conf,
+        dbType);
+  }
+
+  /**
+   * Perform metastore schema upgrade
+   *
+   * @param fromSchemaVer
+   *          Existing version of the metastore. If null, then read from the metastore
+   */
+  void doUpgrade(String fromSchemaVer) throws HiveMetaException {
+    if (metaStoreSchemaInfo.getHiveSchemaVersion().equals(fromSchemaVer)) {
+      System.out.println("No schema upgrade required from version " + fromSchemaVer);
+      return;
+    }
+    // Find the list of scripts to execute for this upgrade
+    List<String> upgradeScripts =
+        metaStoreSchemaInfo.getUpgradeScripts(fromSchemaVer);
+    testConnectionToMetastore();
+    System.out.println("Starting upgrade metastore schema from version " +
+        fromSchemaVer + " to " + metaStoreSchemaInfo.getHiveSchemaVersion());
+    String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
+    try {
+      for (String scriptFile : upgradeScripts) {
+        System.out.println("Upgrade script " + scriptFile);
+        if (!dryRun) {
+          runPreUpgrade(scriptDir, scriptFile);
+          runSqlLine(scriptDir, scriptFile);
+          System.out.println("Completed " + scriptFile);
+        }
+      }
+    } catch (IOException eIO) {
+      throw new HiveMetaException(
+          "Upgrade FAILED! Metastore state would be inconsistent !!", eIO);
+    }
+
+    // Re-validate the new version after the upgrade
+    verifySchemaVersion();
+  }
+
+  /**
+   * Initialize the metastore schema to current version
+   *
+   */
+  void doInit() throws HiveMetaException {
+    doInit(metaStoreSchemaInfo.getHiveSchemaVersion());
+
+    // Re-validate the new version after initialization
+    verifySchemaVersion();
+  }
+
+  /**
+   * Initialize the metastore schema
+   *
+   * @param toVersion
+   *          If null then current hive version is used
+   */
+  void doInit(String toVersion) throws HiveMetaException {
+    testConnectionToMetastore();
+    System.out.println("Starting metastore schema initialization to " + toVersion);
+
+    String initScriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
+    String initScriptFile = metaStoreSchemaInfo.generateInitFileName(toVersion);
+
+    try {
+      System.out.println("Initialization script " + initScriptFile);
+      if (!dryRun) {
+        runSqlLine(initScriptDir, initScriptFile);
+        System.out.println("Initialization script completed");
+      }
+    } catch (IOException e) {
+      throw new HiveMetaException("Schema initialization FAILED!" +
+          " Metastore state would be inconsistent !!", e);
+    }
+  }
+
+  private void doCreateUser() throws HiveMetaException {
+    testConnectionToMetastore();
+    System.out.println("Starting user creation");
+
+    String scriptDir = metaStoreSchemaInfo.getMetaStoreScriptDir();
+    String protoCreateFile = metaStoreSchemaInfo.getCreateUserScript();
+
+    try {
+      File createFile = subUserAndPassword(scriptDir, protoCreateFile);
+      System.out.println("Creation script " + createFile.getAbsolutePath());
+      if (!dryRun) {
+        if ("oracle".equals(dbType)) oracleCreateUserHack(createFile);
+        else runSqlLine(createFile.getParent(), createFile.getName());
+        System.out.println("User creation completed");
+      }
+    } catch (IOException e) {
+      throw new HiveMetaException("User creation FAILED!" +
+          " Metastore unusable !!", e);
+    }
+  }
+
+  private File subUserAndPassword(String parent, String filename) throws IOException {
+    File createFile = File.createTempFile("create-hive-user-" + dbType, ".sql");
+    BufferedWriter writer = new BufferedWriter(new FileWriter(createFile));
+    File proto = new File(parent, filename);
+    BufferedReader reader = new BufferedReader(new FileReader(proto));
+    reader.lines()
+        .map(s -> s.replace("_REPLACE_WITH_USER_", hiveUser)
+            .replace("_REPLACE_WITH_PASSWD_", hivePasswd)
+            .replace("_REPLACE_WITH_DB_", hiveDb))
+        .forEach(s -> {
+            try {
+              writer.write(s);
+              writer.newLine();
+            } catch (IOException e) {
+              throw new RuntimeException("Unable to write to tmp file ", e);
+            }
+          });
+    reader.close();
+    writer.close();
+    return createFile;
+  }
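
The substitution above is plain String.replace applied line by line; with a
made-up template line and made-up credentials it behaves like this:

public class UserTemplateDemo {
  public static void main(String[] args) {
    String line = "CREATE USER '_REPLACE_WITH_USER_' IDENTIFIED BY '_REPLACE_WITH_PASSWD_';";
    String out = line.replace("_REPLACE_WITH_USER_", "hive")
        .replace("_REPLACE_WITH_PASSWD_", "hivepw")
        .replace("_REPLACE_WITH_DB_", "metastore");
    System.out.println(out); // CREATE USER 'hive' IDENTIFIED BY 'hivepw';
  }
}
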
+
+  private void oracleCreateUserHack(File createFile) throws HiveMetaException {
+    LOG.debug("Found oracle, hacking our way through it rather than using SqlLine");
+    try (BufferedReader reader = new BufferedReader(new FileReader(createFile))) {
+      try (Connection conn = getConnectionToMetastore(false)) {
+        try (Statement stmt = conn.createStatement()) {
+          reader.lines()
+              .forEach(s -> {
+                assert s.charAt(s.length() - 1) == ';';
+                try {
+                  stmt.execute(s.substring(0, s.length() - 1));
+                } catch (SQLException e) {
+                  LOG.error("statement <" + s.substring(0, s.length() - 2) + "> failed", e);
+                  throw new RuntimeException(e);
+                }
+              });
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("Caught IOException trying to read modified create user script " +
+          createFile.getAbsolutePath(), e);
+      throw new HiveMetaException(e);
+    } catch (HiveMetaException e) {
+      LOG.error("Failed to connect to RDBMS", e);
+      throw e;
+    } catch (SQLException e) {
+      LOG.error("Got SQLException", e);
+    }
+  }
+
+  private int doValidate() throws HiveMetaException {
+    System.out.println("Starting metastore validation\n");
+    Connection conn = getConnectionToMetastore(false);
+    boolean success = true;
+    try {
+      if (validateSchemaVersions()) {
+        System.out.println("[SUCCESS]\n");
+      } else {
+        success = false;
+        System.out.println("[FAIL]\n");
+      }
+      if (validateSequences(conn)) {
+        System.out.println("[SUCCESS]\n");
+      } else {
+        success = false;
+        System.out.println("[FAIL]\n");
+      }
+      if (validateSchemaTables(conn)) {
+        System.out.println("[SUCCESS]\n");
+      } else {
+        success = false;
+        System.out.println("[FAIL]\n");
+      }
+      if (validateLocations(conn, this.validationServers)) {
+        System.out.println("[SUCCESS]\n");
+      } else {
+        System.out.println("[WARN]\n");
+      }
+      if (validateColumnNullValues(conn)) {
+        System.out.println("[SUCCESS]\n");
+      } else {
+        System.out.println("[WARN]\n");
+      }
+    } finally {
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException e) {
+          // Not a lot you can do here.
+        }
+      }
+    }
+
+    System.out.print("Done with metastore validation: ");
+    if (!success) {
+      System.out.println("[FAIL]");
+      return 1;
+    } else {
+      System.out.println("[SUCCESS]");
+      return 0;
+    }
+  }
+
+  boolean validateSequences(Connection conn) throws HiveMetaException {
+    Map<String, Pair<String, String>> seqNameToTable =
+        new ImmutableMap.Builder<String, Pair<String, String>>()
+        .put("MDatabase", Pair.of("DBS", "DB_ID"))
+        .put("MRole", Pair.of("ROLES", "ROLE_ID"))
+        .put("MGlobalPrivilege", Pair.of("GLOBAL_PRIVS", "USER_GRANT_ID"))
+        .put("MTable", Pair.of("TBLS","TBL_ID"))
+        .put("MStorageDescriptor", Pair.of("SDS", "SD_ID"))
+        .put("MSerDeInfo", Pair.of("SERDES", "SERDE_ID"))
+        .put("MColumnDescriptor", Pair.of("CDS", "CD_ID"))
+        .put("MTablePrivilege", Pair.of("TBL_PRIVS", "TBL_GRANT_ID"))
+        .put("MTableColumnStatistics", Pair.of("TAB_COL_STATS", "CS_ID"))
+        .put("MPartition", Pair.of("PARTITIONS", "PART_ID"))
+        .put("MPartitionColumnStatistics", Pair.of("PART_COL_STATS", "CS_ID"))
+        .put("MFunction", Pair.of("FUNCS", "FUNC_ID"))
+        .put("MIndex", Pair.of("IDXS", "INDEX_ID"))
+        .put("MStringList", Pair.of("SKEWED_STRING_LIST", "STRING_LIST_ID"))
+        .build();
+
+    System.out.println("Validating sequence number for SEQUENCE_TABLE");
+
+    boolean isValid = true;
+    try {
+      Statement stmt = conn.createStatement();
+      for (String seqName : seqNameToTable.keySet()) {
+        String tableName = seqNameToTable.get(seqName).getLeft();
+        String tableKey = seqNameToTable.get(seqName).getRight();
+        String fullSequenceName = "org.apache.hadoop.hive.metastore.model." + seqName;
+        String seqQuery = needsQuotedIdentifier ?
+            ("select t.\"NEXT_VAL\" from \"SEQUENCE_TABLE\" t WHERE t.\"SEQUENCE_NAME\"=? order by t.\"SEQUENCE_NAME\" ")
+            : ("select t.NEXT_VAL from SEQUENCE_TABLE t WHERE t.SEQUENCE_NAME=? order by t.SEQUENCE_NAME ");
+        String maxIdQuery = needsQuotedIdentifier ?
+            ("select max(\"" + tableKey + "\") from \"" + tableName + "\"")
+            : ("select max(" + tableKey + ") from " + tableName);
+
+        ResultSet res = stmt.executeQuery(maxIdQuery);
+        if (res.next()) {
+          long maxId = res.getLong(1);
+          if (maxId > 0) {
+            PreparedStatement pStmt = conn.prepareStatement(seqQuery);
+            pStmt.setString(1, fullSequenceName);
+            ResultSet resSeq = pStmt.executeQuery();
+            if (!resSeq.next()) {
+              isValid = false;
+              logAndPrintToError("Missing SEQUENCE_NAME " + seqName + " from SEQUENCE_TABLE");
+            } else if (resSeq.getLong(1) < maxId) {
+              isValid = false;
+              logAndPrintToError("NEXT_VAL for " + seqName + " in SEQUENCE_TABLE < max(" +
+                  tableKey + ") in " + tableName);
+            }
+          }
+        }
+      }
+
+      System.out.println((isValid ? "Succeeded" :"Failed") + " in sequence number validation for SEQUENCE_TABLE.");
+      return isValid;
+    } catch(SQLException e) {
+        throw new HiveMetaException("Failed to validate sequence number for SEQUENCE_TABLE", e);
+    }
+  }
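
Put concretely, the invariant checked above is that the ORM's sequence counter
has not fallen behind the rows it numbers; with made-up values:

public class SequenceInvariantDemo {
  public static void main(String[] args) {
    long maxTblId = 42; // max(TBL_ID) seen in TBLS (made-up value)
    long nextVal = 40;  // NEXT_VAL for ...model.MTable in SEQUENCE_TABLE (made-up value)
    if (nextVal < maxTblId) {
      System.out.println("invalid: a newly created table could be handed an existing TBL_ID");
    }
  }
}
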
+
+  boolean validateSchemaVersions() throws HiveMetaException {
+    System.out.println("Validating schema version");
+    try {
+      String newSchemaVersion = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false));
+      assertCompatibleVersion(metaStoreSchemaInfo.getHiveSchemaVersion(), newSchemaVersion);
+    } catch (HiveMetaException hme) {
+      if (hme.getMessage().contains("Metastore schema version is not compatible")
+        || hme.getMessage().contains("Multiple versions were found in metastore")
+        || hme.getMessage().contains("Could not find version info in metastore VERSION table")) {
+        logAndPrintToError(hme.getMessage());
+        System.out.println("Failed in schema version validation.");
+        return false;
+      } else {
+        throw hme;
+      }
+    }
+    System.out.println("Succeeded in schema version validation.");
+    return true;
+  }
+
+  boolean validateSchemaTables(Connection conn) throws HiveMetaException {
+    String version;
+    ResultSet rs              = null;
+    DatabaseMetaData metadata;
+    List<String> dbTables     = new ArrayList<>();
+    List<String> schemaTables = new ArrayList<>();
+    List<String> subScripts   = new ArrayList<>();
+    Connection hmsConn;
+
+    System.out.println("Validating metastore schema tables");
+    try {
+      version = metaStoreSchemaInfo.getMetaStoreSchemaVersion(getConnectionInfo(false));
+    } catch (HiveMetaException he) {
+      logAndPrintToError("Failed to determine schema version from Hive Metastore DB. " + he.getMessage());
+      System.out.println("Failed in schema table validation.");
+      LOG.debug("Failed to determine schema version from Hive Metastore DB," + he.getMessage());
+      return false;
+    }
+
+    // re-open the hms connection
+    hmsConn = getConnectionToMetastore(false);
+
+    LOG.debug("Validating tables in the schema for version " + version);
+    try {
+      metadata       = conn.getMetaData();
+      String[] types = {"TABLE"};
+      rs             = metadata.getTables(null, hmsConn.getSchema(), "%", types);
+      String table;
+
+      while (rs.next()) {
+        table = rs.getString("TABLE_NAME");
+        dbTables.add(table.toLowerCase());
+        LOG.debug("Found table " + table + " in HMS dbstore");
+      }
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to retrieve schema tables from Hive Metastore DB," + e.getMessage());
+    } finally {
+      if (rs != null) {
+        try {
+          rs.close();
+        } catch (SQLException e) {
+          // Not a lot you can do here.
+        }
+      }
+    }
+
+    // parse the schema file to determine the tables that are expected to exist
+    // we use the Oracle schema because it is simpler to parse (no quotes, backticks, etc.)
+    String baseDir    = new File(metaStoreSchemaInfo.getMetaStoreScriptDir()).getParent();
+    String schemaFile = new File(metaStoreSchemaInfo.getMetaStoreScriptDir(),
+        metaStoreSchemaInfo.generateInitFileName(version)).getPath();
+    try {
+      LOG.debug("Parsing schema script " + schemaFile);
+      subScripts.addAll(findCreateTable(schemaFile, schemaTables));
+      while (subScripts.size() > 0) {
+        schemaFile = baseDir + "/" + dbType + "/" + subScripts.remove(0);
+        LOG.debug("Parsing subscript " + schemaFile);
+        subScripts.addAll(findCreateTable(schemaFile, schemaTables));
+      }
+    } catch (Exception e) {
+      logAndPrintToError("Exception in parsing schema file. Cause:" + e.getMessage());
+      System.out.println("Failed in schema table validation.");
+      return false;
+    }
+
+    LOG.debug("Schema tables:[ " + Arrays.toString(schemaTables.toArray()) + " ]");
+    LOG.debug("DB tables:[ " + Arrays.toString(dbTables.toArray()) + " ]");
+    // now diff the lists
+    schemaTables.removeAll(dbTables);
+    if (schemaTables.size() > 0) {
+      Collections.sort(schemaTables);
+      logAndPrintToError("Table(s) [ " + Arrays.toString(schemaTables.toArray())
+          + " ] are missing from the metastore database schema.");
+      System.out.println("Failed in schema table validation.");
+      return false;
+    } else {
+      System.out.println("Succeeded in schema table validation.");
+      return true;
+    }
+  }
+
+  private List<String> findCreateTable(String path, List<String> tableList)
+      throws Exception {
+    NestedScriptParser sp           = HiveSchemaHelper.getDbCommandParser(dbType, true);
+    Matcher matcher;
+    Pattern regexp;
+    List<String> subs               = new ArrayList<>();
+    int groupNo                     = 2;
+
+    regexp = Pattern.compile("CREATE TABLE(\\s+IF NOT EXISTS)?\\s+(\\S+).*");
+
+    if (!(new File(path)).exists()) {
+      throw new Exception(path + " does not exist. Potentially incorrect version in the metastore VERSION table");
+    }
+
+    try (
+      BufferedReader reader = new BufferedReader(new FileReader(path))
+    ){
+      String line;
+      while ((line = reader.readLine()) != null) {
+        if (sp.isNestedScript(line)) {
+          String subScript;
+          subScript = sp.getScriptName(line);
+          LOG.debug("Schema subscript " + subScript + " found");
+          subs.add(subScript);
+          continue;
+        }
+        line    = line.replaceAll("( )+", " "); //suppress multi-spaces
+        line    = line.replaceAll("\\(", " ");
+        line    = line.replaceAll("IF NOT EXISTS ", "");
+        line    = line.replaceAll("`","");
+        line    = line.replaceAll("'","");
+        line    = line.replaceAll("\"","");
+        matcher = regexp.matcher(line);
+
+        if (matcher.find()) {
+          String table = matcher.group(groupNo);
+          if (dbType.equals("derby"))
+            table  = table.replaceAll("APP\\.","");
+          tableList.add(table.toLowerCase());
+          LOG.debug("Found table " + table + " in the schema");
+        }
+      }
+    } catch (IOException ex) {
+      throw new Exception(ex.getMessage(), ex);
+    }
+
+    return subs;
+  }
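
A runnable demonstration of the table-name extraction above; the DDL line is
made up and shown after the replaceAll() normalization has already been applied:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FindCreateTableDemo {
  public static void main(String[] args) {
    Pattern regexp = Pattern.compile("CREATE TABLE(\\s+IF NOT EXISTS)?\\s+(\\S+).*");
    // e.g. CREATE TABLE "TBLS" ("TBL_ID" NUMBER NOT NULL, ...) reduces to:
    String line = "CREATE TABLE TBLS TBL_ID NUMBER NOT NULL";
    Matcher matcher = regexp.matcher(line);
    if (matcher.find()) {
      System.out.println(matcher.group(2).toLowerCase()); // prints: tbls
    }
  }
}
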
+
+  boolean validateColumnNullValues(Connection conn) throws HiveMetaException {
+    System.out.println("Validating columns for incorrect NULL values.");
+    boolean isValid = true;
+    try {
+      Statement stmt = conn.createStatement();
+      String tblQuery = needsQuotedIdentifier ?
+          ("select t.* from \"TBLS\" t WHERE t.\"SD_ID\" IS NULL and (t.\"TBL_TYPE\"='" + TableType.EXTERNAL_TABLE + "' or t.\"TBL_TYPE\"='" + TableType.MANAGED_TABLE + "') order by t.\"TBL_ID\" ")
+          : ("select t.* from TBLS t WHERE t.SD_ID IS NULL and (t.TBL_TYPE='" + TableType.EXTERNAL_TABLE + "' or t.TBL_TYPE='" + TableType.MANAGED_TABLE + "') order by t.TBL_ID ");
+
+      ResultSet res = stmt.executeQuery(tblQuery);
+      while (res.next()) {
+        long tableId = res.getLong("TBL_ID");
+        String tableName = res.getString("TBL_NAME");
+        String tableType = res.getString("TBL_TYPE");
+        isValid = false;
+        logAndPrintToError("SD_ID in TBLS should not be NULL for Table Name=" + tableName + ", Table ID=" + tableId + ", Table Type=" + tableType);
+      }
+
+      System.out.println((isValid ? "Succeeded" : "Failed") + " in column validation for incorrect NULL values.");
+      return isValid;
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to validate columns for incorrect NULL values", e);
+    }
+  }
+
+  /**
+   *  Run pre-upgrade scripts corresponding to a given upgrade script,
+   *  if any exist. The errors from pre-upgrade are ignored.
+   *  Pre-upgrade scripts typically contain setup statements which
+   *  may fail on some database versions and failure is ignorable.
+   *
+   *  @param scriptDir upgrade script directory name
+   *  @param scriptFile upgrade script file name
+   */
+  private void runPreUpgrade(String scriptDir, String scriptFile) {
+    for (int i = 0;; i++) {
+      String preUpgradeScript =
+          metaStoreSchemaInfo.getPreUpgradeScriptName(i, scriptFile);
+      File preUpgradeScriptFile = new File(scriptDir, preUpgradeScript);
+      if (!preUpgradeScriptFile.isFile()) {
+        break;
+      }
+
+      try {
+        runSqlLine(scriptDir, preUpgradeScript);
+        System.out.println("Completed " + preUpgradeScript);
+      } catch (Exception e) {
+        // Ignore the pre-upgrade script errors
+        logAndPrintToError("Warning in pre-upgrade script " + preUpgradeScript + ": "
+            + e.getMessage());
+        if (verbose) {
+          e.printStackTrace();
+        }
+      }
+    }
+  }
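
The loop above keeps probing index i until a script is missing. A sketch of
the same scan, assuming the conventional "pre-<i>-<script>" naming (that
format is an assumption about getPreUpgradeScriptName, and the directory and
script names below are made up):

import java.io.File;

public class PreUpgradeScanDemo {
  public static void main(String[] args) {
    String scriptDir = "/tmp/scripts";                      // made-up directory
    String scriptFile = "upgrade-2.3.0-to-3.0.0.mysql.sql"; // made-up upgrade script
    for (int i = 0; ; i++) {
      File f = new File(scriptDir, "pre-" + i + "-" + scriptFile);
      if (!f.isFile()) {
        break; // stop at the first missing index
      }
      System.out.println("would run " + f.getName());
    }
  }
}
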
+
+  /**
+   * Run beeline with the given metastore script. Flatten the nested scripts
+   * into single file.
+   */
+  private void runSqlLine(String scriptDir, String scriptFile)
+      throws IOException, HiveMetaException {
+
+    // This no longer does expansions of run commands in the files as it used to.  Instead it
+    // depends on the developers to have already unrolled those in the files.
+    runSqlLine(scriptDir + File.separatorChar + scriptFile);
+  }
+
+  // Generate the beeline args per hive conf and execute the given script
+  void runSqlLine(String sqlScriptFile) throws IOException {
+    CommandBuilder builder = new CommandBuilder(conf, url, driver,
+        userName, passWord, sqlScriptFile);
+
+    // run the script using SqlLine
+    SqlLine sqlLine = new SqlLine();
+    ByteArrayOutputStream outputForLog = null;
+    if (!verbose) {
+      OutputStream out;
+      if (LOG.isDebugEnabled()) {
+        out = outputForLog = new ByteArrayOutputStream();
+      } else {
+        out = new NullOutputStream();
+      }
+      sqlLine.setOutputStream(new PrintStream(out));
+      System.setProperty("sqlline.silent", "true");
+    }
+    //sqlLine.getOpts().setAllowMultiLineCommand(false);
+    //System.setProperty("sqlline.isolation","TRANSACTION_READ_COMMITTED");
+    // We can be pretty sure that an entire line can be processed as a single command since
+    // we always add a line separator at the end while calling dbCommandParser.buildCommand.
+    //sqlLine.getOpts().setEntireLineAsCommand(true);
+    LOG.info("Going to run command <" + builder.buildToLog() + ">");
+    SqlLine.Status status = sqlLine.begin(builder.buildToRun(), null, false);
+    if (LOG.isDebugEnabled() && outputForLog != null) {
+      LOG.debug("Received following output from Sqlline:");
+      LOG.debug(outputForLog.toString("UTF-8"));
+    }
+    if (status != SqlLine.Status.OK) {
+      throw new IOException("Schema script failed, errorcode " + status);
+    }
+  }
+
+  static class CommandBuilder {
+    private final Configuration conf;
+    private final String userName;
+    private final String password;
+    private final String sqlScriptFile;
+    private final String driver;
+    private final String url;
+
+    CommandBuilder(Configuration conf, String url, String driver,
+                   String userName, String password, String sqlScriptFile) {
+      this.conf = conf;
+      this.userName = userName;
+      this.password = password;
+      this.url = url;
+      this.driver = driver;
+      this.sqlScriptFile = sqlScriptFile;
+    }
+
+    String[] buildToRun() throws IOException {
+      return argsWith(password);
+    }
+
+    String buildToLog() throws IOException {
+      logScript();
+      return StringUtils.join(argsWith(PASSWD_MASK), " ");
+    }
+
+    private String[] argsWith(String password) throws IOException {
+      return new String[]
+        {
+          "-u", url == null ? MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY) : url,
+          "-d", driver == null ? MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER) : driver,
+          "-n", userName,
+          "-p", password,
+          "--isolation=TRANSACTION_READ_COMMITTED",
+          "-f", sqlScriptFile
+        };
+    }
+
+    private void logScript() throws IOException {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to invoke file that contains:");
+        try (BufferedReader reader = new BufferedReader(new FileReader(sqlScriptFile))) {
+          String line;
+          while ((line = reader.readLine()) != null) {
+            LOG.debug("script: " + line);
+          }
+        }
+      }
+    }
+  }
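
With made-up connection values, the argument vector CommandBuilder hands to
SqlLine comes out as below; "***" stands in for PASSWD_MASK, whose actual
value is defined elsewhere:

public class SqlLineArgsDemo {
  public static void main(String[] args) {
    String[] a = {
        "-u", "jdbc:derby:;databaseName=metastore_db;create=true",
        "-d", "org.apache.derby.jdbc.EmbeddedDriver",
        "-n", "APP",
        "-p", "***",
        "--isolation=TRANSACTION_READ_COMMITTED",
        "-f", "hive-schema-3.0.0.derby.sql"
    };
    System.out.println(String.join(" ", a));
  }
}
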
+
+  // Create the required command line options
+  @SuppressWarnings("static-access")
+  private static void initOptions(Options cmdLineOptions) {
+    Option help = new Option("help", "print this message");
+    Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade");
+    Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg().
+                withDescription("Schema upgrade from a version").
+                create("upgradeSchemaFrom");
+    Option initOpt = new Option("initSchema", "Schema initialization");
+    Option initToOpt = OptionBuilder.withArgName("initTo").hasArg().
+                withDescription("Schema initialization to a version").
+                create("initSchemaTo");
+    Option infoOpt = new Option("info", "Show config and schema details");
+    Option validateOpt = new Option("validate", "Validate the database");
+    Option createUserOpt = new Option("createUser", "Create the Hive user, use admin user and " +
+        "password with this");
+
+    OptionGroup optGroup = new OptionGroup();
+    optGroup.addOption(upgradeOpt).addOption(initOpt).
+                addOption(help).addOption(upgradeFromOpt).addOption(createUserOpt)
+                .addOption(initToOpt).addOption(infoOpt).addOption(validateOpt);
+    optGroup.setRequired(true);
+
+    Option userNameOpt = OptionBuilder.withArgName("user")
+                .hasArgs()
+                .withDescription("Override config file user name")
+                .create("userName");
+    Option passwdOpt = OptionBuilder.withArgName("password")
+                .hasArgs()
+                 .withDescription("Override config file password")
+                 .create("passWord");
+    Option hiveUserOpt = OptionBuilder
+        .hasArg()
+        .withDescription("Hive user (for use with createUser)")
+        .create("hiveUser");
+    Option hivePasswdOpt = OptionBuilder
+        .hasArg()
+        .withDescription("Hive password (for use with createUser)")
+        .create("hivePassword");
+    Option hiveDbOpt = OptionBuilder
+        .hasArg()
+        .withDescription("Hive database (for use with createUser)")
+        .create("hiveDb");
+    Option dbTypeOpt = OptionBuilder.withArgName("databaseType")
+                .hasArgs().withDescription("Metastore database type")
+                .create("dbType");
+    Option urlOpt = OptionBuilder.withArgName("url")
+                .hasArgs().withDescription("connection url to the database")
+                .create("url");
+    Option driverOpt = OptionBuilder.withArgName("driver")
+                .hasArgs().withDescription("driver name for connection")
+                .create("driver");
+    Option dbOpts = OptionBuilder.withArgName("databaseOpts")
+                .hasArgs().withDescription("Backend DB specific options")
+                .create("dbOpts");
+    Option dryRunOpt = new Option("dryRun", "list SQL scripts (no execute)");
+    Option verboseOpt = new Option("verbose", "only print SQL statements");
+    Option serversOpt = OptionBuilder.withArgName("serverList")
+        .hasArgs().withDescription("a comma-separated list of servers used in location validation in the format of scheme://authority (e.g. hdfs://localhost:8000)")
+        .create("servers");
+    cmdLineOptions.addOption(help);
+    cmdLineOptions.addOption(dryRunOpt);
+    cmdLineOptions.addOption(userNameOpt);
+    cmdLineOptions.addOption(passwdOpt);
+    cmdLineOptions.addOption(dbTypeOpt);
+    cmdLineOptions.addOption(verboseOpt);
+    cmdLineOptions.addOption(urlOpt);
+    cmdLineOptions.addOption(driverOpt);
+    cmdLineOptions.addOption(dbOpts);
+    cmdLineOptions.addOption(serversOpt);
+    cmdLineOptions.addOption(hiveUserOpt);
+    cmdLineOptions.addOption(hivePasswdOpt);
+    cmdLineOptions.addOption(hiveDbOpt);
+    cmdLineOptions.addOptionGroup(optGroup);
+  }
+
+  static void logAndPrintToError(String errmsg) {
+    LOG.error(errmsg);
+    System.err.println(errmsg);
+  }
+
+  public static void main(String[] args) {
+    System.exit(run(args));
+  }
+
+  public static int run(String[] args) {
+    LOG.debug("Going to run command: " + StringUtils.join(args, " "));
+    CommandLineParser parser = new GnuParser();
+    CommandLine line;
+    String dbType;
+    String schemaVer;
+    Options cmdLineOptions = new Options();
+
+    // Argument handling
+    initOptions(cmdLineOptions);
+    try {
+      line = parser.parse(cmdLineOptions, args);
+    } catch (ParseException e) {
+      logAndPrintToError("HiveSchemaTool:Parsing failed.  Reason: " + e.getLocalizedMessage());
+      return usage(cmdLineOptions);
+    }
+
+    assert line != null;
+    if (line.hasOption("help")) {
+      HelpFormatter formatter = new HelpFormatter();
+      formatter.printHelp("schemaTool", cmdLineOptions);
+      return 1;
+    }
+
+    if (line.hasOption("dbType")) {
+      dbType = line.getOptionValue("dbType");
+      if ((!dbType.equalsIgnoreCase(HiveSchemaHelper.DB_DERBY) &&
+          !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MSSQL) &&
+          !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_MYSQL) &&
+          !dbType.equalsIgnoreCase(HiveSchemaHelper.DB_POSTGRACE) && !dbType
+          .equalsIgnoreCase(HiveSchemaHelper.DB_ORACLE))) {
+        logAndPrintToError("Unsupported dbType " + dbType);
+        return usage(cmdLineOptions);
+      }
+    } else {
+      logAndPrintToError("no dbType supplied");
+      return usage(cmdLineOptions);
+    }
+
+    System.setProperty(ConfVars.SCHEMA_VERIFICATION.toString(), "true");
+    try {
+      MetastoreSchemaTool schemaTool = new MetastoreSchemaTool(dbType);
+
+      if (line.hasOption("userName")) {
+        schemaTool.setUserName(line.getOptionValue("userName"));
+      } else {
+        schemaTool.setUserName(MetastoreConf.getVar(schemaTool.getConf(), ConfVars.CONNECTION_USER_NAME));
+      }
+      if (line.hasOption("passWord")) {
+        schemaTool.setPassWord(line.getOptionValue("passWord"));
+      } else {
+        try {
+          schemaTool.setPassWord(MetastoreConf.getPassword(schemaTool.getConf(), ConfVars.PWD));
+        } catch (IOException err) {
+          throw new HiveMetaException("Error getting metastore password", err);
+        }
+      }
+      if (line.hasOption("hiveUser")) {
+        schemaTool.setHiveUser(line.getOptionValue("hiveUser"));
+      }
+      if (line.hasOption("hivePassword")) {
+        schemaTool.setHivePasswd(line.getOptionValue("hivePassword"));
+      }
+      if (line.hasOption("hiveDb")) {
+        schemaTool.setHiveDb(line.getOptionValue("hiveDb"));
+      }
+      if (line.hasOption("url")) {
+        schemaTool.setUrl(line.getOptionValue("url"));
+      }
+      if (line.hasOption("driver")) {
+        schemaTool.setDriver(line.getOptionValue("driver"));
+      }
+      if (line.hasOption("dryRun")) {
+        schemaTool.setDryRun(true);
+      }
+      if (line.hasOption("verbose")) {
+        schemaTool.setVerbose(true);
+      }
+      if (line.hasOption("dbOpts")) {
+        schemaTool.setDbOpts(line.getOptionValue("dbOpts"));
+      }
+      if (line.hasOption("validate") && line.hasOption("servers")) {
+        schemaTool.setValidationServers(line.getOptionValue("servers"));
+      }
+      if (line.hasOption("info")) {
+        schemaTool.showInfo();
+      } else if (line.hasOption("upgradeSchema")) {
+        schemaTool.doUpgrade();
+      } else if (line.hasOption("upgradeSchemaFrom")) {
+        schemaVer = line.getOptionValue("upgradeSchemaFrom");
+        schemaTool.doUpgrade(schemaVer);
+      } else if (line.hasOption("initSchema")) {
+        schemaTool.doInit();
+      } else if (line.hasOption("initSchemaTo")) {
+        schemaVer = line.getOptionValue("initSchemaTo");
+        schemaTool.doInit(schemaVer);
+      } else if (line.hasOption("validate")) {
+        return schemaTool.doValidate();
+      } else if (line.hasOption("createUser")) {
+        schemaTool.doCreateUser();
+      } else {
+        logAndPrintToError("no valid option supplied");
+        return usage(cmdLineOptions);
+      }
+    } catch (HiveMetaException e) {
+      logAndPrintToError(e.getMessage());
+      if (e.getCause() != null) {
+        Throwable t = e.getCause();
+        logAndPrintToError("Underlying cause: "
+            + t.getClass().getName() + " : "
+            + t.getMessage());
+        if (e.getCause() instanceof SQLException) {
+          logAndPrintToError("SQL Error code: " + ((SQLException)t).getErrorCode());
+        }
+      }
+      if (line.hasOption("verbose")) {
+        e.printStackTrace();
+      } else {
+        logAndPrintToError("Use --verbose for detailed stacktrace.");
+      }
+      logAndPrintToError("*** schemaTool failed ***");
+      return 1;
+    }
+    System.out.println("schemaTool completed");
+    return 0;
+  }
+}
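
Because run(String[]) is public and returns the exit status, the tool can also
be driven in-process; a minimal sketch with illustrative option values:

public class EmbeddedSchemaToolDemo {
  public static void main(String[] args) {
    int status = org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool.run(new String[] {
        "-dbType", "derby", // illustrative values
        "-initSchema"
    });
    System.exit(status);
  }
}
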

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/resources/metastore-log4j2.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/metastore-log4j2.xml b/standalone-metastore/src/main/resources/metastore-log4j2.xml
new file mode 100644
index 0000000..f6ba8dc
--- /dev/null
+++ b/standalone-metastore/src/main/resources/metastore-log4j2.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<Configuration status="INFO">
+    <Appenders>
+        <Console name="Console" target="SYSTEM_OUT">
+            <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n" />
+        </Console>
+        <File name="LogFile" fileName="metastore.log">
+            <PatternLayout pattern="%d{ISO8601} %5p [%t] %c{2}: %m%n"/>
+        </File>
+    </Appenders>
+    <Loggers>
+        <Root level="debug">
+            <!-- <AppenderRef ref="Console" /> --><!-- Uncomment this to get console logging -->
+            <AppenderRef ref="LogFile"/>
+        </Root>
+    </Loggers>
+</Configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/base
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/base b/standalone-metastore/src/main/scripts/base
new file mode 100755
index 0000000..5105e7f
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/base
@@ -0,0 +1,238 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cygwin=false
+case "`uname`" in
+   CYGWIN*) cygwin=true;;
+esac
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/metastore-config.sh
+
+SERVICE=""
+HELP=""
+SKIP_HADOOPVERSION=false
+
+SERVICE_ARGS=()
+while [ $# -gt 0 ]; do
+  case "$1" in
+    --version)
+      shift
+      SERVICE=version
+      ;;
+    --service)
+      shift
+      SERVICE=$1
+      shift
+      ;;
+    --skiphadoopversion)
+      SKIP_HADOOPVERSION=true
+      shift
+      ;;
+    --help)
+      HELP=_help
+      shift
+      ;;
+    --debug*)
+      DEBUG=$1
+      shift
+      ;;
+    *)
+      SERVICE_ARGS=("${SERVICE_ARGS[@]}" "$1")
+      shift
+      ;;
+  esac
+done
+
+if [ "$SERVICE" = "" ] ; then
+  if [ "$HELP" = "_help" ] ; then
+    SERVICE="help"
+  else
+    SERVICE="cli"
+  fi
+fi
+
+if [[ "$SERVICE" =~ ^(help|schemaTool)$ ]] ; then
+  SKIP_HADOOPVERSION=true
+fi
+
+if [ -f "${METASTORE_CONF_DIR}/metastore-env.sh" ]; then
+  . "${METASTORE_CONF_DIR}/metastore-env.sh"
+fi
+
+CLASSPATH="${METASTORE_CONF_DIR}"
+
+METASTORE_LIB=${METASTORE_HOME}/lib
+
+# needed for execution
+if [ ! -f ${METASTORE_LIB}/hive-standalone-metastore-*.jar ]; then
+  echo "Missing Standalone MetaStore Jar"
+  exit 2;
+fi
+
+for f in ${METASTORE_LIB}/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add the auxiliary jars such as serdes
+if [ -d "${METASTORE_AUX_JARS_PATH}" ]; then
+  hive_aux_jars_abspath=`cd ${METASTORE_AUX_JARS_PATH} && pwd`
+  for f in $hive_aux_jars_abspath/*.jar; do
+    if [[ ! -f $f ]]; then
+        continue;
+    fi
+    if $cygwin; then
+        f=`cygpath -w "$f"`
+    fi
+    AUX_CLASSPATH=${AUX_CLASSPATH}:$f
+    if [ "${AUX_PARAM}" == "" ]; then
+        AUX_PARAM=file://$f
+    else
+        AUX_PARAM=${AUX_PARAM},file://$f;
+    fi
+  done
+elif [ "${METASTORE_AUX_JARS_PATH}" != "" ]; then 
+  METASTORE_AUX_JARS_PATH=`echo $METASTORE_AUX_JARS_PATH | sed 's/,/:/g'`
+  if $cygwin; then
+      METASTORE_AUX_JARS_PATH=`cygpath -p -w "$METASTORE_AUX_JARS_PATH"`
+      METASTORE_AUX_JARS_PATH=`echo $METASTORE_AUX_JARS_PATH | sed 's/;/,/g'`
+  fi
+  AUX_CLASSPATH=${AUX_CLASSPATH}:${METASTORE_AUX_JARS_PATH}
+  AUX_PARAM="file://$(echo ${METASTORE_AUX_JARS_PATH} | sed 's/:/,file:\/\//g')"
+fi
+
+if $cygwin; then
+    CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+    CLASSPATH=${CLASSPATH};${AUX_CLASSPATH}
+else
+    CLASSPATH=${CLASSPATH}:${AUX_CLASSPATH}
+fi
+
+# suppress the HADOOP_HOME warnings in 1.x.x
+export HADOOP_HOME_WARN_SUPPRESS=true
+
+# to make sure log4j2.x and jline jars are loaded ahead of the jars pulled by hadoop
+export HADOOP_USER_CLASSPATH_FIRST=true
+
+# pass classpath to hadoop
+if [ "$HADOOP_CLASSPATH" != "" ]; then
+  export HADOOP_CLASSPATH="${CLASSPATH}:${HADOOP_CLASSPATH}"
+else
+  export HADOOP_CLASSPATH="$CLASSPATH"
+fi
+
+# also pass hive classpath to hadoop
+if [ "$METASTORE_CLASSPATH" != "" ]; then
+  export HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${METASTORE_CLASSPATH}";
+fi
+
+# check for hadoop in the path
+HADOOP_IN_PATH=`which hadoop 2>/dev/null`
+if [ -f "${HADOOP_IN_PATH}" ]; then
+  HADOOP_DIR=`dirname "$HADOOP_IN_PATH"`/..
+fi
+# HADOOP_HOME env variable overrides hadoop in the path
+HADOOP_HOME=${HADOOP_HOME:-${HADOOP_PREFIX:-$HADOOP_DIR}}
+if [ "$HADOOP_HOME" == "" ]; then
+  echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+  exit 4;
+fi
+
+HADOOP=$HADOOP_HOME/bin/hadoop
+if [ ! -f ${HADOOP} ]; then
+  echo "Cannot find hadoop installation: \$HADOOP_HOME or \$HADOOP_PREFIX must be set or hadoop must be in the path";
+  exit 4;
+fi
+
+if [ "$SKIP_HADOOPVERSION" = false ]; then
+  # Make sure we're using a compatible version of Hadoop
+  if [ "x$HADOOP_VERSION" == "x" ]; then
+      HADOOP_VERSION=$($HADOOP version 2>&1 | awk -F"\t" '/Hadoop/ {print $0}' | cut -d' ' -f 2);
+  fi
+  
+  # Save the regex to a var to work around quoting incompatibilities
+  # between Bash 3.1 and 3.2
+  hadoop_version_re="^([[:digit:]]+)\.([[:digit:]]+)(\.([[:digit:]]+))?.*$"
+  
+  if [[ "$HADOOP_VERSION" =~ $hadoop_version_re ]]; then
+      hadoop_major_ver=${BASH_REMATCH[1]}
+      hadoop_minor_ver=${BASH_REMATCH[2]}
+      hadoop_patch_ver=${BASH_REMATCH[4]}
+  else
+      echo "Unable to determine Hadoop version information."
+      echo "'hadoop version' returned:"
+      echo `$HADOOP version`
+      exit 5
+  fi
+  
+  if [ "$hadoop_major_ver" -lt "2" ] || [ "$hadoop_major_ver" -eq "2" -a "$hadoop_minor_ver" -lt "6" ]; then
+      echo "Standalone metastore requires Hadoop 2.6 or later."
+      echo "'hadoop version' returned:"
+      echo `$HADOOP version`
+      exit 6
+  fi
+fi
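
The same major/minor extraction and the 2.6 floor, sketched in Java for
clarity (the version string is made up):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HadoopVersionDemo {
  public static void main(String[] args) {
    Pattern re = Pattern.compile("^(\\d+)\\.(\\d+)(\\.(\\d+))?.*$");
    Matcher m = re.matcher("2.7.3"); // made-up 'hadoop version' output
    if (m.matches()) {
      int major = Integer.parseInt(m.group(1));
      int minor = Integer.parseInt(m.group(2));
      boolean supported = major > 2 || (major == 2 && minor >= 6);
      System.out.println(supported); // true for 2.7.3
    }
  }
}
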
+
+if [ "${AUX_PARAM}" != "" ]; then
+  METASTORE_OPTS="$METASTORE_OPTS --hiveconf hive.aux.jars.path=${AUX_PARAM}"
+  AUX_JARS_CMD_LINE="-libjars ${AUX_PARAM}"
+fi
+
+SERVICE_LIST=""
+
+for i in "$bin"/ext/*.sh ; do
+  . $i
+done
+
+if [ "$DEBUG" ]; then
+  if [ "$HELP" ]; then
+    debug_help
+    exit 0
+  else
+    get_debug_params "$DEBUG"
+    export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS $METASTORE_MAIN_CLIENT_DEBUG_OPTS"
+  fi
+fi
+
+TORUN=""
+for j in $SERVICE_LIST ; do
+  if [ "$j" = "$SERVICE" ] ; then
+    TORUN=${j}$HELP
+  fi
+done
+
+# to initialize logging for all services
+
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=metastore-log4j2.xml"
+
+if [[ "$SERVICE" =~ ^(hiveserver2|beeline|cli)$ ]] ; then
+  # If process is backgrounded, don't change terminal settings
+  if [[ ( ! $(ps -o stat= -p $$) =~ "+" ) && ! ( -p /dev/stdin ) ]]; then
+    export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Djline.terminal=jline.UnsupportedTerminal"
+  fi
+fi
+
+if [ "$TORUN" = "" ] ; then
+  echo "Service $SERVICE not found"
+  echo "Available Services: $SERVICE_LIST"
+  exit 7
+else
+  set -- "${SERVICE_ARGS[@]}"
+  $TORUN "$@"
+fi

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/ext/metastore.sh
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/ext/metastore.sh b/standalone-metastore/src/main/scripts/ext/metastore.sh
new file mode 100644
index 0000000..6b12991
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/ext/metastore.sh
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=metastore
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+metastore() {
+  echo "$(timestamp): Starting Metastore Server"
+  CLASS=org.apache.hadoop.hive.metastore.HiveMetaStore
+  if $cygwin; then
+    METASTORE_LIB=`cygpath -w "$METASTORE_LIB"`
+  fi
+  JAR=${METASTORE_LIB}/hive-standalone-metastore-*.jar
+
+  # hadoop 0.20 or newer - skip the aux_jars option and hiveconf
+
+  export HADOOP_CLIENT_OPTS=" -Dproc_metastore $HADOOP_CLIENT_OPTS "
+  export HADOOP_OPTS="$METASTORE_HADOOP_OPTS $HADOOP_OPTS"
+  exec $HADOOP jar $JAR $CLASS "$@"
+}
+
+metastore_help() {
+  metastore -h
+}
+
+timestamp()
+{
+ date +"%Y-%m-%d %T"
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/ext/schemaTool.sh
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/ext/schemaTool.sh b/standalone-metastore/src/main/scripts/ext/schemaTool.sh
new file mode 100644
index 0000000..bfc8d78
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/ext/schemaTool.sh
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=schemaTool
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+schemaTool() {
+  METASTORE_OPTS=''
+  CLASS=org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool
+  if $cygwin; then
+    METASTORE_LIB=`cygpath -w "$METASTORE_LIB"`
+  fi
+  JAR=${METASTORE_LIB}/hive-standalone-metastore-*.jar
+
+  # hadoop 0.20 or newer - skip the aux_jars option and hiveconf
+  exec $HADOOP jar $JAR $CLASS "$@"
+}
+
+schemaTool_help () {
+  schemaTool -h
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/metastore-config.sh
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/metastore-config.sh b/standalone-metastore/src/main/scripts/metastore-config.sh
new file mode 100644
index 0000000..269eae9
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/metastore-config.sh
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# processes --config option from command line
+#
+
+this="$0"
+while [ -h "$this" ]; do
+  ls=`ls -ld "$this"`
+  link=`expr "$ls" : '.*-> \(.*\)$'`
+  if expr "$link" : '.*/.*' > /dev/null; then
+    this="$link"
+  else
+    this=`dirname "$this"`/"$link"
+  fi
+done
+
+# convert relative path to absolute path
+bin=`dirname "$this"`
+script=`basename "$this"`
+bin=`cd "$bin"; pwd`
+this="$bin/$script"
+
+# the root of the Hive installation
+if [[ -z $METASTORE_HOME ]] ; then
+  export METASTORE_HOME=`dirname "$bin"`
+fi
+
+#check to see if the conf dir is given as an optional argument
+while [ $# -gt 0 ]; do    # Until you run out of parameters . . .
+  case "$1" in
+    --config)
+        shift
+        confdir=$1
+        shift
+        METASTORE_CONF_DIR=$confdir
+        ;;
+    --auxpath)
+        shift
+        METASTORE_AUX_JARS_PATH=$1
+        shift
+        ;;
+    *)
+        break;
+        ;;
+  esac
+done
+
+
+# Allow alternate conf dir location.
+METASTORE_CONF_DIR="${METASTORE_CONF_DIR:-$METASTORE_HOME/conf}"
+
+export METASTORE_CONF_DIR=$METASTORE_CONF_DIR
+export METASTORE_AUX_JARS_PATH=$METASTORE_AUX_JARS_PATH
+
+# Default to a 256MB heap
+export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-256}

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/schematool
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/schematool b/standalone-metastore/src/main/scripts/schematool
new file mode 100644
index 0000000..19ef28f
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/schematool
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/base --service schemaTool "$@"

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/scripts/start-metastore
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/scripts/start-metastore b/standalone-metastore/src/main/scripts/start-metastore
new file mode 100644
index 0000000..aaa98bb
--- /dev/null
+++ b/standalone-metastore/src/main/scripts/start-metastore
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/base --service metastore "$@"


[45/50] [abbrv] hive git commit: HIVE-17990: Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
index 2219811..cc94082 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -132,6 +132,59 @@ struct EventRequestType {
 
 extern const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES;
 
+struct SerdeType {
+  enum type {
+    HIVE = 1,
+    SCHEMA_REGISTRY = 2
+  };
+};
+
+extern const std::map<int, const char*> _SerdeType_VALUES_TO_NAMES;
+
+struct SchemaType {
+  enum type {
+    HIVE = 1,
+    AVRO = 2
+  };
+};
+
+extern const std::map<int, const char*> _SchemaType_VALUES_TO_NAMES;
+
+struct SchemaCompatibility {
+  enum type {
+    NONE = 1,
+    BACKWARD = 2,
+    FORWARD = 3,
+    BOTH = 4
+  };
+};
+
+extern const std::map<int, const char*> _SchemaCompatibility_VALUES_TO_NAMES;
+
+struct SchemaValidation {
+  enum type {
+    LATEST = 1,
+    ALL = 2
+  };
+};
+
+extern const std::map<int, const char*> _SchemaValidation_VALUES_TO_NAMES;
+
+struct SchemaVersionState {
+  enum type {
+    INITIATED = 1,
+    START_REVIEW = 2,
+    CHANGES_REQUIRED = 3,
+    REVIEWED = 4,
+    ENABLED = 5,
+    DISABLED = 6,
+    ARCHIVED = 7,
+    DELETED = 8
+  };
+};
+
+extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES;
+
 struct FunctionType {
   enum type {
     JAVA = 1
@@ -529,6 +582,16 @@ class WMCreateOrDropTriggerToPoolMappingRequest;
 
 class WMCreateOrDropTriggerToPoolMappingResponse;
 
+class ISchema;
+
+class SchemaVersion;
+
+class FindSchemasByColsRqst;
+
+class FindSchemasByColsRespEntry;
+
+class FindSchemasByColsResp;
+
 class MetaException;
 
 class UnknownTableException;
@@ -2069,10 +2132,14 @@ inline std::ostream& operator<<(std::ostream& out, const Database& obj)
 }
 
 typedef struct _SerDeInfo__isset {
-  _SerDeInfo__isset() : name(false), serializationLib(false), parameters(false) {}
+  _SerDeInfo__isset() : name(false), serializationLib(false), parameters(false), description(false), serializerClass(false), deserializerClass(false), serdeType(false) {}
   bool name :1;
   bool serializationLib :1;
   bool parameters :1;
+  bool description :1;
+  bool serializerClass :1;
+  bool deserializerClass :1;
+  bool serdeType :1;
 } _SerDeInfo__isset;
 
 class SerDeInfo {
@@ -2080,13 +2147,17 @@ class SerDeInfo {
 
   SerDeInfo(const SerDeInfo&);
   SerDeInfo& operator=(const SerDeInfo&);
-  SerDeInfo() : name(), serializationLib() {
+  SerDeInfo() : name(), serializationLib(), description(), serializerClass(), deserializerClass(), serdeType((SerdeType::type)0) {
   }
 
   virtual ~SerDeInfo() throw();
   std::string name;
   std::string serializationLib;
   std::map<std::string, std::string>  parameters;
+  std::string description;
+  std::string serializerClass;
+  std::string deserializerClass;
+  SerdeType::type serdeType;
 
   _SerDeInfo__isset __isset;
 
@@ -2096,6 +2167,14 @@ class SerDeInfo {
 
   void __set_parameters(const std::map<std::string, std::string> & val);
 
+  void __set_description(const std::string& val);
+
+  void __set_serializerClass(const std::string& val);
+
+  void __set_deserializerClass(const std::string& val);
+
+  void __set_serdeType(const SerdeType::type val);
+
   bool operator == (const SerDeInfo & rhs) const
   {
     if (!(name == rhs.name))
@@ -2104,6 +2183,22 @@ class SerDeInfo {
       return false;
     if (!(parameters == rhs.parameters))
       return false;
+    if (__isset.description != rhs.__isset.description)
+      return false;
+    else if (__isset.description && !(description == rhs.description))
+      return false;
+    if (__isset.serializerClass != rhs.__isset.serializerClass)
+      return false;
+    else if (__isset.serializerClass && !(serializerClass == rhs.serializerClass))
+      return false;
+    if (__isset.deserializerClass != rhs.__isset.deserializerClass)
+      return false;
+    else if (__isset.deserializerClass && !(deserializerClass == rhs.deserializerClass))
+      return false;
+    if (__isset.serdeType != rhs.__isset.serdeType)
+      return false;
+    else if (__isset.serdeType && !(serdeType == rhs.serdeType))
+      return false;
     return true;
   }
   bool operator != (const SerDeInfo &rhs) const {
@@ -10466,6 +10561,372 @@ inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerTo
   return out;
 }
 
+typedef struct _ISchema__isset {
+  _ISchema__isset() : schemaType(false), name(false), dbName(false), compatibility(false), validationLevel(false), canEvolve(false), schemaGroup(false), description(false) {}
+  bool schemaType :1;
+  bool name :1;
+  bool dbName :1;
+  bool compatibility :1;
+  bool validationLevel :1;
+  bool canEvolve :1;
+  bool schemaGroup :1;
+  bool description :1;
+} _ISchema__isset;
+
+class ISchema {
+ public:
+
+  ISchema(const ISchema&);
+  ISchema& operator=(const ISchema&);
+  ISchema() : schemaType((SchemaType::type)0), name(), dbName(), compatibility((SchemaCompatibility::type)0), validationLevel((SchemaValidation::type)0), canEvolve(0), schemaGroup(), description() {
+  }
+
+  virtual ~ISchema() throw();
+  SchemaType::type schemaType;
+  std::string name;
+  std::string dbName;
+  SchemaCompatibility::type compatibility;
+  SchemaValidation::type validationLevel;
+  bool canEvolve;
+  std::string schemaGroup;
+  std::string description;
+
+  _ISchema__isset __isset;
+
+  void __set_schemaType(const SchemaType::type val);
+
+  void __set_name(const std::string& val);
+
+  void __set_dbName(const std::string& val);
+
+  void __set_compatibility(const SchemaCompatibility::type val);
+
+  void __set_validationLevel(const SchemaValidation::type val);
+
+  void __set_canEvolve(const bool val);
+
+  void __set_schemaGroup(const std::string& val);
+
+  void __set_description(const std::string& val);
+
+  bool operator == (const ISchema & rhs) const
+  {
+    if (!(schemaType == rhs.schemaType))
+      return false;
+    if (!(name == rhs.name))
+      return false;
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(compatibility == rhs.compatibility))
+      return false;
+    if (!(validationLevel == rhs.validationLevel))
+      return false;
+    if (!(canEvolve == rhs.canEvolve))
+      return false;
+    if (__isset.schemaGroup != rhs.__isset.schemaGroup)
+      return false;
+    else if (__isset.schemaGroup && !(schemaGroup == rhs.schemaGroup))
+      return false;
+    if (__isset.description != rhs.__isset.description)
+      return false;
+    else if (__isset.description && !(description == rhs.description))
+      return false;
+    return true;
+  }
+  bool operator != (const ISchema &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ISchema & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(ISchema &a, ISchema &b);
+
+inline std::ostream& operator<<(std::ostream& out, const ISchema& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SchemaVersion__isset {
+  _SchemaVersion__isset() : schemaName(false), version(false), createdAt(false), cols(false), state(false), description(false), schemaText(false), fingerprint(false), name(false), serDe(false) {}
+  bool schemaName :1;
+  bool version :1;
+  bool createdAt :1;
+  bool cols :1;
+  bool state :1;
+  bool description :1;
+  bool schemaText :1;
+  bool fingerprint :1;
+  bool name :1;
+  bool serDe :1;
+} _SchemaVersion__isset;
+
+class SchemaVersion {
+ public:
+
+  SchemaVersion(const SchemaVersion&);
+  SchemaVersion& operator=(const SchemaVersion&);
+  SchemaVersion() : schemaName(), version(0), createdAt(0), state((SchemaVersionState::type)0), description(), schemaText(), fingerprint(), name() {
+  }
+
+  virtual ~SchemaVersion() throw();
+  std::string schemaName;
+  int32_t version;
+  int64_t createdAt;
+  std::vector<FieldSchema>  cols;
+  SchemaVersionState::type state;
+  std::string description;
+  std::string schemaText;
+  std::string fingerprint;
+  std::string name;
+  SerDeInfo serDe;
+
+  _SchemaVersion__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  void __set_createdAt(const int64_t val);
+
+  void __set_cols(const std::vector<FieldSchema> & val);
+
+  void __set_state(const SchemaVersionState::type val);
+
+  void __set_description(const std::string& val);
+
+  void __set_schemaText(const std::string& val);
+
+  void __set_fingerprint(const std::string& val);
+
+  void __set_name(const std::string& val);
+
+  void __set_serDe(const SerDeInfo& val);
+
+  bool operator == (const SchemaVersion & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    if (!(createdAt == rhs.createdAt))
+      return false;
+    if (!(cols == rhs.cols))
+      return false;
+    if (__isset.state != rhs.__isset.state)
+      return false;
+    else if (__isset.state && !(state == rhs.state))
+      return false;
+    if (__isset.description != rhs.__isset.description)
+      return false;
+    else if (__isset.description && !(description == rhs.description))
+      return false;
+    if (__isset.schemaText != rhs.__isset.schemaText)
+      return false;
+    else if (__isset.schemaText && !(schemaText == rhs.schemaText))
+      return false;
+    if (__isset.fingerprint != rhs.__isset.fingerprint)
+      return false;
+    else if (__isset.fingerprint && !(fingerprint == rhs.fingerprint))
+      return false;
+    if (__isset.name != rhs.__isset.name)
+      return false;
+    else if (__isset.name && !(name == rhs.name))
+      return false;
+    if (__isset.serDe != rhs.__isset.serDe)
+      return false;
+    else if (__isset.serDe && !(serDe == rhs.serDe))
+      return false;
+    return true;
+  }
+  bool operator != (const SchemaVersion &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SchemaVersion & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SchemaVersion &a, SchemaVersion &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SchemaVersion& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _FindSchemasByColsRqst__isset {
+  _FindSchemasByColsRqst__isset() : colName(false), colNamespace(false), type(false) {}
+  bool colName :1;
+  bool colNamespace :1;
+  bool type :1;
+} _FindSchemasByColsRqst__isset;
+
+class FindSchemasByColsRqst {
+ public:
+
+  FindSchemasByColsRqst(const FindSchemasByColsRqst&);
+  FindSchemasByColsRqst& operator=(const FindSchemasByColsRqst&);
+  FindSchemasByColsRqst() : colName(), colNamespace(), type() {
+  }
+
+  virtual ~FindSchemasByColsRqst() throw();
+  std::string colName;
+  std::string colNamespace;
+  std::string type;
+
+  _FindSchemasByColsRqst__isset __isset;
+
+  void __set_colName(const std::string& val);
+
+  void __set_colNamespace(const std::string& val);
+
+  void __set_type(const std::string& val);
+
+  bool operator == (const FindSchemasByColsRqst & rhs) const
+  {
+    if (__isset.colName != rhs.__isset.colName)
+      return false;
+    else if (__isset.colName && !(colName == rhs.colName))
+      return false;
+    if (__isset.colNamespace != rhs.__isset.colNamespace)
+      return false;
+    else if (__isset.colNamespace && !(colNamespace == rhs.colNamespace))
+      return false;
+    if (__isset.type != rhs.__isset.type)
+      return false;
+    else if (__isset.type && !(type == rhs.type))
+      return false;
+    return true;
+  }
+  bool operator != (const FindSchemasByColsRqst &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const FindSchemasByColsRqst & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(FindSchemasByColsRqst &a, FindSchemasByColsRqst &b);
+
+inline std::ostream& operator<<(std::ostream& out, const FindSchemasByColsRqst& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _FindSchemasByColsRespEntry__isset {
+  _FindSchemasByColsRespEntry__isset() : schemaName(false), version(false) {}
+  bool schemaName :1;
+  bool version :1;
+} _FindSchemasByColsRespEntry__isset;
+
+class FindSchemasByColsRespEntry {
+ public:
+
+  FindSchemasByColsRespEntry(const FindSchemasByColsRespEntry&);
+  FindSchemasByColsRespEntry& operator=(const FindSchemasByColsRespEntry&);
+  FindSchemasByColsRespEntry() : schemaName(), version(0) {
+  }
+
+  virtual ~FindSchemasByColsRespEntry() throw();
+  std::string schemaName;
+  int32_t version;
+
+  _FindSchemasByColsRespEntry__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  bool operator == (const FindSchemasByColsRespEntry & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    return true;
+  }
+  bool operator != (const FindSchemasByColsRespEntry &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const FindSchemasByColsRespEntry & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(FindSchemasByColsRespEntry &a, FindSchemasByColsRespEntry &b);
+
+inline std::ostream& operator<<(std::ostream& out, const FindSchemasByColsRespEntry& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _FindSchemasByColsResp__isset {
+  _FindSchemasByColsResp__isset() : schemaVersions(false) {}
+  bool schemaVersions :1;
+} _FindSchemasByColsResp__isset;
+
+class FindSchemasByColsResp {
+ public:
+
+  FindSchemasByColsResp(const FindSchemasByColsResp&);
+  FindSchemasByColsResp& operator=(const FindSchemasByColsResp&);
+  FindSchemasByColsResp() {
+  }
+
+  virtual ~FindSchemasByColsResp() throw();
+  std::vector<FindSchemasByColsRespEntry>  schemaVersions;
+
+  _FindSchemasByColsResp__isset __isset;
+
+  void __set_schemaVersions(const std::vector<FindSchemasByColsRespEntry> & val);
+
+  bool operator == (const FindSchemasByColsResp & rhs) const
+  {
+    if (!(schemaVersions == rhs.schemaVersions))
+      return false;
+    return true;
+  }
+  bool operator != (const FindSchemasByColsResp &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const FindSchemasByColsResp & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(FindSchemasByColsResp &a, FindSchemasByColsResp &b);
+
+inline std::ostream& operator<<(std::ostream& out, const FindSchemasByColsResp& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
 typedef struct _MetaException__isset {
   _MetaException__isset() : message(false) {}
   bool message :1;

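For illustration, a minimal sketch of how a client might exercise the new structs through the Java bindings generated by this commit. The class, enum, and setter names are assumed to mirror the C++ declarations above, following Thrift's usual javabean generation; the serde and schema names used are made up.

// Minimal sketch, assuming the Java bindings mirror the C++ structs above.
import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
import org.apache.hadoop.hive.metastore.api.SchemaType;
import org.apache.hadoop.hive.metastore.api.SchemaValidation;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SerdeType;

public class SchemaRegistrySketch {
  public static void main(String[] args) {
    // SerDeInfo gains description, serializerClass, deserializerClass, serdeType
    SerDeInfo serde = new SerDeInfo();
    serde.setName("avro-serde");
    serde.setSerializationLib("org.apache.hadoop.hive.serde2.avro.AvroSerDe");
    serde.setDescription("serde backed by the schema registry");
    serde.setSerdeType(SerdeType.SCHEMA_REGISTRY);

    // An ISchema ties a named schema to its type, compatibility and validation policy
    ISchema schema = new ISchema();
    schema.setName("orders");
    schema.setDbName("default");
    schema.setSchemaType(SchemaType.AVRO);
    schema.setCompatibility(SchemaCompatibility.BACKWARD);
    schema.setValidationLevel(SchemaValidation.ALL);
    schema.setCanEvolve(true);
    System.out.println(schema);
  }
}
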
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
new file mode 100644
index 0000000..5ba07d7
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
@@ -0,0 +1,449 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FindSchemasByColsResp implements org.apache.thrift.TBase<FindSchemasByColsResp, FindSchemasByColsResp._Fields>, java.io.Serializable, Cloneable, Comparable<FindSchemasByColsResp> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FindSchemasByColsResp");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaVersions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FindSchemasByColsRespStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FindSchemasByColsRespTupleSchemeFactory());
+  }
+
+  private List<FindSchemasByColsRespEntry> schemaVersions; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEMA_VERSIONS((short)1, "schemaVersions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_VERSIONS
+          return SCHEMA_VERSIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("schemaVersions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FindSchemasByColsRespEntry.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FindSchemasByColsResp.class, metaDataMap);
+  }
+
+  public FindSchemasByColsResp() {
+  }
+
+  public FindSchemasByColsResp(
+    List<FindSchemasByColsRespEntry> schemaVersions)
+  {
+    this();
+    this.schemaVersions = schemaVersions;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FindSchemasByColsResp(FindSchemasByColsResp other) {
+    if (other.isSetSchemaVersions()) {
+      List<FindSchemasByColsRespEntry> __this__schemaVersions = new ArrayList<FindSchemasByColsRespEntry>(other.schemaVersions.size());
+      for (FindSchemasByColsRespEntry other_element : other.schemaVersions) {
+        __this__schemaVersions.add(new FindSchemasByColsRespEntry(other_element));
+      }
+      this.schemaVersions = __this__schemaVersions;
+    }
+  }
+
+  public FindSchemasByColsResp deepCopy() {
+    return new FindSchemasByColsResp(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaVersions = null;
+  }
+
+  public int getSchemaVersionsSize() {
+    return (this.schemaVersions == null) ? 0 : this.schemaVersions.size();
+  }
+
+  public java.util.Iterator<FindSchemasByColsRespEntry> getSchemaVersionsIterator() {
+    return (this.schemaVersions == null) ? null : this.schemaVersions.iterator();
+  }
+
+  public void addToSchemaVersions(FindSchemasByColsRespEntry elem) {
+    if (this.schemaVersions == null) {
+      this.schemaVersions = new ArrayList<FindSchemasByColsRespEntry>();
+    }
+    this.schemaVersions.add(elem);
+  }
+
+  public List<FindSchemasByColsRespEntry> getSchemaVersions() {
+    return this.schemaVersions;
+  }
+
+  public void setSchemaVersions(List<FindSchemasByColsRespEntry> schemaVersions) {
+    this.schemaVersions = schemaVersions;
+  }
+
+  public void unsetSchemaVersions() {
+    this.schemaVersions = null;
+  }
+
+  /** Returns true if field schemaVersions is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaVersions() {
+    return this.schemaVersions != null;
+  }
+
+  public void setSchemaVersionsIsSet(boolean value) {
+    if (!value) {
+      this.schemaVersions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      if (value == null) {
+        unsetSchemaVersions();
+      } else {
+        setSchemaVersions((List<FindSchemasByColsRespEntry>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      return getSchemaVersions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      return isSetSchemaVersions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FindSchemasByColsResp)
+      return this.equals((FindSchemasByColsResp)that);
+    return false;
+  }
+
+  public boolean equals(FindSchemasByColsResp that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaVersions = true && this.isSetSchemaVersions();
+    boolean that_present_schemaVersions = true && that.isSetSchemaVersions();
+    if (this_present_schemaVersions || that_present_schemaVersions) {
+      if (!(this_present_schemaVersions && that_present_schemaVersions))
+        return false;
+      if (!this.schemaVersions.equals(that.schemaVersions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaVersions = true && (isSetSchemaVersions());
+    list.add(present_schemaVersions);
+    if (present_schemaVersions)
+      list.add(schemaVersions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FindSchemasByColsResp other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaVersions()).compareTo(other.isSetSchemaVersions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaVersions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaVersions, other.schemaVersions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FindSchemasByColsResp(");
+    boolean first = true;
+
+    sb.append("schemaVersions:");
+    if (this.schemaVersions == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaVersions);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FindSchemasByColsRespStandardSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespStandardScheme getScheme() {
+      return new FindSchemasByColsRespStandardScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespStandardScheme extends StandardScheme<FindSchemasByColsResp> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_VERSIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list802 = iprot.readListBegin();
+                struct.schemaVersions = new ArrayList<FindSchemasByColsRespEntry>(_list802.size);
+                FindSchemasByColsRespEntry _elem803;
+                for (int _i804 = 0; _i804 < _list802.size; ++_i804)
+                {
+                  _elem803 = new FindSchemasByColsRespEntry();
+                  _elem803.read(iprot);
+                  struct.schemaVersions.add(_elem803);
+                }
+                iprot.readListEnd();
+              }
+              struct.setSchemaVersionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaVersions != null) {
+        oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size()));
+          for (FindSchemasByColsRespEntry _iter805 : struct.schemaVersions)
+          {
+            _iter805.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FindSchemasByColsRespTupleSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespTupleScheme getScheme() {
+      return new FindSchemasByColsRespTupleScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespTupleScheme extends TupleScheme<FindSchemasByColsResp> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaVersions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetSchemaVersions()) {
+        {
+          oprot.writeI32(struct.schemaVersions.size());
+          for (FindSchemasByColsRespEntry _iter806 : struct.schemaVersions)
+          {
+            _iter806.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list807 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.schemaVersions = new ArrayList<FindSchemasByColsRespEntry>(_list807.size);
+          FindSchemasByColsRespEntry _elem808;
+          for (int _i809 = 0; _i809 < _list807.size; ++_i809)
+          {
+            _elem808 = new FindSchemasByColsRespEntry();
+            _elem808.read(iprot);
+            struct.schemaVersions.add(_elem808);
+          }
+        }
+        struct.setSchemaVersionsIsSet(true);
+      }
+    }
+  }
+
+}
+

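As a quick illustration of the bean above, a minimal sketch that builds a response and walks its entries. It relies on FindSchemasByColsRespEntry (defined in the next file) for the (schemaName, version) constructor and getters; the schema names are made up.

// Minimal sketch of building and reading the response bean above.
import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRespEntry;

public class FindSchemasByColsRespSketch {
  public static void main(String[] args) {
    FindSchemasByColsResp resp = new FindSchemasByColsResp();
    // addToSchemaVersions lazily creates the backing list
    resp.addToSchemaVersions(new FindSchemasByColsRespEntry("orders", 1));
    resp.addToSchemaVersions(new FindSchemasByColsRespEntry("orders", 2));

    for (FindSchemasByColsRespEntry entry : resp.getSchemaVersions()) {
      System.out.println(entry.getSchemaName() + " v" + entry.getVersion());
    }
  }
}
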
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRespEntry.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRespEntry.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRespEntry.java
new file mode 100644
index 0000000..5a71d21
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRespEntry.java
@@ -0,0 +1,497 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FindSchemasByColsRespEntry implements org.apache.thrift.TBase<FindSchemasByColsRespEntry, FindSchemasByColsRespEntry._Fields>, java.io.Serializable, Cloneable, Comparable<FindSchemasByColsRespEntry> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FindSchemasByColsRespEntry");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FindSchemasByColsRespEntryStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FindSchemasByColsRespEntryTupleSchemeFactory());
+  }
+
+  private String schemaName; // required
+  private int version; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEMA_NAME((short)1, "schemaName"),
+    VERSION((short)2, "version");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_NAME
+          return SCHEMA_NAME;
+        case 2: // VERSION
+          return VERSION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __VERSION_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.VERSION, new org.apache.thrift.meta_data.FieldMetaData("version", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FindSchemasByColsRespEntry.class, metaDataMap);
+  }
+
+  public FindSchemasByColsRespEntry() {
+  }
+
+  public FindSchemasByColsRespEntry(
+    String schemaName,
+    int version)
+  {
+    this();
+    this.schemaName = schemaName;
+    this.version = version;
+    setVersionIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FindSchemasByColsRespEntry(FindSchemasByColsRespEntry other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetSchemaName()) {
+      this.schemaName = other.schemaName;
+    }
+    this.version = other.version;
+  }
+
+  public FindSchemasByColsRespEntry deepCopy() {
+    return new FindSchemasByColsRespEntry(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaName = null;
+    setVersionIsSet(false);
+    this.version = 0;
+  }
+
+  public String getSchemaName() {
+    return this.schemaName;
+  }
+
+  public void setSchemaName(String schemaName) {
+    this.schemaName = schemaName;
+  }
+
+  public void unsetSchemaName() {
+    this.schemaName = null;
+  }
+
+  /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaName() {
+    return this.schemaName != null;
+  }
+
+  public void setSchemaNameIsSet(boolean value) {
+    if (!value) {
+      this.schemaName = null;
+    }
+  }
+
+  public int getVersion() {
+    return this.version;
+  }
+
+  public void setVersion(int version) {
+    this.version = version;
+    setVersionIsSet(true);
+  }
+
+  public void unsetVersion() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VERSION_ISSET_ID);
+  }
+
+  /** Returns true if field version is set (has been assigned a value) and false otherwise */
+  public boolean isSetVersion() {
+    return EncodingUtils.testBit(__isset_bitfield, __VERSION_ISSET_ID);
+  }
+
+  public void setVersionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VERSION_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_NAME:
+      if (value == null) {
+        unsetSchemaName();
+      } else {
+        setSchemaName((String)value);
+      }
+      break;
+
+    case VERSION:
+      if (value == null) {
+        unsetVersion();
+      } else {
+        setVersion((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_NAME:
+      return getSchemaName();
+
+    case VERSION:
+      return getVersion();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_NAME:
+      return isSetSchemaName();
+    case VERSION:
+      return isSetVersion();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FindSchemasByColsRespEntry)
+      return this.equals((FindSchemasByColsRespEntry)that);
+    return false;
+  }
+
+  public boolean equals(FindSchemasByColsRespEntry that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaName = true && this.isSetSchemaName();
+    boolean that_present_schemaName = true && that.isSetSchemaName();
+    if (this_present_schemaName || that_present_schemaName) {
+      if (!(this_present_schemaName && that_present_schemaName))
+        return false;
+      if (!this.schemaName.equals(that.schemaName))
+        return false;
+    }
+
+    boolean this_present_version = true;
+    boolean that_present_version = true;
+    if (this_present_version || that_present_version) {
+      if (!(this_present_version && that_present_version))
+        return false;
+      if (this.version != that.version)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaName = true && (isSetSchemaName());
+    list.add(present_schemaName);
+    if (present_schemaName)
+      list.add(schemaName);
+
+    boolean present_version = true;
+    list.add(present_version);
+    if (present_version)
+      list.add(version);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FindSchemasByColsRespEntry other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetVersion()).compareTo(other.isSetVersion());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetVersion()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.version, other.version);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FindSchemasByColsRespEntry(");
+    boolean first = true;
+
+    sb.append("schemaName:");
+    if (this.schemaName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("version:");
+    sb.append(this.version);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FindSchemasByColsRespEntryStandardSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespEntryStandardScheme getScheme() {
+      return new FindSchemasByColsRespEntryStandardScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespEntryStandardScheme extends StandardScheme<FindSchemasByColsRespEntry> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRespEntry struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaName = iprot.readString();
+              struct.setSchemaNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // VERSION
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.version = iprot.readI32();
+              struct.setVersionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsRespEntry struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaName != null) {
+        oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC);
+        oprot.writeString(struct.schemaName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(VERSION_FIELD_DESC);
+      oprot.writeI32(struct.version);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FindSchemasByColsRespEntryTupleSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespEntryTupleScheme getScheme() {
+      return new FindSchemasByColsRespEntryTupleScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespEntryTupleScheme extends TupleScheme<FindSchemasByColsRespEntry> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRespEntry struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetVersion()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetSchemaName()) {
+        oprot.writeString(struct.schemaName);
+      }
+      if (struct.isSetVersion()) {
+        oprot.writeI32(struct.version);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRespEntry struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.schemaName = iprot.readString();
+        struct.setSchemaNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.version = iprot.readI32();
+        struct.setVersionIsSet(true);
+      }
+    }
+  }
+
+}
+

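The request side follows the same pattern. A minimal sketch using the optional-field setters of FindSchemasByColsRqst (shown in the next file), with made-up column values:

// Minimal sketch of a column-based schema search request. All three
// fields are optional; unset fields stay out of the serialized struct.
import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;

public class FindSchemasByColsRqstSketch {
  public static void main(String[] args) {
    FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
    rqst.setColName("order_id");
    rqst.setType("bigint");
    // colNamespace is deliberately left unset
    System.out.println(rqst.isSetColNamespace()); // prints false
  }
}
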
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
new file mode 100644
index 0000000..d5230c9
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
@@ -0,0 +1,605 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FindSchemasByColsRqst implements org.apache.thrift.TBase<FindSchemasByColsRqst, FindSchemasByColsRqst._Fields>, java.io.Serializable, Cloneable, Comparable<FindSchemasByColsRqst> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FindSchemasByColsRqst");
+
+  private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("colName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField COL_NAMESPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("colNamespace", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FindSchemasByColsRqstStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FindSchemasByColsRqstTupleSchemeFactory());
+  }
+
+  private String colName; // optional
+  private String colNamespace; // optional
+  private String type; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COL_NAME((short)1, "colName"),
+    COL_NAMESPACE((short)2, "colNamespace"),
+    TYPE((short)3, "type");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COL_NAME
+          return COL_NAME;
+        case 2: // COL_NAMESPACE
+          return COL_NAMESPACE;
+        case 3: // TYPE
+          return TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.COL_NAME,_Fields.COL_NAMESPACE,_Fields.TYPE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("colName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COL_NAMESPACE, new org.apache.thrift.meta_data.FieldMetaData("colNamespace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FindSchemasByColsRqst.class, metaDataMap);
+  }
+
+  public FindSchemasByColsRqst() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FindSchemasByColsRqst(FindSchemasByColsRqst other) {
+    if (other.isSetColName()) {
+      this.colName = other.colName;
+    }
+    if (other.isSetColNamespace()) {
+      this.colNamespace = other.colNamespace;
+    }
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+  }
+
+  public FindSchemasByColsRqst deepCopy() {
+    return new FindSchemasByColsRqst(this);
+  }
+
+  @Override
+  public void clear() {
+    this.colName = null;
+    this.colNamespace = null;
+    this.type = null;
+  }
+
+  public String getColName() {
+    return this.colName;
+  }
+
+  public void setColName(String colName) {
+    this.colName = colName;
+  }
+
+  public void unsetColName() {
+    this.colName = null;
+  }
+
+  /** Returns true if field colName is set (has been assigned a value) and false otherwise */
+  public boolean isSetColName() {
+    return this.colName != null;
+  }
+
+  public void setColNameIsSet(boolean value) {
+    if (!value) {
+      this.colName = null;
+    }
+  }
+
+  public String getColNamespace() {
+    return this.colNamespace;
+  }
+
+  public void setColNamespace(String colNamespace) {
+    this.colNamespace = colNamespace;
+  }
+
+  public void unsetColNamespace() {
+    this.colNamespace = null;
+  }
+
+  /** Returns true if field colNamespace is set (has been assigned a value) and false otherwise */
+  public boolean isSetColNamespace() {
+    return this.colNamespace != null;
+  }
+
+  public void setColNamespaceIsSet(boolean value) {
+    if (!value) {
+      this.colNamespace = null;
+    }
+  }
+
+  public String getType() {
+    return this.type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COL_NAME:
+      if (value == null) {
+        unsetColName();
+      } else {
+        setColName((String)value);
+      }
+      break;
+
+    case COL_NAMESPACE:
+      if (value == null) {
+        unsetColNamespace();
+      } else {
+        setColNamespace((String)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COL_NAME:
+      return getColName();
+
+    case COL_NAMESPACE:
+      return getColNamespace();
+
+    case TYPE:
+      return getType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COL_NAME:
+      return isSetColName();
+    case COL_NAMESPACE:
+      return isSetColNamespace();
+    case TYPE:
+      return isSetType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FindSchemasByColsRqst)
+      return this.equals((FindSchemasByColsRqst)that);
+    return false;
+  }
+
+  public boolean equals(FindSchemasByColsRqst that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_colName = true && this.isSetColName();
+    boolean that_present_colName = true && that.isSetColName();
+    if (this_present_colName || that_present_colName) {
+      if (!(this_present_colName && that_present_colName))
+        return false;
+      if (!this.colName.equals(that.colName))
+        return false;
+    }
+
+    boolean this_present_colNamespace = true && this.isSetColNamespace();
+    boolean that_present_colNamespace = true && that.isSetColNamespace();
+    if (this_present_colNamespace || that_present_colNamespace) {
+      if (!(this_present_colNamespace && that_present_colNamespace))
+        return false;
+      if (!this.colNamespace.equals(that.colNamespace))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_colName = true && (isSetColName());
+    list.add(present_colName);
+    if (present_colName)
+      list.add(colName);
+
+    boolean present_colNamespace = true && (isSetColNamespace());
+    list.add(present_colNamespace);
+    if (present_colNamespace)
+      list.add(colNamespace);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FindSchemasByColsRqst other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetColName()).compareTo(other.isSetColName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colName, other.colName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColNamespace()).compareTo(other.isSetColNamespace());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColNamespace()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colNamespace, other.colNamespace);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FindSchemasByColsRqst(");
+    boolean first = true;
+
+    if (isSetColName()) {
+      sb.append("colName:");
+      if (this.colName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.colName);
+      }
+      first = false;
+    }
+    if (isSetColNamespace()) {
+      if (!first) sb.append(", ");
+      sb.append("colNamespace:");
+      if (this.colNamespace == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.colNamespace);
+      }
+      first = false;
+    }
+    if (isSetType()) {
+      if (!first) sb.append(", ");
+      sb.append("type:");
+      if (this.type == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.type);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FindSchemasByColsRqstStandardSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRqstStandardScheme getScheme() {
+      return new FindSchemasByColsRqstStandardScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRqstStandardScheme extends StandardScheme<FindSchemasByColsRqst> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colName = iprot.readString();
+              struct.setColNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // COL_NAMESPACE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colNamespace = iprot.readString();
+              struct.setColNamespaceIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.type = iprot.readString();
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.colName != null) {
+        if (struct.isSetColName()) {
+          oprot.writeFieldBegin(COL_NAME_FIELD_DESC);
+          oprot.writeString(struct.colName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.colNamespace != null) {
+        if (struct.isSetColNamespace()) {
+          oprot.writeFieldBegin(COL_NAMESPACE_FIELD_DESC);
+          oprot.writeString(struct.colNamespace);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.type != null) {
+        if (struct.isSetType()) {
+          oprot.writeFieldBegin(TYPE_FIELD_DESC);
+          oprot.writeString(struct.type);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FindSchemasByColsRqstTupleSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRqstTupleScheme getScheme() {
+      return new FindSchemasByColsRqstTupleScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRqstTupleScheme extends TupleScheme<FindSchemasByColsRqst> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetColName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetColNamespace()) {
+        optionals.set(1);
+      }
+      if (struct.isSetType()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetColName()) {
+        oprot.writeString(struct.colName);
+      }
+      if (struct.isSetColNamespace()) {
+        oprot.writeString(struct.colNamespace);
+      }
+      if (struct.isSetType()) {
+        oprot.writeString(struct.type);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.colName = iprot.readString();
+        struct.setColNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.colNamespace = iprot.readString();
+        struct.setColNamespaceIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.type = iprot.readString();
+        struct.setTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
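For reference, FindSchemasByColsRqst follows the standard Thrift optional-field pattern: every field gets set/unset/isSet accessors, and equals/hashCode/deepCopy only consider fields that have been assigned. A minimal usage sketch, assuming the generated class sits in the usual org.apache.hadoop.hive.metastore.api package (the demo class itself is hypothetical, not part of this commit):

import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;

public class FindSchemasByColsRqstDemo {
  public static void main(String[] args) {
    FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
    rqst.setColName("id");     // optional field: counts as set once assigned
    rqst.setType("string");    // colNamespace is deliberately left unset

    System.out.println(rqst.isSetColName());      // true
    System.out.println(rqst.isSetColNamespace()); // false

    // The copy constructor behind deepCopy() copies only the fields that are
    // set, so the copy compares equal and hashes identically.
    FindSchemasByColsRqst copy = rqst.deepCopy();
    System.out.println(rqst.equals(copy) && rqst.hashCode() == copy.hashCode());

    rqst.unsetColName();       // optional fields can be cleared again
    System.out.println(rqst);  // toString() prints only the set fields
  }
}

After unsetColName(), toString() prints just "FindSchemasByColsRqst(type:string)", since unset optional fields are skipped entirely.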


[48/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index dc9540d..7c8448c 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -201,6 +201,20 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) = 0;
   virtual void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) = 0;
   virtual void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) = 0;
+  virtual void create_ischema(const ISchema& schema) = 0;
+  virtual void alter_ischema(const std::string& schemaName, const ISchema& newSchema) = 0;
+  virtual void get_ischema(ISchema& _return, const std::string& schemaName) = 0;
+  virtual void drop_ischema(const std::string& schemaName) = 0;
+  virtual void add_schema_version(const SchemaVersion& schemaVersion) = 0;
+  virtual void get_schema_version(SchemaVersion& _return, const std::string& schemaName, const int32_t version) = 0;
+  virtual void get_schema_latest_version(SchemaVersion& _return, const std::string& schemaName) = 0;
+  virtual void get_schema_all_versions(std::vector<SchemaVersion> & _return, const std::string& schemaName) = 0;
+  virtual void drop_schema_version(const std::string& schemaName, const int32_t version) = 0;
+  virtual void get_schemas_by_cols(FindSchemasByColsResp& _return, const FindSchemasByColsRqst& rqst) = 0;
+  virtual void map_schema_version_to_serde(const std::string& schemaName, const int32_t version, const std::string& serdeName) = 0;
+  virtual void set_schema_version_state(const std::string& schemaName, const int32_t version, const SchemaVersionState::type state) = 0;
+  virtual void add_serde(const SerDeInfo& serde) = 0;
+  virtual void get_serde(SerDeInfo& _return, const std::string& serdeName) = 0;
 };
 
 class ThriftHiveMetastoreIfFactory : virtual public  ::facebook::fb303::FacebookServiceIfFactory {
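The fourteen new virtuals above are the C++ face of the schema registry API; the same service definition also generates a Java ThriftHiveMetastore.Client with matching methods. A minimal sketch of a raw client call, assuming an unsecured metastore on the conventional localhost:9083 endpoint (the host, port, and demo class name are illustrative, and real deployments typically require SASL/Kerberos transport setup on top of this):

import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class SchemaRegistryClientDemo {
  public static void main(String[] args) throws Exception {
    // Plain socket + binary protocol: the simplest possible Thrift setup.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // All three filter fields on the request are optional, so the sketch
    // sets only colName: find schemas containing a column named "id".
    FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
    rqst.setColName("id");
    FindSchemasByColsResp resp = client.get_schemas_by_cols(rqst);
    System.out.println(resp);

    transport.close();
  }
}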
@@ -795,6 +809,48 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& /* _return */, const WMCreateOrDropTriggerToPoolMappingRequest& /* request */) {
     return;
   }
+  void create_ischema(const ISchema& /* schema */) {
+    return;
+  }
+  void alter_ischema(const std::string& /* schemaName */, const ISchema& /* newSchema */) {
+    return;
+  }
+  void get_ischema(ISchema& /* _return */, const std::string& /* schemaName */) {
+    return;
+  }
+  void drop_ischema(const std::string& /* schemaName */) {
+    return;
+  }
+  void add_schema_version(const SchemaVersion& /* schemaVersion */) {
+    return;
+  }
+  void get_schema_version(SchemaVersion& /* _return */, const std::string& /* schemaName */, const int32_t /* version */) {
+    return;
+  }
+  void get_schema_latest_version(SchemaVersion& /* _return */, const std::string& /* schemaName */) {
+    return;
+  }
+  void get_schema_all_versions(std::vector<SchemaVersion> & /* _return */, const std::string& /* schemaName */) {
+    return;
+  }
+  void drop_schema_version(const std::string& /* schemaName */, const int32_t /* version */) {
+    return;
+  }
+  void get_schemas_by_cols(FindSchemasByColsResp& /* _return */, const FindSchemasByColsRqst& /* rqst */) {
+    return;
+  }
+  void map_schema_version_to_serde(const std::string& /* schemaName */, const int32_t /* version */, const std::string& /* serdeName */) {
+    return;
+  }
+  void set_schema_version_state(const std::string& /* schemaName */, const int32_t /* version */, const SchemaVersionState::type /* state */) {
+    return;
+  }
+  void add_serde(const SerDeInfo& /* serde */) {
+    return;
+  }
+  void get_serde(SerDeInfo& /* _return */, const std::string& /* serdeName */) {
+    return;
+  }
 };
 
 typedef struct _ThriftHiveMetastore_getMetaConf_args__isset {
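The large hunk that follows (note the header: 228 old lines become 1,917) does not delete the ThriftHiveMetastoreClient declarations; it shifts them further down the header to make room for the per-call helper structs generated for each new schema method. Thrift's C++ generator emits four classes per RPC: <name>_args and <name>_result hold values and are used on the server side to deserialize requests and serialize responses, while the pointer-based <name>_pargs and <name>_presult are used by the client to serialize arguments without copying and to read results into caller-provided storage. The create_ischema family below is the first such group.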
@@ -22905,228 +22961,1917 @@ class ThriftHiveMetastore_create_or_drop_wm_trigger_to_pool_mapping_presult {
 
 };
 
-class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public  ::facebook::fb303::FacebookServiceClient {
+typedef struct _ThriftHiveMetastore_create_ischema_args__isset {
+  _ThriftHiveMetastore_create_ischema_args__isset() : schema(false) {}
+  bool schema :1;
+} _ThriftHiveMetastore_create_ischema_args__isset;
+
+class ThriftHiveMetastore_create_ischema_args {
  public:
-  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
-     ::facebook::fb303::FacebookServiceClient(prot, prot) {}
-  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) :     ::facebook::fb303::FacebookServiceClient(iprot, oprot) {}
-  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
-    return piprot_;
+
+  ThriftHiveMetastore_create_ischema_args(const ThriftHiveMetastore_create_ischema_args&);
+  ThriftHiveMetastore_create_ischema_args& operator=(const ThriftHiveMetastore_create_ischema_args&);
+  ThriftHiveMetastore_create_ischema_args() {
   }
-  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
-    return poprot_;
+
+  virtual ~ThriftHiveMetastore_create_ischema_args() throw();
+  ISchema schema;
+
+  _ThriftHiveMetastore_create_ischema_args__isset __isset;
+
+  void __set_schema(const ISchema& val);
+
+  bool operator == (const ThriftHiveMetastore_create_ischema_args & rhs) const
+  {
+    if (!(schema == rhs.schema))
+      return false;
+    return true;
   }
-  void getMetaConf(std::string& _return, const std::string& key);
-  void send_getMetaConf(const std::string& key);
-  void recv_getMetaConf(std::string& _return);
-  void setMetaConf(const std::string& key, const std::string& value);
-  void send_setMetaConf(const std::string& key, const std::string& value);
-  void recv_setMetaConf();
-  void create_database(const Database& database);
-  void send_create_database(const Database& database);
-  void recv_create_database();
-  void get_database(Database& _return, const std::string& name);
-  void send_get_database(const std::string& name);
-  void recv_get_database(Database& _return);
-  void drop_database(const std::string& name, const bool deleteData, const bool cascade);
-  void send_drop_database(const std::string& name, const bool deleteData, const bool cascade);
-  void recv_drop_database();
-  void get_databases(std::vector<std::string> & _return, const std::string& pattern);
-  void send_get_databases(const std::string& pattern);
-  void recv_get_databases(std::vector<std::string> & _return);
-  void get_all_databases(std::vector<std::string> & _return);
-  void send_get_all_databases();
-  void recv_get_all_databases(std::vector<std::string> & _return);
-  void alter_database(const std::string& dbname, const Database& db);
-  void send_alter_database(const std::string& dbname, const Database& db);
-  void recv_alter_database();
-  void get_type(Type& _return, const std::string& name);
-  void send_get_type(const std::string& name);
-  void recv_get_type(Type& _return);
-  bool create_type(const Type& type);
-  void send_create_type(const Type& type);
-  bool recv_create_type();
-  bool drop_type(const std::string& type);
-  void send_drop_type(const std::string& type);
-  bool recv_drop_type();
-  void get_type_all(std::map<std::string, Type> & _return, const std::string& name);
-  void send_get_type_all(const std::string& name);
-  void recv_get_type_all(std::map<std::string, Type> & _return);
-  void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
-  void send_get_fields(const std::string& db_name, const std::string& table_name);
-  void recv_get_fields(std::vector<FieldSchema> & _return);
-  void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
-  void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
-  void recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return);
-  void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
-  void send_get_schema(const std::string& db_name, const std::string& table_name);
-  void recv_get_schema(std::vector<FieldSchema> & _return);
-  void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
-  void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
-  void recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return);
-  void create_table(const Table& tbl);
-  void send_create_table(const Table& tbl);
-  void recv_create_table();
-  void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
-  void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
-  void recv_create_table_with_environment_context();
-  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
-  void send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
-  void recv_create_table_with_constraints();
-  void drop_constraint(const DropConstraintRequest& req);
-  void send_drop_constraint(const DropConstraintRequest& req);
-  void recv_drop_constraint();
-  void add_primary_key(const AddPrimaryKeyRequest& req);
-  void send_add_primary_key(const AddPrimaryKeyRequest& req);
-  void recv_add_primary_key();
-  void add_foreign_key(const AddForeignKeyRequest& req);
-  void send_add_foreign_key(const AddForeignKeyRequest& req);
-  void recv_add_foreign_key();
-  void add_unique_constraint(const AddUniqueConstraintRequest& req);
-  void send_add_unique_constraint(const AddUniqueConstraintRequest& req);
-  void recv_add_unique_constraint();
-  void add_not_null_constraint(const AddNotNullConstraintRequest& req);
-  void send_add_not_null_constraint(const AddNotNullConstraintRequest& req);
-  void recv_add_not_null_constraint();
-  void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
-  void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
-  void recv_drop_table();
-  void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
-  void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
-  void recv_drop_table_with_environment_context();
-  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
-  void recv_truncate_table();
-  void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
-  void send_get_tables(const std::string& db_name, const std::string& pattern);
-  void recv_get_tables(std::vector<std::string> & _return);
-  void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType);
-  void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType);
-  void recv_get_tables_by_type(std::vector<std::string> & _return);
-  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
-  void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
-  void recv_get_table_meta(std::vector<TableMeta> & _return);
-  void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
-  void send_get_all_tables(const std::string& db_name);
-  void recv_get_all_tables(std::vector<std::string> & _return);
-  void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name);
-  void send_get_table(const std::string& dbname, const std::string& tbl_name);
-  void recv_get_table(Table& _return);
-  void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
-  void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names);
-  void recv_get_table_objects_by_name(std::vector<Table> & _return);
-  void get_table_req(GetTableResult& _return, const GetTableRequest& req);
-  void send_get_table_req(const GetTableRequest& req);
-  void recv_get_table_req(GetTableResult& _return);
-  void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
-  void send_get_table_objects_by_name_req(const GetTablesRequest& req);
-  void recv_get_table_objects_by_name_req(GetTablesResult& _return);
-  void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
-  void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
-  void recv_get_table_names_by_filter(std::vector<std::string> & _return);
-  void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
-  void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
-  void recv_alter_table();
-  void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
-  void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
-  void recv_alter_table_with_environment_context();
-  void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
-  void send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade);
-  void recv_alter_table_with_cascade();
-  void add_partition(Partition& _return, const Partition& new_part);
-  void send_add_partition(const Partition& new_part);
-  void recv_add_partition(Partition& _return);
-  void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context);
-  void send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context);
-  void recv_add_partition_with_environment_context(Partition& _return);
-  int32_t add_partitions(const std::vector<Partition> & new_parts);
-  void send_add_partitions(const std::vector<Partition> & new_parts);
-  int32_t recv_add_partitions();
-  int32_t add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
-  void send_add_partitions_pspec(const std::vector<PartitionSpec> & new_parts);
-  int32_t recv_add_partitions_pspec();
-  void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
-  void send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
-  void recv_append_partition(Partition& _return);
-  void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request);
-  void send_add_partitions_req(const AddPartitionsRequest& request);
-  void recv_add_partitions_req(AddPartitionsResult& _return);
-  void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
-  void send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context);
-  void recv_append_partition_with_environment_context(Partition& _return);
-  void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
-  void send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
-  void recv_append_partition_by_name(Partition& _return);
-  void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
-  void send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context);
-  void recv_append_partition_by_name_with_environment_context(Partition& _return);
-  bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
-  void send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData);
-  bool recv_drop_partition();
-  bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
-  void send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context);
-  bool recv_drop_partition_with_environment_context();
-  bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
-  void send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData);
-  bool recv_drop_partition_by_name();
-  bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
-  void send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context);
-  bool recv_drop_partition_by_name_with_environment_context();
-  void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req);
-  void send_drop_partitions_req(const DropPartitionsRequest& req);
-  void recv_drop_partitions_req(DropPartitionsResult& _return);
-  void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
-  void send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals);
-  void recv_get_partition(Partition& _return);
-  void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
-  void send_exchange_partition(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
-  void recv_exchange_partition(Partition& _return);
-  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
-  void send_exchange_partitions(const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name);
-  void recv_exchange_partitions(std::vector<Partition> & _return);
-  void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
-  void send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names);
-  void recv_get_partition_with_auth(Partition& _return);
-  void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
-  void send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name);
-  void recv_get_partition_by_name(Partition& _return);
-  void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
-  void send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
-  void recv_get_partitions(std::vector<Partition> & _return);
-  void get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
-  void send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
-  void recv_get_partitions_with_auth(std::vector<Partition> & _return);
-  void get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
-  void send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts);
-  void recv_get_partitions_pspec(std::vector<PartitionSpec> & _return);
-  void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
-  void send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts);
-  void recv_get_partition_names(std::vector<std::string> & _return);
-  void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request);
-  void send_get_partition_values(const PartitionValuesRequest& request);
-  void recv_get_partition_values(PartitionValuesResponse& _return);
-  void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
-  void send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
-  void recv_get_partitions_ps(std::vector<Partition> & _return);
-  void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
-  void send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names);
-  void recv_get_partitions_ps_with_auth(std::vector<Partition> & _return);
-  void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
-  void send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts);
-  void recv_get_partition_names_ps(std::vector<std::string> & _return);
-  void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
-  void send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts);
-  void recv_get_partitions_by_filter(std::vector<Partition> & _return);
-  void get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
-  void send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts);
-  void recv_get_part_specs_by_filter(std::vector<PartitionSpec> & _return);
-  void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req);
-  void send_get_partitions_by_expr(const PartitionsByExprRequest& req);
-  void recv_get_partitions_by_expr(PartitionsByExprResult& _return);
-  int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
-  void send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter);
-  int32_t recv_get_num_partitions_by_filter();
-  void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names);
+  bool operator != (const ThriftHiveMetastore_create_ischema_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_ischema_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_ischema_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_ischema_pargs() throw();
+  const ISchema* schema;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_ischema_result__isset {
+  _ThriftHiveMetastore_create_ischema_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_ischema_result__isset;
+
+class ThriftHiveMetastore_create_ischema_result {
+ public:
+
+  ThriftHiveMetastore_create_ischema_result(const ThriftHiveMetastore_create_ischema_result&);
+  ThriftHiveMetastore_create_ischema_result& operator=(const ThriftHiveMetastore_create_ischema_result&);
+  ThriftHiveMetastore_create_ischema_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_ischema_result() throw();
+  AlreadyExistsException o1;
+  NoSuchObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_ischema_result__isset __isset;
+
+  void __set_o1(const AlreadyExistsException& val);
+
+  void __set_o2(const NoSuchObjectException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_create_ischema_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_ischema_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_ischema_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_ischema_presult__isset {
+  _ThriftHiveMetastore_create_ischema_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_ischema_presult__isset;
+
+class ThriftHiveMetastore_create_ischema_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_ischema_presult() throw();
+  AlreadyExistsException o1;
+  NoSuchObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_ischema_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_ischema_args__isset {
+  _ThriftHiveMetastore_alter_ischema_args__isset() : schemaName(false), newSchema(false) {}
+  bool schemaName :1;
+  bool newSchema :1;
+} _ThriftHiveMetastore_alter_ischema_args__isset;
+
+class ThriftHiveMetastore_alter_ischema_args {
+ public:
+
+  ThriftHiveMetastore_alter_ischema_args(const ThriftHiveMetastore_alter_ischema_args&);
+  ThriftHiveMetastore_alter_ischema_args& operator=(const ThriftHiveMetastore_alter_ischema_args&);
+  ThriftHiveMetastore_alter_ischema_args() : schemaName() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_ischema_args() throw();
+  std::string schemaName;
+  ISchema newSchema;
+
+  _ThriftHiveMetastore_alter_ischema_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_newSchema(const ISchema& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_ischema_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(newSchema == rhs.newSchema))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_ischema_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_ischema_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_ischema_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_ischema_pargs() throw();
+  const std::string* schemaName;
+  const ISchema* newSchema;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_ischema_result__isset {
+  _ThriftHiveMetastore_alter_ischema_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_alter_ischema_result__isset;
+
+class ThriftHiveMetastore_alter_ischema_result {
+ public:
+
+  ThriftHiveMetastore_alter_ischema_result(const ThriftHiveMetastore_alter_ischema_result&);
+  ThriftHiveMetastore_alter_ischema_result& operator=(const ThriftHiveMetastore_alter_ischema_result&);
+  ThriftHiveMetastore_alter_ischema_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_ischema_result() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_ischema_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_ischema_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_ischema_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_ischema_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_ischema_presult__isset {
+  _ThriftHiveMetastore_alter_ischema_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_alter_ischema_presult__isset;
+
+class ThriftHiveMetastore_alter_ischema_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_ischema_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_alter_ischema_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_ischema_args__isset {
+  _ThriftHiveMetastore_get_ischema_args__isset() : schemaName(false) {}
+  bool schemaName :1;
+} _ThriftHiveMetastore_get_ischema_args__isset;
+
+class ThriftHiveMetastore_get_ischema_args {
+ public:
+
+  ThriftHiveMetastore_get_ischema_args(const ThriftHiveMetastore_get_ischema_args&);
+  ThriftHiveMetastore_get_ischema_args& operator=(const ThriftHiveMetastore_get_ischema_args&);
+  ThriftHiveMetastore_get_ischema_args() : schemaName() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_ischema_args() throw();
+  std::string schemaName;
+
+  _ThriftHiveMetastore_get_ischema_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_ischema_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_ischema_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_ischema_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_ischema_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_ischema_pargs() throw();
+  const std::string* schemaName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_ischema_result__isset {
+  _ThriftHiveMetastore_get_ischema_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_ischema_result__isset;
+
+class ThriftHiveMetastore_get_ischema_result {
+ public:
+
+  ThriftHiveMetastore_get_ischema_result(const ThriftHiveMetastore_get_ischema_result&);
+  ThriftHiveMetastore_get_ischema_result& operator=(const ThriftHiveMetastore_get_ischema_result&);
+  ThriftHiveMetastore_get_ischema_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_ischema_result() throw();
+  ISchema success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_ischema_result__isset __isset;
+
+  void __set_success(const ISchema& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_ischema_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_ischema_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_ischema_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_ischema_presult__isset {
+  _ThriftHiveMetastore_get_ischema_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_ischema_presult__isset;
+
+class ThriftHiveMetastore_get_ischema_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_ischema_presult() throw();
+  ISchema* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_ischema_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_ischema_args__isset {
+  _ThriftHiveMetastore_drop_ischema_args__isset() : schemaName(false) {}
+  bool schemaName :1;
+} _ThriftHiveMetastore_drop_ischema_args__isset;
+
+class ThriftHiveMetastore_drop_ischema_args {
+ public:
+
+  ThriftHiveMetastore_drop_ischema_args(const ThriftHiveMetastore_drop_ischema_args&);
+  ThriftHiveMetastore_drop_ischema_args& operator=(const ThriftHiveMetastore_drop_ischema_args&);
+  ThriftHiveMetastore_drop_ischema_args() : schemaName() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_ischema_args() throw();
+  std::string schemaName;
+
+  _ThriftHiveMetastore_drop_ischema_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_ischema_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_ischema_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_ischema_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_ischema_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_ischema_pargs() throw();
+  const std::string* schemaName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_ischema_result__isset {
+  _ThriftHiveMetastore_drop_ischema_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_drop_ischema_result__isset;
+
+class ThriftHiveMetastore_drop_ischema_result {
+ public:
+
+  ThriftHiveMetastore_drop_ischema_result(const ThriftHiveMetastore_drop_ischema_result&);
+  ThriftHiveMetastore_drop_ischema_result& operator=(const ThriftHiveMetastore_drop_ischema_result&);
+  ThriftHiveMetastore_drop_ischema_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_ischema_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_drop_ischema_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_ischema_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_ischema_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_ischema_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_ischema_presult__isset {
+  _ThriftHiveMetastore_drop_ischema_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_drop_ischema_presult__isset;
+
+class ThriftHiveMetastore_drop_ischema_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_ischema_presult() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_drop_ischema_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_add_schema_version_args__isset {
+  _ThriftHiveMetastore_add_schema_version_args__isset() : schemaVersion(false) {}
+  bool schemaVersion :1;
+} _ThriftHiveMetastore_add_schema_version_args__isset;
+
+class ThriftHiveMetastore_add_schema_version_args {
+ public:
+
+  ThriftHiveMetastore_add_schema_version_args(const ThriftHiveMetastore_add_schema_version_args&);
+  ThriftHiveMetastore_add_schema_version_args& operator=(const ThriftHiveMetastore_add_schema_version_args&);
+  ThriftHiveMetastore_add_schema_version_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_add_schema_version_args() throw();
+  SchemaVersion schemaVersion;
+
+  _ThriftHiveMetastore_add_schema_version_args__isset __isset;
+
+  void __set_schemaVersion(const SchemaVersion& val);
+
+  bool operator == (const ThriftHiveMetastore_add_schema_version_args & rhs) const
+  {
+    if (!(schemaVersion == rhs.schemaVersion))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_add_schema_version_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_add_schema_version_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_add_schema_version_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_add_schema_version_pargs() throw();
+  const SchemaVersion* schemaVersion;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_add_schema_version_result__isset {
+  _ThriftHiveMetastore_add_schema_version_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_add_schema_version_result__isset;
+
+class ThriftHiveMetastore_add_schema_version_result {
+ public:
+
+  ThriftHiveMetastore_add_schema_version_result(const ThriftHiveMetastore_add_schema_version_result&);
+  ThriftHiveMetastore_add_schema_version_result& operator=(const ThriftHiveMetastore_add_schema_version_result&);
+  ThriftHiveMetastore_add_schema_version_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_add_schema_version_result() throw();
+  AlreadyExistsException o1;
+  NoSuchObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_add_schema_version_result__isset __isset;
+
+  void __set_o1(const AlreadyExistsException& val);
+
+  void __set_o2(const NoSuchObjectException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_add_schema_version_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_add_schema_version_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_add_schema_version_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_add_schema_version_presult__isset {
+  _ThriftHiveMetastore_add_schema_version_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_add_schema_version_presult__isset;
+
+class ThriftHiveMetastore_add_schema_version_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_add_schema_version_presult() throw();
+  AlreadyExistsException o1;
+  NoSuchObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_add_schema_version_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_version_args__isset {
+  _ThriftHiveMetastore_get_schema_version_args__isset() : schemaName(false), version(false) {}
+  bool schemaName :1;
+  bool version :1;
+} _ThriftHiveMetastore_get_schema_version_args__isset;
+
+class ThriftHiveMetastore_get_schema_version_args {
+ public:
+
+  ThriftHiveMetastore_get_schema_version_args(const ThriftHiveMetastore_get_schema_version_args&);
+  ThriftHiveMetastore_get_schema_version_args& operator=(const ThriftHiveMetastore_get_schema_version_args&);
+  ThriftHiveMetastore_get_schema_version_args() : schemaName(), version(0) {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_version_args() throw();
+  std::string schemaName;
+  int32_t version;
+
+  _ThriftHiveMetastore_get_schema_version_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_version_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_version_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_version_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_schema_version_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_version_pargs() throw();
+  const std::string* schemaName;
+  const int32_t* version;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_version_result__isset {
+  _ThriftHiveMetastore_get_schema_version_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_version_result__isset;
+
+class ThriftHiveMetastore_get_schema_version_result {
+ public:
+
+  ThriftHiveMetastore_get_schema_version_result(const ThriftHiveMetastore_get_schema_version_result&);
+  ThriftHiveMetastore_get_schema_version_result& operator=(const ThriftHiveMetastore_get_schema_version_result&);
+  ThriftHiveMetastore_get_schema_version_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_version_result() throw();
+  SchemaVersion success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_version_result__isset __isset;
+
+  void __set_success(const SchemaVersion& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_version_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_version_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_version_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_version_presult__isset {
+  _ThriftHiveMetastore_get_schema_version_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_version_presult__isset;
+
+class ThriftHiveMetastore_get_schema_version_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_version_presult() throw();
+  SchemaVersion* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_version_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_latest_version_args__isset {
+  _ThriftHiveMetastore_get_schema_latest_version_args__isset() : schemaName(false) {}
+  bool schemaName :1;
+} _ThriftHiveMetastore_get_schema_latest_version_args__isset;
+
+class ThriftHiveMetastore_get_schema_latest_version_args {
+ public:
+
+  ThriftHiveMetastore_get_schema_latest_version_args(const ThriftHiveMetastore_get_schema_latest_version_args&);
+  ThriftHiveMetastore_get_schema_latest_version_args& operator=(const ThriftHiveMetastore_get_schema_latest_version_args&);
+  ThriftHiveMetastore_get_schema_latest_version_args() : schemaName() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_latest_version_args() throw();
+  std::string schemaName;
+
+  _ThriftHiveMetastore_get_schema_latest_version_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_latest_version_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_latest_version_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_latest_version_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_schema_latest_version_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_latest_version_pargs() throw();
+  const std::string* schemaName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_latest_version_result__isset {
+  _ThriftHiveMetastore_get_schema_latest_version_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_latest_version_result__isset;
+
+class ThriftHiveMetastore_get_schema_latest_version_result {
+ public:
+
+  ThriftHiveMetastore_get_schema_latest_version_result(const ThriftHiveMetastore_get_schema_latest_version_result&);
+  ThriftHiveMetastore_get_schema_latest_version_result& operator=(const ThriftHiveMetastore_get_schema_latest_version_result&);
+  ThriftHiveMetastore_get_schema_latest_version_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_latest_version_result() throw();
+  SchemaVersion success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_latest_version_result__isset __isset;
+
+  void __set_success(const SchemaVersion& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_latest_version_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_latest_version_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_latest_version_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_latest_version_presult__isset {
+  _ThriftHiveMetastore_get_schema_latest_version_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_latest_version_presult__isset;
+
+class ThriftHiveMetastore_get_schema_latest_version_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_latest_version_presult() throw();
+  SchemaVersion* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_latest_version_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_all_versions_args__isset {
+  _ThriftHiveMetastore_get_schema_all_versions_args__isset() : schemaName(false) {}
+  bool schemaName :1;
+} _ThriftHiveMetastore_get_schema_all_versions_args__isset;
+
+class ThriftHiveMetastore_get_schema_all_versions_args {
+ public:
+
+  ThriftHiveMetastore_get_schema_all_versions_args(const ThriftHiveMetastore_get_schema_all_versions_args&);
+  ThriftHiveMetastore_get_schema_all_versions_args& operator=(const ThriftHiveMetastore_get_schema_all_versions_args&);
+  ThriftHiveMetastore_get_schema_all_versions_args() : schemaName() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_all_versions_args() throw();
+  std::string schemaName;
+
+  _ThriftHiveMetastore_get_schema_all_versions_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_all_versions_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_all_versions_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_all_versions_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_schema_all_versions_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_all_versions_pargs() throw();
+  const std::string* schemaName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_all_versions_result__isset {
+  _ThriftHiveMetastore_get_schema_all_versions_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_all_versions_result__isset;
+
+class ThriftHiveMetastore_get_schema_all_versions_result {
+ public:
+
+  ThriftHiveMetastore_get_schema_all_versions_result(const ThriftHiveMetastore_get_schema_all_versions_result&);
+  ThriftHiveMetastore_get_schema_all_versions_result& operator=(const ThriftHiveMetastore_get_schema_all_versions_result&);
+  ThriftHiveMetastore_get_schema_all_versions_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schema_all_versions_result() throw();
+  std::vector<SchemaVersion>  success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_all_versions_result__isset __isset;
+
+  void __set_success(const std::vector<SchemaVersion> & val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schema_all_versions_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schema_all_versions_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schema_all_versions_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schema_all_versions_presult__isset {
+  _ThriftHiveMetastore_get_schema_all_versions_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_schema_all_versions_presult__isset;
+
+class ThriftHiveMetastore_get_schema_all_versions_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schema_all_versions_presult() throw();
+  std::vector<SchemaVersion> * success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_schema_all_versions_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_schema_version_args__isset {
+  _ThriftHiveMetastore_drop_schema_version_args__isset() : schemaName(false), version(false) {}
+  bool schemaName :1;
+  bool version :1;
+} _ThriftHiveMetastore_drop_schema_version_args__isset;
+
+class ThriftHiveMetastore_drop_schema_version_args {
+ public:
+
+  ThriftHiveMetastore_drop_schema_version_args(const ThriftHiveMetastore_drop_schema_version_args&);
+  ThriftHiveMetastore_drop_schema_version_args& operator=(const ThriftHiveMetastore_drop_schema_version_args&);
+  ThriftHiveMetastore_drop_schema_version_args() : schemaName(), version(0) {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_schema_version_args() throw();
+  std::string schemaName;
+  int32_t version;
+
+  _ThriftHiveMetastore_drop_schema_version_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  bool operator == (const ThriftHiveMetastore_drop_schema_version_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_schema_version_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_schema_version_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_schema_version_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_schema_version_pargs() throw();
+  const std::string* schemaName;
+  const int32_t* version;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_schema_version_result__isset {
+  _ThriftHiveMetastore_drop_schema_version_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_drop_schema_version_result__isset;
+
+class ThriftHiveMetastore_drop_schema_version_result {
+ public:
+
+  ThriftHiveMetastore_drop_schema_version_result(const ThriftHiveMetastore_drop_schema_version_result&);
+  ThriftHiveMetastore_drop_schema_version_result& operator=(const ThriftHiveMetastore_drop_schema_version_result&);
+  ThriftHiveMetastore_drop_schema_version_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_schema_version_result() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_drop_schema_version_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_schema_version_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_schema_version_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_schema_version_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_schema_version_presult__isset {
+  _ThriftHiveMetastore_drop_schema_version_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_drop_schema_version_presult__isset;
+
+class ThriftHiveMetastore_drop_schema_version_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_schema_version_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_drop_schema_version_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schemas_by_cols_args__isset {
+  _ThriftHiveMetastore_get_schemas_by_cols_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_get_schemas_by_cols_args__isset;
+
+class ThriftHiveMetastore_get_schemas_by_cols_args {
+ public:
+
+  ThriftHiveMetastore_get_schemas_by_cols_args(const ThriftHiveMetastore_get_schemas_by_cols_args&);
+  ThriftHiveMetastore_get_schemas_by_cols_args& operator=(const ThriftHiveMetastore_get_schemas_by_cols_args&);
+  ThriftHiveMetastore_get_schemas_by_cols_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schemas_by_cols_args() throw();
+  FindSchemasByColsRqst rqst;
+
+  _ThriftHiveMetastore_get_schemas_by_cols_args__isset __isset;
+
+  void __set_rqst(const FindSchemasByColsRqst& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schemas_by_cols_args & rhs) const
+  {
+    if (!(rqst == rhs.rqst))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schemas_by_cols_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schemas_by_cols_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_schemas_by_cols_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schemas_by_cols_pargs() throw();
+  const FindSchemasByColsRqst* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schemas_by_cols_result__isset {
+  _ThriftHiveMetastore_get_schemas_by_cols_result__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_schemas_by_cols_result__isset;
+
+class ThriftHiveMetastore_get_schemas_by_cols_result {
+ public:
+
+  ThriftHiveMetastore_get_schemas_by_cols_result(const ThriftHiveMetastore_get_schemas_by_cols_result&);
+  ThriftHiveMetastore_get_schemas_by_cols_result& operator=(const ThriftHiveMetastore_get_schemas_by_cols_result&);
+  ThriftHiveMetastore_get_schemas_by_cols_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_schemas_by_cols_result() throw();
+  FindSchemasByColsResp success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_schemas_by_cols_result__isset __isset;
+
+  void __set_success(const FindSchemasByColsResp& val);
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_schemas_by_cols_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_schemas_by_cols_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_schemas_by_cols_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_schemas_by_cols_presult__isset {
+  _ThriftHiveMetastore_get_schemas_by_cols_presult__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_schemas_by_cols_presult__isset;
+
+class ThriftHiveMetastore_get_schemas_by_cols_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_schemas_by_cols_presult() throw();
+  FindSchemasByColsResp* success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_schemas_by_cols_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_map_schema_version_to_serde_args__isset {
+  _ThriftHiveMetastore_map_schema_version_to_serde_args__isset() : schemaName(false), version(false), serdeName(false) {}
+  bool schemaName :1;
+  bool version :1;
+  bool serdeName :1;
+} _ThriftHiveMetastore_map_schema_version_to_serde_args__isset;
+
+class ThriftHiveMetastore_map_schema_version_to_serde_args {
+ public:
+
+  ThriftHiveMetastore_map_schema_version_to_serde_args(const ThriftHiveMetastore_map_schema_version_to_serde_args&);
+  ThriftHiveMetastore_map_schema_version_to_serde_args& operator=(const ThriftHiveMetastore_map_schema_version_to_serde_args&);
+  ThriftHiveMetastore_map_schema_version_to_serde_args() : schemaName(), version(0), serdeName() {
+  }
+
+  virtual ~ThriftHiveMetastore_map_schema_version_to_serde_args() throw();
+  std::string schemaName;
+  int32_t version;
+  std::string serdeName;
+
+  _ThriftHiveMetastore_map_schema_version_to_serde_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  void __set_serdeName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_map_schema_version_to_serde_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    if (!(serdeName == rhs.serdeName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_map_schema_version_to_serde_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_map_schema_version_to_serde_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_map_schema_version_to_serde_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_map_schema_version_to_serde_pargs() throw();
+  const std::string* schemaName;
+  const int32_t* version;
+  const std::string* serdeName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_map_schema_version_to_serde_result__isset {
+  _ThriftHiveMetastore_map_schema_version_to_serde_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_map_schema_version_to_serde_result__isset;
+
+class ThriftHiveMetastore_map_schema_version_to_serde_result {
+ public:
+
+  ThriftHiveMetastore_map_schema_version_to_serde_result(const ThriftHiveMetastore_map_schema_version_to_serde_result&);
+  ThriftHiveMetastore_map_schema_version_to_serde_result& operator=(const ThriftHiveMetastore_map_schema_version_to_serde_result&);
+  ThriftHiveMetastore_map_schema_version_to_serde_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_map_schema_version_to_serde_result() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_map_schema_version_to_serde_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_map_schema_version_to_serde_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_map_schema_version_to_serde_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_map_schema_version_to_serde_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_map_schema_version_to_serde_presult__isset {
+  _ThriftHiveMetastore_map_schema_version_to_serde_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_map_schema_version_to_serde_presult__isset;
+
+class ThriftHiveMetastore_map_schema_version_to_serde_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_map_schema_version_to_serde_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_map_schema_version_to_serde_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_set_schema_version_state_args__isset {
+  _ThriftHiveMetastore_set_schema_version_state_args__isset() : schemaName(false), version(false), state(false) {}
+  bool schemaName :1;
+  bool version :1;
+  bool state :1;
+} _ThriftHiveMetastore_set_schema_version_state_args__isset;
+
+class ThriftHiveMetastore_set_schema_version_state_args {
+ public:
+
+  ThriftHiveMetastore_set_schema_version_state_args(const ThriftHiveMetastore_set_schema_version_state_args&);
+  ThriftHiveMetastore_set_schema_version_state_args& operator=(const ThriftHiveMetastore_set_schema_version_state_args&);
+  ThriftHiveMetastore_set_schema_version_state_args() : schemaName(), version(0), state((SchemaVersionState::type)0) {
+  }
+
+  virtual ~ThriftHiveMetastore_set_schema_version_state_args() throw();
+  std::string schemaName;
+  int32_t version;
+  SchemaVersionState::type state;
+
+  _ThriftHiveMetastore_set_schema_version_state_args__isset __isset;
+
+  void __set_schemaName(const std::string& val);
+
+  void __set_version(const int32_t val);
+
+  void __set_state(const SchemaVersionState::type val);
+
+  bool operator == (const ThriftHiveMetastore_set_schema_version_state_args & rhs) const
+  {
+    if (!(schemaName == rhs.schemaName))
+      return false;
+    if (!(version == rhs.version))
+      return false;
+    if (!(state == rhs.state))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_set_schema_version_state_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_set_schema_version_state_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_set_schema_version_state_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_set_schema_version_state_pargs() throw();
+  const std::string* schemaName;
+  const int32_t* version;
+  const SchemaVersionState::type* state;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_set_schema_version_state_result__isset {
+  _ThriftHiveMetastore_set_schema_version_state_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_set_schema_version_state_result__isset;
+
+class ThriftHiveMetastore_set_schema_version_state_result {
+ public:
+
+  ThriftHiveMetastore_set_schema_version_state_result(const ThriftHiveMetastore_set_schema_version_state_result&);
+  ThriftHiveMetastore_set_schema_version_state_result& operator=(const ThriftHiveMetastore_set_schema_version_state_result&);
+  ThriftHiveMetastore_set_schema_version_state_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_set_schema_version_state_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_set_schema_version_state_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_set_schema_version_state_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_set_schema_version_state_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_set_schema_version_state_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_set_schema_version_state_presult__isset {
+  _ThriftHiveMetastore_set_schema_version_state_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_set_schema_version_state_presult__isset;
+
+class ThriftHiveMetastore_set_schema_version_state_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_set_schema_version_state_presult() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_set_schema_version_state_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_add_serde_args__isset {
+  _ThriftHiveMetastore_add_serde_args__isset() : serde(false) {}
+  bool serde :1;
+} _ThriftHiveMetastore_add_serde_args__isset;
+
+class ThriftHiveMetastore_add_serde_args {
+ public:
+
+  ThriftHiveMetastore_add_serde_args(const ThriftHiveMetastore_add_serde_args&);
+  ThriftHiveMetastore_add_serde_args& operator=(const ThriftHiveMetastore_add_serde_args&);
+  ThriftHiveMetastore_add_serde_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_add_serde_args() throw();
+  SerDeInfo serde;
+
+  _ThriftHiveMetastore_add_serde_args__isset __isset;
+
+  void __set_serde(const SerDeInfo& val);
+
+  bool operator == (const ThriftHiveMetastore_add_serde_args & rhs) const
+  {
+    if (!(serde == rhs.serde))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_add_serde_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_add_serde_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_add_serde_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_add_serde_pargs() throw();
+  const SerDeInfo* serde;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_add_serde_result__isset {
+  _ThriftHiveMetastore_add_serde_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_add_serde_result__isset;
+
+class ThriftHiveMetastore_add_serde_result {
+ public:
+
+  ThriftHiveMetastore_add_serde_result(const ThriftHiveMetastore_add_serde_result&);
+  ThriftHiveMetastore_add_serde_result& operator=(const ThriftHiveMetastore_add_serde_result&);
+  ThriftHiveMetastore_add_serde_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_add_serde_result() throw();
+  AlreadyExistsException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_add_serde_result__isset __isset;
+
+  void __set_o1(const AlreadyExistsException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_add_serde_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_add_serde_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_add_serde_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_add_serde_presult__isset {
+  _ThriftHiveMetastore_add_serde_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_add_serde_presult__isset;
+
+class ThriftHiveMetastore_add_serde_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_add_serde_presult() throw();
+  AlreadyExistsException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_add_serde_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_serde_args__isset {
+  _ThriftHiveMetastore_get_serde_args__isset() : serdeName(false) {}
+  bool serdeName :1;
+} _ThriftHiveMetastore_get_serde_args__isset;
+
+class ThriftHiveMetastore_get_serde_args {
+ public:
+
+  ThriftHiveMetastore_get_serde_args(const ThriftHiveMetastore_get_serde_args&);
+  ThriftHiveMetastore_get_serde_args& operator=(const ThriftHiveMetastore_get_serde_args&);
+  ThriftHiveMetastore_get_serde_args() : serdeName() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_serde_args() throw();
+  std::string serdeName;
+
+  _ThriftHiveMetastore_get_serde_args__isset __isset;
+
+  void __set_serdeName(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_serde_args & rhs) const
+  {
+    if (!(serdeName == rhs.serdeName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_serde_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_serde_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_serde_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_serde_pargs() throw();
+  const std::string* serdeName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_serde_result__isset {
+  _ThriftHiveMetastore_get_serde_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_serde_result__isset;
+
+class ThriftHiveMetastore_get_serde_result {
+ public:
+
+  ThriftHiveMetastore_get_serde_result(const ThriftHiveMetastore_get_serde_result&);
+  ThriftHiveMetastore_get_serde_result& operator=(const ThriftHiveMetastore_get_serde_result&);
+  ThriftHiveMetastore_get_serde_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_serde_result() throw();
+  SerDeInfo success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_serde_result__isset __isset;
+
+  void __set_success(const SerDeInfo& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_serde_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_serde_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_serde_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_serde_presult__isset {
+  _ThriftHiveMetastore_get_serde_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_serde_presult__isset;
+
+class ThriftHiveMetastore_get_serde_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_serde_presult() throw();
+  SerDeInfo* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_serde_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public  ::facebook::fb303::FacebookServiceClient {
+ public:
+  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) :
+     ::facebook::fb303::FacebookServiceClient(prot, prot) {}
+  ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) :     ::facebook::fb303::FacebookServiceClient(iprot, oprot) {}
+  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() {
+    return piprot_;
+  }
+  boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() {
+    return poprot_;
+  }
+  void getMetaConf(std::string& _return, const std::string& key);
+  void send_getMetaConf(const std::string& key);
+  void recv_getMetaConf(std::string& _return);
+  void setMetaConf(const std::string& key, const std::string& value);
+  void send_setMetaConf(const std::string& key, const std::string& value);
+  void recv_setMetaConf();
+  void create_database(const Database& database);
+  void send_create_database(const Database& database);
+  void recv_create_database();
+  void get_database(Database& _return, const std::string& name);
+  void send_get_database(const std::string& name);
+  void recv_get_database(Database& _return);
+  void drop_database(const std::string& name, const bool deleteData, const bool cascade);
+  void send_drop_database(const std::string& name, const bool deleteData, const bool cascade);
+  void recv_drop_database();
+  void get_databases(std::vector<std::string> & _return, const std::string& pattern);
+  void send_get_databases(const std::string& pattern);
+  void recv_get_databases(std::vector<std::string> & _return);
+  void get_all_databases(std::vector<std::string> & _return);
+  void send_get_all_databases();
+  void recv_get_all_databases(std::vector<std::string> & _return);
+  void alter_database(const std::string& dbname, const Database& db);
+  void send_alter_database(const std::string& dbname, const Database& db);
+  void recv_alter_database();
+  void get_type(Type& _return, const std::string& name);
+  void send_get_type(const std::string& name);
+  void recv_get_type(Type& _return);
+  bool create_type(const Type& type);
+  void send_create_type(const Type& type);
+  bool recv_create_type();
+  bool drop_type(const std::string& type);
+  void send_drop_type(const std::string& type);
+  bool recv_drop_type();
+  void get_type_all(std::map<std::string, Type> & _return, const std::string& name);
+  void send_get_type_all(const std::string& name);
+  void recv_get_type_all(std::map<std::string, Type> & _return);
+  void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
+  void send_get_fields(const std::string& db_name, const std::string& table_name);
+  void recv_get_fields(std::vector<FieldSchema> & _return);
+  void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+  void send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+  void recv_get_fields_with_environment_context(std::vector<FieldSchema> & _return);
+  void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name);
+  void send_get_schema(const std::string& db_name, const std::string& table_name);
+  void recv_get_schema(std::vector<FieldSchema> & _return);
+  void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+  void send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context);
+  void recv_get_schema_with_environment_context(std::vector<FieldSchema> & _return);
+  void create_table(const Table& tbl);
+  void send_create_table(const Table& tbl);
+  void recv_create_table();
+  void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
+  void send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context);
+  void recv_create_table_with_environment_context();
+  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
+  void send_create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints);
+  void recv_create_table_with_constraints();
+  void drop_constraint(const DropConstraintRequest& req);
+  void send_drop_constraint(const DropConstraintRequest& req);
+  void recv_drop_constraint();
+  void add_primary_key(const AddPrimaryKeyRequest& req);
+  void send_add_primary_key(const AddPrimaryKeyRequest& req);
+  void recv_add_primary_key();
+  void add_foreign_key(const AddForeignKeyRequest& req);
+  void send_add_foreign_key(const AddForeignKeyRequest& req);
+  void recv_add_foreign_key();
+  void add_unique_constraint(const AddUniqueConstraintRequest& req);
+  void send_add_unique_constraint(const AddUniqueConstraintRequest& req);
+  void recv_add_unique_constraint();
+  void add_not_null_constraint(const AddNotNullConstraintRequest& req);
+  void send_add_not_null_constraint(const AddNotNullConstraintRequest& req);
+  void recv_add_not_null_constraint();
+  void drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
+  void send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData);
+  void recv_drop_table();
+  void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
+  void send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context);
+  void recv_drop_table_with_environment_context();
+  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
+  void send_truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames);
+  void recv_truncate_table();
+  void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern);
+  void send_get_tables(const std::string& db_name, const std::string& pattern);
+  void recv_get_tables(std::vector<std::string> & _return);
+  void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType);
+  void send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType);
+  void recv_get_tables_by_type(std::vector<std::string> & _return);
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  void send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types);
+  void recv_get_table_meta(std::vector<TableMeta> & _return);
+  void get_all_tables(std::vector<std::string> & _return, const std::string& db_name);
+  void send_get_all_tables(const std::string& db_name);
+  void recv_get_all_tables(std::vector<std::string> & _return);
+  void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name);
+  void send_get_table(const std::string& dbname, const std::string& tbl_name);
+  void recv_get_table(Table& _return);
+  void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
+  void send_get_table_objects_by_name(const std::string& dbname, const std::vector<std::string> & tbl_names);
+  void recv_get_table_objects_by_name(std::vector<Table> & _return);
+  void get_table_req(GetTableResult& _return, const GetTableRequest& req);
+  void send_get_table_req(const GetTableRequest& req);
+  void recv_get_table_req(GetTableResult& _return);
+  void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
+  void send_get_table_objects_by_name_req(const GetTablesRequest& req);
+  void recv_get_table_objects_by_name_req(GetTablesResult& _return);
+  void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables);
+  void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables);
+  void recv_get_table_names_by_filter(std::vector<std::string> & _return);
+  void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
+  void send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl);
+  void recv_alter_table();
+  void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
+  void send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context);
+  void recv_

<TRUNCATED>
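
Each service method in the generated header follows the same Thrift pattern: an _args/_pargs pair carries the request, a _result/_presult pair carries the response, and an __isset bitfield records which members were populated during deserialization. The same IDL change regenerates the Java client as well, so the new schema calls can be exercised end to end. A minimal sketch, assuming an unsecured metastore on localhost:9083 and the regenerated org.apache.hadoop.hive.metastore.api classes on the classpath; the host, port, and schema name are illustrative, not part of this patch:

import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class SchemaVersionLookup {
  public static void main(String[] args) throws Exception {
    // Plain socket + binary protocol; a kerberized metastore would need a SASL transport.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    try {
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      // Mirrors ThriftHiveMetastore_get_schema_version_args above: (schemaName, version).
      SchemaVersion v = client.get_schema_version("web_events", 1);
      // Generated structs carry a readable toString(); field getters follow
      // the usual Thrift bean naming.
      System.out.println("resolved: " + v);
    } finally {
      transport.close();
    }
  }
}

A missing schema surfaces as NoSuchObjectException and a server-side failure as MetaException, exactly the o1/o2 slots declared in the _result struct above.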

[36/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterISchemaEvent.java
new file mode 100644
index 0000000..3df3780
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterISchemaEvent.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreAlterISchemaEvent extends PreEventContext {
+
+  private final ISchema oldSchema, newSchema;
+
+  public PreAlterISchemaEvent(IHMSHandler handler, ISchema oldSchema, ISchema newSchema) {
+    super(PreEventType.ALTER_ISCHEMA, handler);
+    this.oldSchema = oldSchema;
+    this.newSchema = newSchema;
+  }
+
+  public ISchema getOldSchema() {
+    return oldSchema;
+  }
+
+  public ISchema getNewSchema() {
+    return newSchema;
+  }
+}
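
The pre-event carries both the current and the proposed schema definition, so a listener can diff them before the alter is applied. A minimal standalone sketch of constructing and inspecting the event follows; the null handler argument is purely for illustration (inside the metastore the handler supplies itself), and the name/description setters are assumptions about the generated ISchema bean:

import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.events.PreAlterISchemaEvent;

public class PreAlterISchemaEventDemo {
  public static void main(String[] args) {
    ISchema before = new ISchema();
    before.setName("web_events");

    ISchema after = new ISchema(before);  // generated copy constructor
    after.setDescription("clickstream schema, second layout");

    // In the metastore the handler builds this event and dispatches it to the
    // registered pre-event listeners before persisting the change.
    PreAlterISchemaEvent event = new PreAlterISchemaEvent(null, before, after);
    System.out.println(event.getOldSchema().getName() + " -> "
        + event.getNewSchema().getDescription());
  }
}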

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterSchemaVersionEvent.java
new file mode 100644
index 0000000..63ddb3b
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreAlterSchemaVersionEvent.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreAlterSchemaVersionEvent extends PreEventContext {
+
+  private final SchemaVersion oldSchemaVersion, newSchemaVersion;
+
+  public PreAlterSchemaVersionEvent(IHMSHandler handler, SchemaVersion oldSchemaVersion,
+                                    SchemaVersion newSchemaVersion) {
+    super(PreEventType.ALTER_SCHEMA_VERSION, handler);
+    this.oldSchemaVersion = oldSchemaVersion;
+    this.newSchemaVersion = newSchemaVersion;
+  }
+
+  public SchemaVersion getOldSchemaVersion() {
+    return oldSchemaVersion;
+  }
+
+  public SchemaVersion getNewSchemaVersion() {
+    return newSchemaVersion;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateISchemaEvent.java
new file mode 100644
index 0000000..d8e9f04
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateISchemaEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreCreateISchemaEvent extends PreEventContext {
+
+  private final ISchema schema;
+
+  public PreCreateISchemaEvent(IHMSHandler handler, ISchema schema) {
+    super(PreEventType.CREATE_ISCHEMA, handler);
+    this.schema = schema;
+  }
+
+  public ISchema getSchema() {
+    return schema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropISchemaEvent.java
new file mode 100644
index 0000000..5755374
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropISchemaEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreDropISchemaEvent extends PreEventContext {
+
+  private final ISchema schema;
+
+  public PreDropISchemaEvent(IHMSHandler handler, ISchema schema) {
+    super(PreEventType.DROP_ISCHEMA, handler);
+    this.schema = schema;
+  }
+
+  public ISchema getSchema() {
+    return schema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropSchemaVersionEvent.java
new file mode 100644
index 0000000..2958bd9
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropSchemaVersionEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreDropSchemaVersionEvent extends PreEventContext {
+
+  private final SchemaVersion schemaVersion;
+
+  public PreDropSchemaVersionEvent(IHMSHandler handler, SchemaVersion schemaVersion) {
+    super(PreEventType.DROP_SCHEMA_VERSION, handler);
+    this.schemaVersion = schemaVersion;
+  }
+
+  public SchemaVersion getSchemaVersion() {
+    return schemaVersion;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
index a3f4836..74b3bc7 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
@@ -45,7 +45,15 @@ public abstract class PreEventContext {
     ADD_INDEX,
     ALTER_INDEX,
     DROP_INDEX,
-    ALTER_DATABASE
+    ALTER_DATABASE,
+    CREATE_ISCHEMA,
+    ALTER_ISCHEMA,
+    DROP_ISCHEMA,
+    ADD_SCHEMA_VERSION,
+    ALTER_SCHEMA_VERSION,
+    DROP_SCHEMA_VERSION,
+    READ_ISCHEMA,
+    READ_SCHEMA_VERSION
   }
 
   private final PreEventType eventType;

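To make the new event types concrete: a pre-event listener can now intercept the schema operations before they execute. The sketch below is not part of this patch; the MetaStorePreEventListener base class, its Configuration constructor, and PreEventContext.getEventType() are assumed from the existing metastore listener API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
import org.apache.hadoop.hive.metastore.events.PreEventContext;

// Hypothetical listener auditing schema drops before they happen.
public class SchemaAuditListener extends MetaStorePreEventListener {
  public SchemaAuditListener(Configuration config) {
    super(config);
  }

  @Override
  public void onEvent(PreEventContext context)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    switch (context.getEventType()) {
      case DROP_ISCHEMA: {
        // PreDropISchemaEvent exposes the ISchema about to be dropped
        ISchema schema = ((PreDropISchemaEvent) context).getSchema();
        System.out.println("About to drop schema " + schema.getName());
        break;
      }
      case DROP_SCHEMA_VERSION: {
        SchemaVersion sv = ((PreDropSchemaVersionEvent) context).getSchemaVersion();
        System.out.println("About to drop schema version " + sv.getVersion());
        break;
      }
      default:
        break;
    }
  }
}
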
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadISchemaEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadISchemaEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadISchemaEvent.java
new file mode 100644
index 0000000..de8ce04
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadISchemaEvent.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class PreReadISchemaEvent extends PreEventContext {
+
+  private final ISchema schema;
+
+  public PreReadISchemaEvent(IHMSHandler handler, ISchema schema) {
+    super(PreEventType.READ_ISCHEMA, handler);
+    this.schema = schema;
+  }
+
+  public ISchema getSchema() {
+    return schema;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadhSchemaVersionEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadhSchemaVersionEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadhSchemaVersionEvent.java
new file mode 100644
index 0000000..fbe4879
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadhSchemaVersionEvent.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+
+import java.util.List;
+
+public class PreReadhSchemaVersionEvent extends PreEventContext {
+  private final List<SchemaVersion> schemaVersions;
+
+  public PreReadhSchemaVersionEvent(IHMSHandler handler, List<SchemaVersion> schemaVersions) {
+    super(PreEventType.READ_SCHEMA_VERSION, handler);
+    this.schemaVersions = schemaVersions;
+  }
+
+  public List<SchemaVersion> getSchemaVersions() {
+    return schemaVersions;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java
index 7b22fac..e45473b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java
@@ -48,7 +48,13 @@ public abstract class EventMessage {
     ADD_FOREIGNKEY(MessageFactory.ADD_FOREIGNKEY_EVENT),
     ADD_UNIQUECONSTRAINT(MessageFactory.ADD_UNIQUECONSTRAINT_EVENT),
     ADD_NOTNULLCONSTRAINT(MessageFactory.ADD_NOTNULLCONSTRAINT_EVENT),
-    DROP_CONSTRAINT(MessageFactory.DROP_CONSTRAINT_EVENT);
+    DROP_CONSTRAINT(MessageFactory.DROP_CONSTRAINT_EVENT),
+    CREATE_ISCHEMA(MessageFactory.CREATE_ISCHEMA_EVENT),
+    ALTER_ISCHEMA(MessageFactory.ALTER_ISCHEMA_EVENT),
+    DROP_ISCHEMA(MessageFactory.DROP_ISCHEMA_EVENT),
+    ADD_SCHEMA_VERSION(MessageFactory.ADD_SCHEMA_VERSION_EVENT),
+    ALTER_SCHEMA_VERSION(MessageFactory.ALTER_SCHEMA_VERSION_EVENT),
+    DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT);
 
     private String typeString;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
index 46fd336..6ff660b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java
@@ -62,6 +62,13 @@ public abstract class MessageFactory {
   public static final String ADD_UNIQUECONSTRAINT_EVENT = "ADD_UNIQUECONSTRAINT";
   public static final String ADD_NOTNULLCONSTRAINT_EVENT = "ADD_NOTNULLCONSTRAINT";
   public static final String DROP_CONSTRAINT_EVENT = "DROP_CONSTRAINT";
+  public static final String CREATE_ISCHEMA_EVENT = "CREATE_ISCHEMA";
+  public static final String ALTER_ISCHEMA_EVENT = "ALTER_ISCHEMA";
+  public static final String DROP_ISCHEMA_EVENT = "DROP_ISCHEMA";
+  public static final String ADD_SCHEMA_VERSION_EVENT = "ADD_SCHEMA_VERSION";
+  public static final String ALTER_SCHEMA_VERSION_EVENT = "ALTER_SCHEMA_VERSION";
+  public static final String DROP_SCHEMA_VERSION_EVENT = "DROP_SCHEMA_VERSION";
+
 
   private static MessageFactory instance = null;
 

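For notification consumers, the strings above are the event-type names that would appear on a NotificationEvent, assuming the schema events get surfaced through the notification log the same way the existing types are. A hypothetical filter (not in this patch; NotificationEvent and its getEventType() accessor are assumed from the existing metastore API):

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.MessageFactory;

public class SchemaEventFilter {
  // True when the event came from one of the new schema operations.
  public static boolean isSchemaEvent(NotificationEvent event) {
    String type = event.getEventType();
    return MessageFactory.CREATE_ISCHEMA_EVENT.equals(type)
        || MessageFactory.ALTER_ISCHEMA_EVENT.equals(type)
        || MessageFactory.DROP_ISCHEMA_EVENT.equals(type)
        || MessageFactory.ADD_SCHEMA_VERSION_EVENT.equals(type)
        || MessageFactory.ALTER_SCHEMA_VERSION_EVENT.equals(type)
        || MessageFactory.DROP_SCHEMA_VERSION_EVENT.equals(type);
  }
}
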
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MISchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MISchema.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MISchema.java
new file mode 100644
index 0000000..e64b0e9
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MISchema.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.model;
+
+public class MISchema {
+  private int schemaType;
+  private String name;
+  private MDatabase db;
+  private int compatibility;
+  private int validationLevel;
+  private boolean canEvolve;
+  private String schemaGroup;
+  private String description;
+
+  public MISchema(int schemaType, String name, MDatabase db, int compatibility,
+                  int validationLevel, boolean canEvolve, String schemaGroup, String description) {
+    this.schemaType = schemaType;
+    this.name = name;
+    this.db = db;
+    this.compatibility = compatibility;
+    this.validationLevel = validationLevel;
+    this.canEvolve = canEvolve;
+    this.schemaGroup = schemaGroup;
+    this.description = description;
+  }
+
+  public int getSchemaType() {
+    return schemaType;
+  }
+
+  public void setSchemaType(int schemaType) {
+    this.schemaType = schemaType;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public MDatabase getDb() {
+    return db;
+  }
+
+  public MISchema setDb(MDatabase db) {
+    this.db = db;
+    return this;
+  }
+
+  public int getCompatibility() {
+    return compatibility;
+  }
+
+  public void setCompatibility(int compatibility) {
+    this.compatibility = compatibility;
+  }
+
+  public int getValidationLevel() {
+    return validationLevel;
+  }
+
+  public void setValidationLevel(int validationLevel) {
+    this.validationLevel = validationLevel;
+  }
+
+  public boolean getCanEvolve() {
+    return canEvolve;
+  }
+
+  public void setCanEvolve(boolean canEvolve) {
+    this.canEvolve = canEvolve;
+  }
+
+  public String getSchemaGroup() {
+    return schemaGroup;
+  }
+
+  public void setSchemaGroup(String schemaGroup) {
+    this.schemaGroup = schemaGroup;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSchemaVersion.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSchemaVersion.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSchemaVersion.java
new file mode 100644
index 0000000..7c8a6d4
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSchemaVersion.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.model;
+
+public class MSchemaVersion {
+  private MISchema iSchema;
+  private int version;
+  private long createdAt;
+  private MColumnDescriptor cols;
+  private int state;
+  private String description;
+  private String schemaText;
+  private String fingerprint;
+  private String name;
+  private MSerDeInfo serDe;
+
+  public MSchemaVersion(MISchema iSchema, int version, long createdAt,
+                        MColumnDescriptor cols, int state, String description,
+                        String schemaText, String fingerprint, String name,
+                        MSerDeInfo serDe) {
+    this.iSchema = iSchema;
+    this.version = version;
+    this.createdAt = createdAt;
+    this.cols = cols;
+    this.state = state;
+    this.description = description;
+    this.schemaText = schemaText;
+    this.fingerprint = fingerprint;
+    this.name = name;
+    this.serDe = serDe;
+  }
+
+  public MISchema getiSchema() {
+    return iSchema;
+  }
+
+  public void setiSchema(MISchema iSchema) {
+    this.iSchema = iSchema;
+  }
+
+  public int getVersion() {
+    return version;
+  }
+
+  public void setVersion(int version) {
+    this.version = version;
+  }
+
+  public long getCreatedAt() {
+    return createdAt;
+  }
+
+  public void setCreatedAt(long createdAt) {
+    this.createdAt = createdAt;
+  }
+
+  public MColumnDescriptor getCols() {
+    return cols;
+  }
+
+  public void setCols(MColumnDescriptor cols) {
+    this.cols = cols;
+  }
+
+  public int getState() {
+    return state;
+  }
+
+  public void setState(int state) {
+    this.state = state;
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public String getSchemaText() {
+    return schemaText;
+  }
+
+  public void setSchemaText(String schemaText) {
+    this.schemaText = schemaText;
+  }
+
+  public String getFingerprint() {
+    return fingerprint;
+  }
+
+  public void setFingerprint(String fingerprint) {
+    this.fingerprint = fingerprint;
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public MSerDeInfo getSerDe() {
+    return serDe;
+  }
+
+  public void setSerDe(MSerDeInfo serDe) {
+    this.serDe = serDe;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java
index 2c16e61..967565b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MSerDeInfo.java
@@ -24,16 +24,31 @@ public class MSerDeInfo {
   private String name;
   private String serializationLib;
   private Map<String, String> parameters;
+  private String description;
+  private String serializerClass;
+  private String deserializerClass;
+  private int serdeType;
 
   /**
+   *
    * @param name
    * @param serializationLib
    * @param parameters
+   * @param description human-readable description of this serde
+   * @param serializerClass fully qualified class name of the serializer
+   * @param deserializerClass fully qualified class name of the deserializer
+   * @param serdeType one of the SerdeType values, stored as an int
    */
-  public MSerDeInfo(String name, String serializationLib, Map<String, String> parameters) {
+  public MSerDeInfo(String name, String serializationLib, Map<String, String> parameters,
+                    String description, String serializerClass, String deserializerClass,
+                    int serdeType) {
     this.name = name;
     this.serializationLib = serializationLib;
     this.parameters = parameters;
+    this.description = description;
+    this.serializerClass = serializerClass;
+    this.deserializerClass = deserializerClass;
+    this.serdeType = serdeType;
   }
 
   /**
@@ -78,4 +93,35 @@ public class MSerDeInfo {
     this.parameters = parameters;
   }
 
+  public String getDescription() {
+    return description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public String getSerializerClass() {
+    return serializerClass;
+  }
+
+  public void setSerializerClass(String serializerClass) {
+    this.serializerClass = serializerClass;
+  }
+
+  public String getDeserializerClass() {
+    return deserializerClass;
+  }
+
+  public void setDeserializerClass(String deserializerClass) {
+    this.deserializerClass = deserializerClass;
+  }
+
+  public int getSerdeType() {
+    return serdeType;
+  }
+
+  public void setSerdeType(int serdeType) {
+    this.serdeType = serdeType;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/resources/datanucleus-log4j.properties
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/datanucleus-log4j.properties b/standalone-metastore/src/main/resources/datanucleus-log4j.properties
new file mode 100644
index 0000000..80f17e8
--- /dev/null
+++ b/standalone-metastore/src/main/resources/datanucleus-log4j.properties
@@ -0,0 +1,17 @@
+# Define the destination and format of our logging
+log4j.appender.A1=org.apache.log4j.FileAppender
+log4j.appender.A1.File=target/datanucleus.log
+log4j.appender.A1.layout=org.apache.log4j.PatternLayout
+log4j.appender.A1.layout.ConversionPattern=%d{HH:mm:ss,SSS} (%t) %-5p [%c] - %m%n
+
+# DataNucleus Categories
+log4j.category.DataNucleus.JDO=INFO, A1
+log4j.category.DataNucleus.Cache=INFO, A1
+log4j.category.DataNucleus.MetaData=INFO, A1
+log4j.category.DataNucleus.General=INFO, A1
+log4j.category.DataNucleus.Transaction=INFO, A1
+log4j.category.DataNucleus.Datastore=DEBUG, A1
+log4j.category.DataNucleus.ValueGeneration=DEBUG, A1
+
+log4j.category.DataNucleus.Enhancer=INFO, A1
+log4j.category.DataNucleus.SchemaTool=INFO, A1

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/resources/package.jdo b/standalone-metastore/src/main/resources/package.jdo
index 57e75f8..33b0095 100644
--- a/standalone-metastore/src/main/resources/package.jdo
+++ b/standalone-metastore/src/main/resources/package.jdo
@@ -248,6 +248,18 @@
            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
         </value>
       </field>
+      <field name="description">
+        <column name="DESCRIPTION" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+      </field>
+      <field name="serializerClass">
+        <column name="SERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+      </field>
+      <field name="deserializerClass">
+        <column name="DESERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+      </field>
+      <field name="serdeType">
+        <column name="SERDE_TYPE" jdbc-type="integer" allows-null="true"/>
+      </field>
     </class>
 
     <class name="MOrder" embedded-only="true" table="SORT_ORDER" detachable="true">
@@ -1198,6 +1210,71 @@
       </index>
     </class>
 
+    <class name="MISchema" identity-type="datastore" table="I_SCHEMA" detachable="true">
+      <datastore-identity>
+        <column name="SCHEMA_ID"/>
+      </datastore-identity>
+      <field name="schemaType">
+        <column name="SCHEMA_TYPE" jdbc-type="integer"/>
+      </field>
+      <field name="name">
+        <column name="NAME" jdbc-type="varchar" length="256"/>
+      </field>
+      <field name="db">
+        <column name="DB_ID"/>
+      </field>
+      <field name="compatibility">
+        <column name="COMPATIBILITY" jdbc-type="integer"/>
+      </field>
+      <field name="validationLevel">
+        <column name="VALIDATION_LEVEL" jdbc-type="integer"/>
+      </field>
+      <field name="canEvolve">
+        <column name="CAN_EVOLVE"/>
+      </field>
+      <field name="schemaGroup">
+        <column name="SCHEMA_GROUP" jdbc-type="varchar" length="256" allows-null="true"/>
+      </field>
+      <field name="description">
+        <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+      </field>
+    </class>
+
+    <class name="MSchemaVersion" identity-type="datastore" table="SCHEMA_VERSION" detachable="true">
+      <datastore-identity>
+        <column name="SCHEMA_VERSION_ID"/>
+      </datastore-identity>
+      <field name="iSchema">
+        <column name="SCHEMA_ID"/>
+      </field>
+      <field name="version">
+        <column name="VERSION" jdbc-type="integer"/>
+      </field>
+      <field name="createdAt">
+        <column name="CREATED_AT" jdbc-type="bigint"/>
+      </field>
+      <field name="cols">
+          <column name="CD_ID"/>
+      </field>
+      <field name="state">
+        <column name="STATE" jdbc-type="integer"/>
+      </field>
+      <field name="description">
+        <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+      </field>
+      <field name="schemaText" default-fetch-group="false">
+        <column name="SCHEMA_TEXT" jdbc-type="LONGVARCHAR"/>
+      </field>
+      <field name="fingerprint">
+        <column name="FINGERPRINT" jdbc-type="varchar" length="256" allows-null="true"/>
+      </field>
+      <field name="name">
+        <column name="SCHEMA_VERSION_NAME" jdbc-type="varchar" length="256" allows-null="true"/>
+      </field>
+      <field name="serDe">
+        <column name="SERDE_ID"/>
+      </field>
+    </class>
   </package>
 </jdo>
 

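The mappings above bind MISchema and MSchemaVersion to the new I_SCHEMA and SCHEMA_VERSION tables. As a rough sketch of what persisting one of the model objects through DataNucleus involves (illustrative only; in the real code this happens inside ObjectStore, and the PersistenceManagerFactory and the attached MDatabase are assumed to exist):

import javax.jdo.PersistenceManager;
import javax.jdo.PersistenceManagerFactory;
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
import org.apache.hadoop.hive.metastore.api.SchemaType;
import org.apache.hadoop.hive.metastore.api.SchemaValidation;
import org.apache.hadoop.hive.metastore.model.MDatabase;
import org.apache.hadoop.hive.metastore.model.MISchema;

public class SchemaPersistenceSketch {
  // Persist a new MISchema row; pmf must point at the metastore database.
  static void persistSchema(PersistenceManagerFactory pmf, MDatabase mDb) {
    PersistenceManager pm = pmf.getPersistenceManager();
    try {
      pm.currentTransaction().begin();
      MISchema mSchema = new MISchema(
          SchemaType.AVRO.getValue(),              // stored as an int per the mapping above
          "weblogs",
          mDb,                                     // owning database, already persistent
          SchemaCompatibility.BACKWARD.getValue(),
          SchemaValidation.LATEST.getValue(),
          true,                                    // canEvolve
          null,                                    // schemaGroup
          "click-stream schema");
      pm.makePersistent(mSchema);
      pm.currentTransaction().commit();
    } finally {
      pm.close();
    }
  }
}
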
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
index 1763246..390f708 100644
--- a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -42,7 +42,7 @@ CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCH
 
 CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
 
-CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), "SERDE_TYPE" INTEGER);
 
 CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
 
@@ -502,6 +502,34 @@ CREATE TABLE WRITE_SET (
   WS_OPERATION_TYPE char(1) NOT NULL
 );
 
+CREATE TABLE "APP"."I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" integer not null,
+  "NAME" varchar(256) unique,
+  "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
+  "COMPATIBILITY" integer not null,
+  "VALIDATION_LEVEL" integer not null,
+  "CAN_EVOLVE" char(1) not null,
+  "SCHEMA_GROUP" varchar(256),
+  "DESCRIPTION" varchar(4000)
+);
+
+CREATE TABLE "APP"."SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" integer not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
+  "STATE" integer not null,
+  "DESCRIPTION" varchar(4000),
+  "SCHEMA_TEXT" clob,
+  "FINGERPRINT" varchar(256),
+  "SCHEMA_VERSION_NAME" varchar(256),
+  "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
+);
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------

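For orientation, the UNIQUE_SCHEMA_VERSION index makes (SCHEMA_ID, VERSION) the natural lookup key for versions. A throwaway JDBC probe of the new Derby tables, not metastore code, might fetch the latest version like this (the Connection is assumed to already point at the metastore database):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class SchemaVersionProbe {
  // Highest VERSION recorded for one schema, or -1 when none exist yet.
  static int latestVersion(Connection conn, long schemaId) throws SQLException {
    String sql = "SELECT MAX(\"VERSION\") FROM \"APP\".\"SCHEMA_VERSION\" WHERE \"SCHEMA_ID\" = ?";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setLong(1, schemaId);
      try (ResultSet rs = ps.executeQuery()) {
        rs.next();                      // aggregate query always returns one row
        int v = rs.getInt(1);
        return rs.wasNull() ? -1 : v;   // NULL means no versions recorded yet
      }
    }
  }
}
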
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index 4472b97..c301761 100644
--- a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -43,4 +43,38 @@ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPP
 ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
 
+-- Upgrades for Schema Registry objects
+ALTER TABLE "APP"."SERDES" ADD COLUMN "DESCRIPTION" VARCHAR(4000);
+ALTER TABLE "APP"."SERDES" ADD COLUMN "SERIALIZER_CLASS" VARCHAR(4000);
+ALTER TABLE "APP"."SERDES" ADD COLUMN "DESERIALIZER_CLASS" VARCHAR(4000);
+ALTER TABLE "APP"."SERDES" ADD COLUMN "SERDE_TYPE" INTEGER;
+
+CREATE TABLE "APP"."I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" integer not null,
+  "NAME" varchar(256) unique,
+  "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
+  "COMPATIBILITY" integer not null,
+  "VALIDATION_LEVEL" integer not null,
+  "CAN_EVOLVE" char(1) not null,
+  "SCHEMA_GROUP" varchar(256),
+  "DESCRIPTION" varchar(4000)
+);
+
+CREATE TABLE "APP"."SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" integer not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
+  "STATE" integer not null,
+  "DESCRIPTION" varchar(4000),
+  "SCHEMA_TEXT" clob,
+  "FINGERPRINT" varchar(256),
+  "SCHEMA_VERSION_NAME" varchar(256),
+  "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
+);
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
+
 UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
index e8e0fd2..ecba45a 100644
--- a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -312,7 +312,11 @@ CREATE TABLE SERDES
 (
     SERDE_ID bigint NOT NULL,
     "NAME" nvarchar(128) NULL,
-    SLIB nvarchar(4000) NULL
+    SLIB nvarchar(4000) NULL,
+    "DESCRIPTION" nvarchar(4000),
+    "SERIALIZER_CLASS" nvarchar(4000),
+    "DESERIALIZER_CLASS" nvarchar(4000),
+    "SERDE_TYPE" int
 );
 
 ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
@@ -1106,6 +1110,33 @@ CREATE TABLE METASTORE_DB_PROPERTIES (
 
 ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
 
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" int not null,
+  "NAME" nvarchar(256) unique,
+  "DB_ID" bigint references "DBS" ("DB_ID"),
+  "COMPATIBILITY" int not null,
+  "VALIDATION_LEVEL" int not null,
+  "CAN_EVOLVE" bit not null,
+  "SCHEMA_GROUP" nvarchar(256),
+  "DESCRIPTION" nvarchar(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" int not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "CDS" ("CD_ID"),
+  "STATE" int not null,
+  "DESCRIPTION" nvarchar(4000),
+  "SCHEMA_TEXT" varchar(max),
+  "FINGERPRINT" nvarchar(256),
+  "SCHEMA_VERSION_NAME" nvarchar(256),
+  "SERDE_ID" bigint references "SERDES" ("SERDE_ID"),
+  unique ("SCHEMA_ID", "VERSION")
+);
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
index 60d51ff..600a6bb 100644
--- a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
@@ -102,5 +102,38 @@ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFEREN
 
 ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
 
+-- Upgrades for Schema Registry objects
+ALTER TABLE "SERDES" ADD "DESCRIPTION" nvarchar(4000);
+ALTER TABLE "SERDES" ADD "SERIALIZER_CLASS" nvarchar(4000);
+ALTER TABLE "SERDES" ADD "DESERIALIZER_CLASS" nvarchar(4000);
+ALTER TABLE "SERDES" ADD "SERDE_TYPE" int;
+
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" int not null,
+  "NAME" nvarchar(256) unique,
+  "DB_ID" bigint references "DBS" ("DB_ID"),
+  "COMPATIBILITY" int not null,
+  "VALIDATION_LEVEL" int not null,
+  "CAN_EVOLVE" bit not null,
+  "SCHEMA_GROUP" nvarchar(256),
+  "DESCRIPTION" nvarchar(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" int not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "CDS" ("CD_ID"),
+  "STATE" int not null,
+  "DESCRIPTION" nvarchar(4000),
+  "SCHEMA_TEXT" varchar(max),
+  "FINGERPRINT" nvarchar(256),
+  "SCHEMA_VERSION_NAME" nvarchar(256),
+  "SERDE_ID" bigint references "SERDES" ("SERDE_ID"),
+  unique ("SCHEMA_ID", "VERSION")
+);
+
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
index 8176fff..bbd56f3 100644
--- a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -436,6 +436,10 @@ CREATE TABLE IF NOT EXISTS `SERDES` (
   `SERDE_ID` bigint(20) NOT NULL,
   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_TYPE` integer,
   PRIMARY KEY (`SERDE_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
@@ -1026,6 +1030,40 @@ CREATE TABLE WRITE_SET (
   WS_COMMIT_ID bigint NOT NULL,
   WS_OPERATION_TYPE char(1) NOT NULL
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE `I_SCHEMA` (
+  `SCHEMA_ID` BIGINT PRIMARY KEY,
+  `SCHEMA_TYPE` INTEGER NOT NULL,
+  `NAME` VARCHAR(256),
+  `DB_ID` BIGINT,
+  `COMPATIBILITY` INTEGER NOT NULL,
+  `VALIDATION_LEVEL` INTEGER NOT NULL,
+  `CAN_EVOLVE` bit(1) NOT NULL,
+  `SCHEMA_GROUP` VARCHAR(256),
+  `DESCRIPTION` VARCHAR(4000),
+  FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  UNIQUE KEY `UNIQUE_NAME` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE `SCHEMA_VERSION` (
+  `SCHEMA_VERSION_ID` bigint primary key,
+  `SCHEMA_ID` BIGINT,
+  `VERSION` INTEGER NOT NULL,
+  `CREATED_AT` BIGINT NOT NULL,
+  `CD_ID` BIGINT, 
+  `STATE` INTEGER NOT NULL,
+  `DESCRIPTION` VARCHAR(4000),
+  `SCHEMA_TEXT` mediumtext,
+  `FINGERPRINT` VARCHAR(256),
+  `SCHEMA_VERSION_NAME` VARCHAR(256),
+  `SERDE_ID` bigint, 
+  FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`),
+  FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`),
+  FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  UNIQUE KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
index 63128fb..b740bca 100644
--- a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -85,6 +85,44 @@ CREATE TABLE IF NOT EXISTS WM_MAPPING
     CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+-- Upgrades for Schema Registry objects
+ALTER TABLE `SERDES` ADD COLUMN `DESCRIPTION` VARCHAR(4000);
+ALTER TABLE `SERDES` ADD COLUMN `SERIALIZER_CLASS` VARCHAR(4000);
+ALTER TABLE `SERDES` ADD COLUMN `DESERIALIZER_CLASS` VARCHAR(4000);
+ALTER TABLE `SERDES` ADD COLUMN `SERDE_TYPE` INTEGER;
+
+CREATE TABLE `I_SCHEMA` (
+  `SCHEMA_ID` BIGINT PRIMARY KEY,
+  `SCHEMA_TYPE` INTEGER NOT NULL,
+  `NAME` VARCHAR(256),
+  `DB_ID` BIGINT,
+  `COMPATIBILITY` INTEGER NOT NULL,
+  `VALIDATION_LEVEL` INTEGER NOT NULL,
+  `CAN_EVOLVE` bit(1) NOT NULL,
+  `SCHEMA_GROUP` VARCHAR(256),
+  `DESCRIPTION` VARCHAR(4000),
+  FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  UNIQUE KEY `UNIQUE_NAME` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE `SCHEMA_VERSION` (
+  `SCHEMA_VERSION_ID` bigint primary key,
+  `SCHEMA_ID` BIGINT,
+  `VERSION` INTEGER NOT NULL,
+  `CREATED_AT` BIGINT NOT NULL,
+  `CD_ID` BIGINT, 
+  `STATE` INTEGER NOT NULL,
+  `DESCRIPTION` VARCHAR(4000),
+  `SCHEMA_TEXT` mediumtext,
+  `FINGERPRINT` VARCHAR(256),
+  `SCHEMA_VERSION_NAME` VARCHAR(256),
+  `SERDE_ID` bigint, 
+  FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`),
+  FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`),
+  FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  UNIQUE KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
index feaf2a6..62614e4 100644
--- a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -100,7 +100,11 @@ CREATE TABLE SERDES
 (
     SERDE_ID NUMBER NOT NULL,
     "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
+    SLIB VARCHAR2(4000) NULL,
+    "DESCRIPTION" VARCHAR2(4000),
+    "SERIALIZER_CLASS" VARCHAR2(4000),
+    "DESERIALIZER_CLASS" VARCHAR2(4000),
+    "SERDE_TYPE" NUMBER
 );
 
 ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
@@ -1008,6 +1012,33 @@ CREATE TABLE WRITE_SET (
   WS_OPERATION_TYPE char(1) NOT NULL
 );
 
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" number primary key,
+  "SCHEMA_TYPE" number not null,
+  "NAME" varchar2(256) unique,
+  "DB_ID" number references "DBS" ("DB_ID"),
+  "COMPATIBILITY" number not null,
+  "VALIDATION_LEVEL" number not null,
+  "CAN_EVOLVE" number(1) not null,
+  "SCHEMA_GROUP" varchar2(256),
+  "DESCRIPTION" varchar2(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" number primary key,
+  "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" number not null,
+  "CREATED_AT" number not null,
+  "CD_ID" number references "CDS" ("CD_ID"), 
+  "STATE" number not null,
+  "DESCRIPTION" varchar2(4000),
+  "SCHEMA_TEXT" clob,
+  "FINGERPRINT" varchar2(256),
+  "SCHEMA_VERSION_NAME" varchar2(256),
+  "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+  UNIQUE ("SCHEMA_ID", "VERSION")
+);
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index a24948a..cf0d404 100644
--- a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -103,5 +103,39 @@ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFEREN
 
 ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
 
+-- Upgrades for Schema Registry objects
+ALTER TABLE "SERDES" ADD "DESCRIPTION" VARCHAR2(4000);
+ALTER TABLE "SERDES" ADD "SERIALIZER_CLASS" VARCHAR2(4000);
+ALTER TABLE "SERDES" ADD "DESERIALIZER_CLASS" VARCHAR2(4000);
+ALTER TABLE "SERDES" ADD "SERDE_TYPE" NUMBER;
+
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" number primary key,
+  "SCHEMA_TYPE" number not null,
+  "NAME" varchar2(256) unique,
+  "DB_ID" number references "DBS" ("DB_ID"),
+  "COMPATIBILITY" number not null,
+  "VALIDATION_LEVEL" number not null,
+  "CAN_EVOLVE" number(1) not null,
+  "SCHEMA_GROUP" varchar2(256),
+  "DESCRIPTION" varchar2(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" number primary key,
+  "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" number not null,
+  "CREATED_AT" number not null,
+  "CD_ID" number references "CDS" ("CD_ID"), 
+  "STATE" number not null,
+  "DESCRIPTION" varchar2(4000),
+  "SCHEMA_TEXT" clob,
+  "FINGERPRINT" varchar2(256),
+  "SCHEMA_VERSION_NAME" varchar2(256),
+  "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+  UNIQUE ("SCHEMA_ID", "VERSION")
+);
+
+
 UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
index 4bb3631..91fa9a9 100644
--- a/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql
@@ -319,7 +319,11 @@ CREATE TABLE "SEQUENCE_TABLE" (
 CREATE TABLE "SERDES" (
     "SERDE_ID" bigint NOT NULL,
     "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
+    "SLIB" character varying(4000) DEFAULT NULL::character varying,
+    "DESCRIPTION" varchar(4000),
+    "SERIALIZER_CLASS" varchar(4000),
+    "DESERIALIZER_CLASS" varchar(4000),
+    "SERDE_TYPE" integer
 );
 
 
@@ -1693,6 +1697,34 @@ CREATE TABLE WRITE_SET (
   WS_OPERATION_TYPE char(1) NOT NULL
 );
 
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" integer not null,
+  "NAME" varchar(256) unique,
+  "DB_ID" bigint references "DBS" ("DB_ID"),
+  "COMPATIBILITY" integer not null,
+  "VALIDATION_LEVEL" integer not null,
+  "CAN_EVOLVE" boolean not null,
+  "SCHEMA_GROUP" varchar(256),
+  "DESCRIPTION" varchar(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" integer not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "CDS" ("CD_ID"), 
+  "STATE" integer not null,
+  "DESCRIPTION" varchar(4000),
+  "SCHEMA_TEXT" text,
+  "FINGERPRINT" varchar(256),
+  "SCHEMA_VERSION_NAME" varchar(256),
+  "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), 
+  unique ("SCHEMA_ID", "VERSION")
+);
+
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 -- -----------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
index 81f5a66..2238c80 100644
--- a/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
+++ b/standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql
@@ -116,6 +116,40 @@ ALTER TABLE ONLY "WM_MAPPING"
 ALTER TABLE ONLY "WM_MAPPING"
     ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
 
+-- Upgrades for Schema Registry objects
+ALTER TABLE "SERDES" ADD COLUMN "DESCRIPTION" VARCHAR(4000);
+ALTER TABLE "SERDES" ADD COLUMN "SERIALIZER_CLASS" VARCHAR(4000);
+ALTER TABLE "SERDES" ADD COLUMN "DESERIALIZER_CLASS" VARCHAR(4000);
+ALTER TABLE "SERDES" ADD COLUMN "SERDE_TYPE" INTEGER;
+
+CREATE TABLE "I_SCHEMA" (
+  "SCHEMA_ID" bigint primary key,
+  "SCHEMA_TYPE" integer not null,
+  "NAME" varchar(256) unique,
+  "DB_ID" bigint references "DBS" ("DB_ID"),
+  "COMPATIBILITY" integer not null,
+  "VALIDATION_LEVEL" integer not null,
+  "CAN_EVOLVE" boolean not null,
+  "SCHEMA_GROUP" varchar(256),
+  "DESCRIPTION" varchar(4000)
+);
+
+CREATE TABLE "SCHEMA_VERSION" (
+  "SCHEMA_VERSION_ID" bigint primary key,
+  "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+  "VERSION" integer not null,
+  "CREATED_AT" bigint not null,
+  "CD_ID" bigint references "CDS" ("CD_ID"), 
+  "STATE" integer not null,
+  "DESCRIPTION" varchar(4000),
+  "SCHEMA_TEXT" text,
+  "FINGERPRINT" varchar(256),
+  "SCHEMA_VERSION_NAME" varchar(256),
+  "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), 
+  unique ("SCHEMA_ID", "VERSION")
+);
+
+
 UPDATE "VERSION" SET "SCHEMA_VERSION"='3.0.0', "VERSION_COMMENT"='Hive release version 3.0.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/thrift/hive_metastore.thrift b/standalone-metastore/src/main/thrift/hive_metastore.thrift
index 1085ce5..cef1210 100644
--- a/standalone-metastore/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/src/main/thrift/hive_metastore.thrift
@@ -172,6 +172,39 @@ enum EventRequestType {
     DELETE = 3,
 }
 
+enum SerdeType {
+  HIVE = 1,
+  SCHEMA_REGISTRY = 2,
+}
+
+enum SchemaType {
+  HIVE = 1,
+  AVRO = 2,
+}
+
+enum SchemaCompatibility {
+  NONE = 1,
+  BACKWARD = 2,
+  FORWARD = 3,
+  BOTH = 4
+}
+
+enum SchemaValidation {
+  LATEST = 1,
+  ALL = 2
+}
+
+enum SchemaVersionState {
+  INITIATED = 1,
+  START_REVIEW = 2,
+  CHANGES_REQUIRED = 3,
+  REVIEWED = 4,
+  ENABLED = 5,
+  DISABLED = 6,
+  ARCHIVED = 7,
+  DELETED = 8
+}
+
 struct HiveObjectRef{
   1: HiveObjectType objectType,
   2: string dbName,
@@ -278,7 +311,11 @@ struct Database {
 struct SerDeInfo {
   1: string name,                   // name of the serde, table name by default
   2: string serializationLib,       // usually the class that implements the extractor & loader
-  3: map<string, string> parameters // initialization parameters
+  3: map<string, string> parameters, // initialization parameters
+  4: optional string description,
+  5: optional string serializerClass,
+  6: optional string deserializerClass,
+  7: optional SerdeType serdeType
 }
 
 // sort order of a column (column name along with asc(1)/desc(0))
@@ -1216,6 +1253,47 @@ struct WMCreateOrDropTriggerToPoolMappingRequest {
 struct WMCreateOrDropTriggerToPoolMappingResponse {
 }
 
+// Schema objects
+// Schema is already taken, so for the moment I'm calling it an ISchema for Independent Schema
+struct ISchema {
+  1: SchemaType schemaType,
+  2: string name,
+  3: string dbName,
+  4: SchemaCompatibility compatibility,
+  5: SchemaValidation validationLevel,
+  6: bool canEvolve,
+  7: optional string schemaGroup,
+  8: optional string description
+}
+
+struct SchemaVersion {
+  1:  string schemaName,
+  2:  i32 version,
+  3:  i64 createdAt,
+  4:  list<FieldSchema> cols,
+  5:  optional SchemaVersionState state,
+  6:  optional string description,
+  7:  optional string schemaText,
+  8:  optional string fingerprint,
+  9:  optional string name,
+  10: optional SerDeInfo serDe
+}
+
+struct FindSchemasByColsRqst {
+  1: optional string colName,
+  2: optional string colNamespace,
+  3: optional string type
+}
+
+struct FindSchemasByColsRespEntry {
+  1: string schemaName,
+  2: i32 version
+}
+
+struct FindSchemasByColsResp {
+  1: list<FindSchemasByColsRespEntry> schemaVersions
+}
+
 // Exceptions.
 
 exception MetaException {
@@ -1832,6 +1910,38 @@ service ThriftHiveMetastore extends fb303.FacebookService
 
   WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(1:WMCreateOrDropTriggerToPoolMappingRequest request)
       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+
+  // Schema calls
+  void create_ischema(1:ISchema schema) throws(1:AlreadyExistsException o1,
+        2:NoSuchObjectException o2, 3:MetaException o3)
+  void alter_ischema(1:string schemaName, 2:ISchema newSchema)
+        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  ISchema get_ischema(1:string schemaName) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+  void drop_ischema(1:string schemaName)
+        throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+
+  void add_schema_version(1:SchemaVersion schemaVersion)
+        throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:MetaException o3)
+  SchemaVersion get_schema_version(1: string schemaName, 2: i32 version)
+        throws (1:NoSuchObjectException o1, 2:MetaException o2)
+  SchemaVersion get_schema_latest_version(1: string schemaName)
+        throws (1:NoSuchObjectException o1, 2:MetaException o2)
+  list<SchemaVersion> get_schema_all_versions(1: string schemaName)
+        throws (1:NoSuchObjectException o1, 2:MetaException o2)
+  void drop_schema_version(1: string schemaName, 2: i32 version)
+        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  FindSchemasByColsResp get_schemas_by_cols(1: FindSchemasByColsRqst rqst)
+        throws(1:MetaException o1)
+  // There is no blanket update of SchemaVersion since it is (mostly) immutable.  The only
+  // updates are the specific ones to associate a version with a serde and to change its state
+  void map_schema_version_to_serde(1: string schemaName, 2: i32 version, 3: string serdeName)
+        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  void set_schema_version_state(1: string schemaName, 2: i32 version, 3: SchemaVersionState state)
+        throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+
+  void add_serde(1: SerDeInfo serde) throws(1:AlreadyExistsException o1, 2:MetaException o2)
+  SerDeInfo get_serde(1: string serdeName) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+
 }
 
 // * Note about the DDL_TIME: When creating or altering a table or a partition,

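Putting the new structs and calls together, a create-then-append workflow against a raw generated client could look like the sketch below. None of this wrapper code is in the patch; the Client class and the setter/constructor names follow standard Thrift Java codegen and are assumed here.

import java.util.Collections;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
import org.apache.hadoop.hive.metastore.api.SchemaType;
import org.apache.hadoop.hive.metastore.api.SchemaValidation;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public class SchemaRegistrationSketch {
  static void registerSchema(ThriftHiveMetastore.Client client) throws Exception {
    // Create the schema container first.
    ISchema schema = new ISchema();
    schema.setSchemaType(SchemaType.AVRO);
    schema.setName("weblogs");
    schema.setDbName("default");
    schema.setCompatibility(SchemaCompatibility.BACKWARD);
    schema.setValidationLevel(SchemaValidation.LATEST);
    schema.setCanEvolve(true);
    client.create_ischema(schema);

    // Then append its first version.
    SchemaVersion v1 = new SchemaVersion();
    v1.setSchemaName("weblogs");
    v1.setVersion(1);
    v1.setCreatedAt(System.currentTimeMillis() / 1000);
    v1.setCols(Collections.singletonList(new FieldSchema("ip", "string", null)));
    client.add_schema_version(v1);

    // Versions are mostly immutable; only serde mapping and state may change.
    client.set_schema_version_state("weblogs", 1, SchemaVersionState.INITIATED);
  }
}
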
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 24c59f2..e7277e2 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -64,6 +65,8 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -1050,4 +1053,74 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
   }
+
+  @Override
+  public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+      NoSuchObjectException {
+    objectStore.createISchema(schema);
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema) throws NoSuchObjectException,
+      MetaException {
+    objectStore.alterISchema(schemaName, newSchema);
+  }
+
+  @Override
+  public ISchema getISchema(String schemaName) throws MetaException {
+    return objectStore.getISchema(schemaName);
+  }
+
+  @Override
+  public void dropISchema(String schemaName) throws NoSuchObjectException, MetaException {
+    objectStore.dropISchema(schemaName);
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion) throws
+      AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+    objectStore.addSchemaVersion(schemaVersion);
+  }
+
+  @Override
+  public void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion) throws
+      NoSuchObjectException, MetaException {
+    objectStore.alterSchemaVersion(schemaName, version, newVersion);
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException {
+    return objectStore.getSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException {
+    return objectStore.getLatestSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException {
+    return objectStore.getAllSchemaVersion(schemaName);
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                        String type) throws MetaException {
+    return objectStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException,
+      MetaException {
+    objectStore.dropSchemaVersion(schemaName, version);
+  }
+
+  @Override
+  public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+    return objectStore.getSerDeInfo(serDeName);
+  }
+
+  @Override
+  public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+    objectStore.addSerde(serde);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 1e4fe5d..636c0ab 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -62,6 +63,8 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -1046,5 +1049,76 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   @Override
   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+
+  }
+
+  @Override
+  public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException {
+
+  }
+
+  @Override
+  public void alterISchema(String schemaName, ISchema newSchema) throws NoSuchObjectException,
+      MetaException {
+
+  }
+
+  @Override
+  public ISchema getISchema(String schemaName) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public void dropISchema(String schemaName) throws NoSuchObjectException, MetaException {
+
+  }
+
+  @Override
+  public void addSchemaVersion(SchemaVersion schemaVersion) throws
+      AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+
+  }
+
+  @Override
+  public void alterSchemaVersion(String schemaName, int version, SchemaVersion newVersion) throws
+      NoSuchObjectException, MetaException {
+
+  }
+
+  @Override
+  public SchemaVersion getSchemaVersion(String schemaName, int version) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public SchemaVersion getLatestSchemaVersion(String schemaName) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public List<SchemaVersion> getAllSchemaVersion(String schemaName) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                        String type) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public void dropSchemaVersion(String schemaName, int version) throws NoSuchObjectException,
+      MetaException {
+
+  }
+
+  @Override
+  public SerDeInfo getSerDeInfo(String serDeName) throws MetaException {
+    return null;
+  }
+
+  @Override
+  public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+
   }
 }
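
Every stub above is deliberately a no-op or a null return: DummyRawStoreForJdoConnection exists so tests that only need a JDO connection still compile against the full RawStore interface as it grows. As a hedged sketch (the subclass name is invented), a test that cares about a single method can extend the dummy and override just that method:

import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

// Hedged sketch: a focused test double built on the dummy store.
class OneSchemaStore extends DummyRawStoreForJdoConnection {
  @Override
  public ISchema getISchema(String schemaName) throws MetaException {
    // Return a canned Thrift bean instead of null; populate whatever
    // fields the test asserts on (no-arg constructor is generated).
    return new ISchema();
  }
}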


[49/50] [abbrv] hive git commit: HIVE-17990: Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
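
The ThriftHiveMetastore.cpp changes below are pure compiler output: regenerating after the schema-registry additions shifted the Thrift generator's temporary-variable counter, so _size1018 becomes _size1045 and so on, while the serialization logic itself is untouched. Each hunk is the generator's standard container pattern: read the container header, loop over the declared number of elements, close the container (map hunks use readMapBegin/readMapEnd analogously). A hedged Java sketch of the same list<string> read pattern follows; the class and helper names are invented, but the TProtocol calls are the real org.apache.thrift API.

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

class ListReadPattern {
  // Mirrors the generated C++ hunks below: container header, sized loop,
  // container footer.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList listMeta = iprot.readListBegin();   // element type and size
    List<String> out = new ArrayList<>(listMeta.size);
    for (int i = 0; i < listMeta.size; ++i) {
      out.add(iprot.readString());            // one element per iteration
    }
    iprot.readListEnd();
    return out;
  }
}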
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index dede79b..f667ba0 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1018;
-            ::apache::thrift::protocol::TType _etype1021;
-            xfer += iprot->readListBegin(_etype1021, _size1018);
-            this->success.resize(_size1018);
-            uint32_t _i1022;
-            for (_i1022 = 0; _i1022 < _size1018; ++_i1022)
+            uint32_t _size1045;
+            ::apache::thrift::protocol::TType _etype1048;
+            xfer += iprot->readListBegin(_etype1048, _size1045);
+            this->success.resize(_size1045);
+            uint32_t _i1049;
+            for (_i1049 = 0; _i1049 < _size1045; ++_i1049)
             {
-              xfer += iprot->readString(this->success[_i1022]);
+              xfer += iprot->readString(this->success[_i1049]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1023;
-      for (_iter1023 = this->success.begin(); _iter1023 != this->success.end(); ++_iter1023)
+      std::vector<std::string> ::const_iterator _iter1050;
+      for (_iter1050 = this->success.begin(); _iter1050 != this->success.end(); ++_iter1050)
       {
-        xfer += oprot->writeString((*_iter1023));
+        xfer += oprot->writeString((*_iter1050));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1024;
-            ::apache::thrift::protocol::TType _etype1027;
-            xfer += iprot->readListBegin(_etype1027, _size1024);
-            (*(this->success)).resize(_size1024);
-            uint32_t _i1028;
-            for (_i1028 = 0; _i1028 < _size1024; ++_i1028)
+            uint32_t _size1051;
+            ::apache::thrift::protocol::TType _etype1054;
+            xfer += iprot->readListBegin(_etype1054, _size1051);
+            (*(this->success)).resize(_size1051);
+            uint32_t _i1055;
+            for (_i1055 = 0; _i1055 < _size1051; ++_i1055)
             {
-              xfer += iprot->readString((*(this->success))[_i1028]);
+              xfer += iprot->readString((*(this->success))[_i1055]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1029;
-            ::apache::thrift::protocol::TType _etype1032;
-            xfer += iprot->readListBegin(_etype1032, _size1029);
-            this->success.resize(_size1029);
-            uint32_t _i1033;
-            for (_i1033 = 0; _i1033 < _size1029; ++_i1033)
+            uint32_t _size1056;
+            ::apache::thrift::protocol::TType _etype1059;
+            xfer += iprot->readListBegin(_etype1059, _size1056);
+            this->success.resize(_size1056);
+            uint32_t _i1060;
+            for (_i1060 = 0; _i1060 < _size1056; ++_i1060)
             {
-              xfer += iprot->readString(this->success[_i1033]);
+              xfer += iprot->readString(this->success[_i1060]);
             }
             xfer += iprot->readListEnd();
           }
@@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1034;
-      for (_iter1034 = this->success.begin(); _iter1034 != this->success.end(); ++_iter1034)
+      std::vector<std::string> ::const_iterator _iter1061;
+      for (_iter1061 = this->success.begin(); _iter1061 != this->success.end(); ++_iter1061)
       {
-        xfer += oprot->writeString((*_iter1034));
+        xfer += oprot->writeString((*_iter1061));
       }
       xfer += oprot->writeListEnd();
     }
@@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1035;
-            ::apache::thrift::protocol::TType _etype1038;
-            xfer += iprot->readListBegin(_etype1038, _size1035);
-            (*(this->success)).resize(_size1035);
-            uint32_t _i1039;
-            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
+            uint32_t _size1062;
+            ::apache::thrift::protocol::TType _etype1065;
+            xfer += iprot->readListBegin(_etype1065, _size1062);
+            (*(this->success)).resize(_size1062);
+            uint32_t _i1066;
+            for (_i1066 = 0; _i1066 < _size1062; ++_i1066)
             {
-              xfer += iprot->readString((*(this->success))[_i1039]);
+              xfer += iprot->readString((*(this->success))[_i1066]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size1040;
-            ::apache::thrift::protocol::TType _ktype1041;
-            ::apache::thrift::protocol::TType _vtype1042;
-            xfer += iprot->readMapBegin(_ktype1041, _vtype1042, _size1040);
-            uint32_t _i1044;
-            for (_i1044 = 0; _i1044 < _size1040; ++_i1044)
+            uint32_t _size1067;
+            ::apache::thrift::protocol::TType _ktype1068;
+            ::apache::thrift::protocol::TType _vtype1069;
+            xfer += iprot->readMapBegin(_ktype1068, _vtype1069, _size1067);
+            uint32_t _i1071;
+            for (_i1071 = 0; _i1071 < _size1067; ++_i1071)
             {
-              std::string _key1045;
-              xfer += iprot->readString(_key1045);
-              Type& _val1046 = this->success[_key1045];
-              xfer += _val1046.read(iprot);
+              std::string _key1072;
+              xfer += iprot->readString(_key1072);
+              Type& _val1073 = this->success[_key1072];
+              xfer += _val1073.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Type> ::const_iterator _iter1047;
-      for (_iter1047 = this->success.begin(); _iter1047 != this->success.end(); ++_iter1047)
+      std::map<std::string, Type> ::const_iterator _iter1074;
+      for (_iter1074 = this->success.begin(); _iter1074 != this->success.end(); ++_iter1074)
       {
-        xfer += oprot->writeString(_iter1047->first);
-        xfer += _iter1047->second.write(oprot);
+        xfer += oprot->writeString(_iter1074->first);
+        xfer += _iter1074->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size1048;
-            ::apache::thrift::protocol::TType _ktype1049;
-            ::apache::thrift::protocol::TType _vtype1050;
-            xfer += iprot->readMapBegin(_ktype1049, _vtype1050, _size1048);
-            uint32_t _i1052;
-            for (_i1052 = 0; _i1052 < _size1048; ++_i1052)
+            uint32_t _size1075;
+            ::apache::thrift::protocol::TType _ktype1076;
+            ::apache::thrift::protocol::TType _vtype1077;
+            xfer += iprot->readMapBegin(_ktype1076, _vtype1077, _size1075);
+            uint32_t _i1079;
+            for (_i1079 = 0; _i1079 < _size1075; ++_i1079)
             {
-              std::string _key1053;
-              xfer += iprot->readString(_key1053);
-              Type& _val1054 = (*(this->success))[_key1053];
-              xfer += _val1054.read(iprot);
+              std::string _key1080;
+              xfer += iprot->readString(_key1080);
+              Type& _val1081 = (*(this->success))[_key1080];
+              xfer += _val1081.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1055;
-            ::apache::thrift::protocol::TType _etype1058;
-            xfer += iprot->readListBegin(_etype1058, _size1055);
-            this->success.resize(_size1055);
-            uint32_t _i1059;
-            for (_i1059 = 0; _i1059 < _size1055; ++_i1059)
+            uint32_t _size1082;
+            ::apache::thrift::protocol::TType _etype1085;
+            xfer += iprot->readListBegin(_etype1085, _size1082);
+            this->success.resize(_size1082);
+            uint32_t _i1086;
+            for (_i1086 = 0; _i1086 < _size1082; ++_i1086)
             {
-              xfer += this->success[_i1059].read(iprot);
+              xfer += this->success[_i1086].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1060;
-      for (_iter1060 = this->success.begin(); _iter1060 != this->success.end(); ++_iter1060)
+      std::vector<FieldSchema> ::const_iterator _iter1087;
+      for (_iter1087 = this->success.begin(); _iter1087 != this->success.end(); ++_iter1087)
       {
-        xfer += (*_iter1060).write(oprot);
+        xfer += (*_iter1087).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1061;
-            ::apache::thrift::protocol::TType _etype1064;
-            xfer += iprot->readListBegin(_etype1064, _size1061);
-            (*(this->success)).resize(_size1061);
-            uint32_t _i1065;
-            for (_i1065 = 0; _i1065 < _size1061; ++_i1065)
+            uint32_t _size1088;
+            ::apache::thrift::protocol::TType _etype1091;
+            xfer += iprot->readListBegin(_etype1091, _size1088);
+            (*(this->success)).resize(_size1088);
+            uint32_t _i1092;
+            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
             {
-              xfer += (*(this->success))[_i1065].read(iprot);
+              xfer += (*(this->success))[_i1092].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1066;
-            ::apache::thrift::protocol::TType _etype1069;
-            xfer += iprot->readListBegin(_etype1069, _size1066);
-            this->success.resize(_size1066);
-            uint32_t _i1070;
-            for (_i1070 = 0; _i1070 < _size1066; ++_i1070)
+            uint32_t _size1093;
+            ::apache::thrift::protocol::TType _etype1096;
+            xfer += iprot->readListBegin(_etype1096, _size1093);
+            this->success.resize(_size1093);
+            uint32_t _i1097;
+            for (_i1097 = 0; _i1097 < _size1093; ++_i1097)
             {
-              xfer += this->success[_i1070].read(iprot);
+              xfer += this->success[_i1097].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1071;
-      for (_iter1071 = this->success.begin(); _iter1071 != this->success.end(); ++_iter1071)
+      std::vector<FieldSchema> ::const_iterator _iter1098;
+      for (_iter1098 = this->success.begin(); _iter1098 != this->success.end(); ++_iter1098)
       {
-        xfer += (*_iter1071).write(oprot);
+        xfer += (*_iter1098).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1072;
-            ::apache::thrift::protocol::TType _etype1075;
-            xfer += iprot->readListBegin(_etype1075, _size1072);
-            (*(this->success)).resize(_size1072);
-            uint32_t _i1076;
-            for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
+            uint32_t _size1099;
+            ::apache::thrift::protocol::TType _etype1102;
+            xfer += iprot->readListBegin(_etype1102, _size1099);
+            (*(this->success)).resize(_size1099);
+            uint32_t _i1103;
+            for (_i1103 = 0; _i1103 < _size1099; ++_i1103)
             {
-              xfer += (*(this->success))[_i1076].read(iprot);
+              xfer += (*(this->success))[_i1103].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1077;
-            ::apache::thrift::protocol::TType _etype1080;
-            xfer += iprot->readListBegin(_etype1080, _size1077);
-            this->success.resize(_size1077);
-            uint32_t _i1081;
-            for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
+            uint32_t _size1104;
+            ::apache::thrift::protocol::TType _etype1107;
+            xfer += iprot->readListBegin(_etype1107, _size1104);
+            this->success.resize(_size1104);
+            uint32_t _i1108;
+            for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
             {
-              xfer += this->success[_i1081].read(iprot);
+              xfer += this->success[_i1108].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1082;
-      for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082)
+      std::vector<FieldSchema> ::const_iterator _iter1109;
+      for (_iter1109 = this->success.begin(); _iter1109 != this->success.end(); ++_iter1109)
       {
-        xfer += (*_iter1082).write(oprot);
+        xfer += (*_iter1109).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1083;
-            ::apache::thrift::protocol::TType _etype1086;
-            xfer += iprot->readListBegin(_etype1086, _size1083);
-            (*(this->success)).resize(_size1083);
-            uint32_t _i1087;
-            for (_i1087 = 0; _i1087 < _size1083; ++_i1087)
+            uint32_t _size1110;
+            ::apache::thrift::protocol::TType _etype1113;
+            xfer += iprot->readListBegin(_etype1113, _size1110);
+            (*(this->success)).resize(_size1110);
+            uint32_t _i1114;
+            for (_i1114 = 0; _i1114 < _size1110; ++_i1114)
             {
-              xfer += (*(this->success))[_i1087].read(iprot);
+              xfer += (*(this->success))[_i1114].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1088;
-            ::apache::thrift::protocol::TType _etype1091;
-            xfer += iprot->readListBegin(_etype1091, _size1088);
-            this->success.resize(_size1088);
-            uint32_t _i1092;
-            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
+            uint32_t _size1115;
+            ::apache::thrift::protocol::TType _etype1118;
+            xfer += iprot->readListBegin(_etype1118, _size1115);
+            this->success.resize(_size1115);
+            uint32_t _i1119;
+            for (_i1119 = 0; _i1119 < _size1115; ++_i1119)
             {
-              xfer += this->success[_i1092].read(iprot);
+              xfer += this->success[_i1119].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1093;
-      for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093)
+      std::vector<FieldSchema> ::const_iterator _iter1120;
+      for (_iter1120 = this->success.begin(); _iter1120 != this->success.end(); ++_iter1120)
       {
-        xfer += (*_iter1093).write(oprot);
+        xfer += (*_iter1120).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1094;
-            ::apache::thrift::protocol::TType _etype1097;
-            xfer += iprot->readListBegin(_etype1097, _size1094);
-            (*(this->success)).resize(_size1094);
-            uint32_t _i1098;
-            for (_i1098 = 0; _i1098 < _size1094; ++_i1098)
+            uint32_t _size1121;
+            ::apache::thrift::protocol::TType _etype1124;
+            xfer += iprot->readListBegin(_etype1124, _size1121);
+            (*(this->success)).resize(_size1121);
+            uint32_t _i1125;
+            for (_i1125 = 0; _i1125 < _size1121; ++_i1125)
             {
-              xfer += (*(this->success))[_i1098].read(iprot);
+              xfer += (*(this->success))[_i1125].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->primaryKeys.clear();
-            uint32_t _size1099;
-            ::apache::thrift::protocol::TType _etype1102;
-            xfer += iprot->readListBegin(_etype1102, _size1099);
-            this->primaryKeys.resize(_size1099);
-            uint32_t _i1103;
-            for (_i1103 = 0; _i1103 < _size1099; ++_i1103)
+            uint32_t _size1126;
+            ::apache::thrift::protocol::TType _etype1129;
+            xfer += iprot->readListBegin(_etype1129, _size1126);
+            this->primaryKeys.resize(_size1126);
+            uint32_t _i1130;
+            for (_i1130 = 0; _i1130 < _size1126; ++_i1130)
             {
-              xfer += this->primaryKeys[_i1103].read(iprot);
+              xfer += this->primaryKeys[_i1130].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->foreignKeys.clear();
-            uint32_t _size1104;
-            ::apache::thrift::protocol::TType _etype1107;
-            xfer += iprot->readListBegin(_etype1107, _size1104);
-            this->foreignKeys.resize(_size1104);
-            uint32_t _i1108;
-            for (_i1108 = 0; _i1108 < _size1104; ++_i1108)
+            uint32_t _size1131;
+            ::apache::thrift::protocol::TType _etype1134;
+            xfer += iprot->readListBegin(_etype1134, _size1131);
+            this->foreignKeys.resize(_size1131);
+            uint32_t _i1135;
+            for (_i1135 = 0; _i1135 < _size1131; ++_i1135)
             {
-              xfer += this->foreignKeys[_i1108].read(iprot);
+              xfer += this->foreignKeys[_i1135].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4558,14 +4558,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->uniqueConstraints.clear();
-            uint32_t _size1109;
-            ::apache::thrift::protocol::TType _etype1112;
-            xfer += iprot->readListBegin(_etype1112, _size1109);
-            this->uniqueConstraints.resize(_size1109);
-            uint32_t _i1113;
-            for (_i1113 = 0; _i1113 < _size1109; ++_i1113)
+            uint32_t _size1136;
+            ::apache::thrift::protocol::TType _etype1139;
+            xfer += iprot->readListBegin(_etype1139, _size1136);
+            this->uniqueConstraints.resize(_size1136);
+            uint32_t _i1140;
+            for (_i1140 = 0; _i1140 < _size1136; ++_i1140)
             {
-              xfer += this->uniqueConstraints[_i1113].read(iprot);
+              xfer += this->uniqueConstraints[_i1140].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4578,14 +4578,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->notNullConstraints.clear();
-            uint32_t _size1114;
-            ::apache::thrift::protocol::TType _etype1117;
-            xfer += iprot->readListBegin(_etype1117, _size1114);
-            this->notNullConstraints.resize(_size1114);
-            uint32_t _i1118;
-            for (_i1118 = 0; _i1118 < _size1114; ++_i1118)
+            uint32_t _size1141;
+            ::apache::thrift::protocol::TType _etype1144;
+            xfer += iprot->readListBegin(_etype1144, _size1141);
+            this->notNullConstraints.resize(_size1141);
+            uint32_t _i1145;
+            for (_i1145 = 0; _i1145 < _size1141; ++_i1145)
             {
-              xfer += this->notNullConstraints[_i1118].read(iprot);
+              xfer += this->notNullConstraints[_i1145].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4618,10 +4618,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter1119;
-    for (_iter1119 = this->primaryKeys.begin(); _iter1119 != this->primaryKeys.end(); ++_iter1119)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter1146;
+    for (_iter1146 = this->primaryKeys.begin(); _iter1146 != this->primaryKeys.end(); ++_iter1146)
     {
-      xfer += (*_iter1119).write(oprot);
+      xfer += (*_iter1146).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4630,10 +4630,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter1120;
-    for (_iter1120 = this->foreignKeys.begin(); _iter1120 != this->foreignKeys.end(); ++_iter1120)
+    std::vector<SQLForeignKey> ::const_iterator _iter1147;
+    for (_iter1147 = this->foreignKeys.begin(); _iter1147 != this->foreignKeys.end(); ++_iter1147)
     {
-      xfer += (*_iter1120).write(oprot);
+      xfer += (*_iter1147).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4642,10 +4642,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
-    std::vector<SQLUniqueConstraint> ::const_iterator _iter1121;
-    for (_iter1121 = this->uniqueConstraints.begin(); _iter1121 != this->uniqueConstraints.end(); ++_iter1121)
+    std::vector<SQLUniqueConstraint> ::const_iterator _iter1148;
+    for (_iter1148 = this->uniqueConstraints.begin(); _iter1148 != this->uniqueConstraints.end(); ++_iter1148)
     {
-      xfer += (*_iter1121).write(oprot);
+      xfer += (*_iter1148).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4654,10 +4654,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
-    std::vector<SQLNotNullConstraint> ::const_iterator _iter1122;
-    for (_iter1122 = this->notNullConstraints.begin(); _iter1122 != this->notNullConstraints.end(); ++_iter1122)
+    std::vector<SQLNotNullConstraint> ::const_iterator _iter1149;
+    for (_iter1149 = this->notNullConstraints.begin(); _iter1149 != this->notNullConstraints.end(); ++_iter1149)
     {
-      xfer += (*_iter1122).write(oprot);
+      xfer += (*_iter1149).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4685,10 +4685,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter1123;
-    for (_iter1123 = (*(this->primaryKeys)).begin(); _iter1123 != (*(this->primaryKeys)).end(); ++_iter1123)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter1150;
+    for (_iter1150 = (*(this->primaryKeys)).begin(); _iter1150 != (*(this->primaryKeys)).end(); ++_iter1150)
     {
-      xfer += (*_iter1123).write(oprot);
+      xfer += (*_iter1150).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4697,10 +4697,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter1124;
-    for (_iter1124 = (*(this->foreignKeys)).begin(); _iter1124 != (*(this->foreignKeys)).end(); ++_iter1124)
+    std::vector<SQLForeignKey> ::const_iterator _iter1151;
+    for (_iter1151 = (*(this->foreignKeys)).begin(); _iter1151 != (*(this->foreignKeys)).end(); ++_iter1151)
     {
-      xfer += (*_iter1124).write(oprot);
+      xfer += (*_iter1151).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4709,10 +4709,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
-    std::vector<SQLUniqueConstraint> ::const_iterator _iter1125;
-    for (_iter1125 = (*(this->uniqueConstraints)).begin(); _iter1125 != (*(this->uniqueConstraints)).end(); ++_iter1125)
+    std::vector<SQLUniqueConstraint> ::const_iterator _iter1152;
+    for (_iter1152 = (*(this->uniqueConstraints)).begin(); _iter1152 != (*(this->uniqueConstraints)).end(); ++_iter1152)
     {
-      xfer += (*_iter1125).write(oprot);
+      xfer += (*_iter1152).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -4721,10 +4721,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
-    std::vector<SQLNotNullConstraint> ::const_iterator _iter1126;
-    for (_iter1126 = (*(this->notNullConstraints)).begin(); _iter1126 != (*(this->notNullConstraints)).end(); ++_iter1126)
+    std::vector<SQLNotNullConstraint> ::const_iterator _iter1153;
+    for (_iter1153 = (*(this->notNullConstraints)).begin(); _iter1153 != (*(this->notNullConstraints)).end(); ++_iter1153)
     {
-      xfer += (*_iter1126).write(oprot);
+      xfer += (*_iter1153).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -6478,14 +6478,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partNames.clear();
-            uint32_t _size1127;
-            ::apache::thrift::protocol::TType _etype1130;
-            xfer += iprot->readListBegin(_etype1130, _size1127);
-            this->partNames.resize(_size1127);
-            uint32_t _i1131;
-            for (_i1131 = 0; _i1131 < _size1127; ++_i1131)
+            uint32_t _size1154;
+            ::apache::thrift::protocol::TType _etype1157;
+            xfer += iprot->readListBegin(_etype1157, _size1154);
+            this->partNames.resize(_size1154);
+            uint32_t _i1158;
+            for (_i1158 = 0; _i1158 < _size1154; ++_i1158)
             {
-              xfer += iprot->readString(this->partNames[_i1131]);
+              xfer += iprot->readString(this->partNames[_i1158]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6522,10 +6522,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
-    std::vector<std::string> ::const_iterator _iter1132;
-    for (_iter1132 = this->partNames.begin(); _iter1132 != this->partNames.end(); ++_iter1132)
+    std::vector<std::string> ::const_iterator _iter1159;
+    for (_iter1159 = this->partNames.begin(); _iter1159 != this->partNames.end(); ++_iter1159)
     {
-      xfer += oprot->writeString((*_iter1132));
+      xfer += oprot->writeString((*_iter1159));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6557,10 +6557,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
-    std::vector<std::string> ::const_iterator _iter1133;
-    for (_iter1133 = (*(this->partNames)).begin(); _iter1133 != (*(this->partNames)).end(); ++_iter1133)
+    std::vector<std::string> ::const_iterator _iter1160;
+    for (_iter1160 = (*(this->partNames)).begin(); _iter1160 != (*(this->partNames)).end(); ++_iter1160)
     {
-      xfer += oprot->writeString((*_iter1133));
+      xfer += oprot->writeString((*_iter1160));
     }
     xfer += oprot->writeListEnd();
   }
@@ -6804,14 +6804,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1134;
-            ::apache::thrift::protocol::TType _etype1137;
-            xfer += iprot->readListBegin(_etype1137, _size1134);
-            this->success.resize(_size1134);
-            uint32_t _i1138;
-            for (_i1138 = 0; _i1138 < _size1134; ++_i1138)
+            uint32_t _size1161;
+            ::apache::thrift::protocol::TType _etype1164;
+            xfer += iprot->readListBegin(_etype1164, _size1161);
+            this->success.resize(_size1161);
+            uint32_t _i1165;
+            for (_i1165 = 0; _i1165 < _size1161; ++_i1165)
             {
-              xfer += iprot->readString(this->success[_i1138]);
+              xfer += iprot->readString(this->success[_i1165]);
             }
             xfer += iprot->readListEnd();
           }
@@ -6850,10 +6850,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1139;
-      for (_iter1139 = this->success.begin(); _iter1139 != this->success.end(); ++_iter1139)
+      std::vector<std::string> ::const_iterator _iter1166;
+      for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166)
       {
-        xfer += oprot->writeString((*_iter1139));
+        xfer += oprot->writeString((*_iter1166));
       }
       xfer += oprot->writeListEnd();
     }
@@ -6898,14 +6898,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1140;
-            ::apache::thrift::protocol::TType _etype1143;
-            xfer += iprot->readListBegin(_etype1143, _size1140);
-            (*(this->success)).resize(_size1140);
-            uint32_t _i1144;
-            for (_i1144 = 0; _i1144 < _size1140; ++_i1144)
+            uint32_t _size1167;
+            ::apache::thrift::protocol::TType _etype1170;
+            xfer += iprot->readListBegin(_etype1170, _size1167);
+            (*(this->success)).resize(_size1167);
+            uint32_t _i1171;
+            for (_i1171 = 0; _i1171 < _size1167; ++_i1171)
             {
-              xfer += iprot->readString((*(this->success))[_i1144]);
+              xfer += iprot->readString((*(this->success))[_i1171]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7075,14 +7075,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1145;
-            ::apache::thrift::protocol::TType _etype1148;
-            xfer += iprot->readListBegin(_etype1148, _size1145);
-            this->success.resize(_size1145);
-            uint32_t _i1149;
-            for (_i1149 = 0; _i1149 < _size1145; ++_i1149)
+            uint32_t _size1172;
+            ::apache::thrift::protocol::TType _etype1175;
+            xfer += iprot->readListBegin(_etype1175, _size1172);
+            this->success.resize(_size1172);
+            uint32_t _i1176;
+            for (_i1176 = 0; _i1176 < _size1172; ++_i1176)
             {
-              xfer += iprot->readString(this->success[_i1149]);
+              xfer += iprot->readString(this->success[_i1176]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7121,10 +7121,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1150;
-      for (_iter1150 = this->success.begin(); _iter1150 != this->success.end(); ++_iter1150)
+      std::vector<std::string> ::const_iterator _iter1177;
+      for (_iter1177 = this->success.begin(); _iter1177 != this->success.end(); ++_iter1177)
       {
-        xfer += oprot->writeString((*_iter1150));
+        xfer += oprot->writeString((*_iter1177));
       }
       xfer += oprot->writeListEnd();
     }
@@ -7169,14 +7169,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1151;
-            ::apache::thrift::protocol::TType _etype1154;
-            xfer += iprot->readListBegin(_etype1154, _size1151);
-            (*(this->success)).resize(_size1151);
-            uint32_t _i1155;
-            for (_i1155 = 0; _i1155 < _size1151; ++_i1155)
+            uint32_t _size1178;
+            ::apache::thrift::protocol::TType _etype1181;
+            xfer += iprot->readListBegin(_etype1181, _size1178);
+            (*(this->success)).resize(_size1178);
+            uint32_t _i1182;
+            for (_i1182 = 0; _i1182 < _size1178; ++_i1182)
             {
-              xfer += iprot->readString((*(this->success))[_i1155]);
+              xfer += iprot->readString((*(this->success))[_i1182]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7251,14 +7251,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_types.clear();
-            uint32_t _size1156;
-            ::apache::thrift::protocol::TType _etype1159;
-            xfer += iprot->readListBegin(_etype1159, _size1156);
-            this->tbl_types.resize(_size1156);
-            uint32_t _i1160;
-            for (_i1160 = 0; _i1160 < _size1156; ++_i1160)
+            uint32_t _size1183;
+            ::apache::thrift::protocol::TType _etype1186;
+            xfer += iprot->readListBegin(_etype1186, _size1183);
+            this->tbl_types.resize(_size1183);
+            uint32_t _i1187;
+            for (_i1187 = 0; _i1187 < _size1183; ++_i1187)
             {
-              xfer += iprot->readString(this->tbl_types[_i1160]);
+              xfer += iprot->readString(this->tbl_types[_i1187]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7295,10 +7295,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
-    std::vector<std::string> ::const_iterator _iter1161;
-    for (_iter1161 = this->tbl_types.begin(); _iter1161 != this->tbl_types.end(); ++_iter1161)
+    std::vector<std::string> ::const_iterator _iter1188;
+    for (_iter1188 = this->tbl_types.begin(); _iter1188 != this->tbl_types.end(); ++_iter1188)
     {
-      xfer += oprot->writeString((*_iter1161));
+      xfer += oprot->writeString((*_iter1188));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7330,10 +7330,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
-    std::vector<std::string> ::const_iterator _iter1162;
-    for (_iter1162 = (*(this->tbl_types)).begin(); _iter1162 != (*(this->tbl_types)).end(); ++_iter1162)
+    std::vector<std::string> ::const_iterator _iter1189;
+    for (_iter1189 = (*(this->tbl_types)).begin(); _iter1189 != (*(this->tbl_types)).end(); ++_iter1189)
     {
-      xfer += oprot->writeString((*_iter1162));
+      xfer += oprot->writeString((*_iter1189));
     }
     xfer += oprot->writeListEnd();
   }
@@ -7374,14 +7374,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1163;
-            ::apache::thrift::protocol::TType _etype1166;
-            xfer += iprot->readListBegin(_etype1166, _size1163);
-            this->success.resize(_size1163);
-            uint32_t _i1167;
-            for (_i1167 = 0; _i1167 < _size1163; ++_i1167)
+            uint32_t _size1190;
+            ::apache::thrift::protocol::TType _etype1193;
+            xfer += iprot->readListBegin(_etype1193, _size1190);
+            this->success.resize(_size1190);
+            uint32_t _i1194;
+            for (_i1194 = 0; _i1194 < _size1190; ++_i1194)
             {
-              xfer += this->success[_i1167].read(iprot);
+              xfer += this->success[_i1194].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7420,10 +7420,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<TableMeta> ::const_iterator _iter1168;
-      for (_iter1168 = this->success.begin(); _iter1168 != this->success.end(); ++_iter1168)
+      std::vector<TableMeta> ::const_iterator _iter1195;
+      for (_iter1195 = this->success.begin(); _iter1195 != this->success.end(); ++_iter1195)
       {
-        xfer += (*_iter1168).write(oprot);
+        xfer += (*_iter1195).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -7468,14 +7468,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1169;
-            ::apache::thrift::protocol::TType _etype1172;
-            xfer += iprot->readListBegin(_etype1172, _size1169);
-            (*(this->success)).resize(_size1169);
-            uint32_t _i1173;
-            for (_i1173 = 0; _i1173 < _size1169; ++_i1173)
+            uint32_t _size1196;
+            ::apache::thrift::protocol::TType _etype1199;
+            xfer += iprot->readListBegin(_etype1199, _size1196);
+            (*(this->success)).resize(_size1196);
+            uint32_t _i1200;
+            for (_i1200 = 0; _i1200 < _size1196; ++_i1200)
             {
-              xfer += (*(this->success))[_i1173].read(iprot);
+              xfer += (*(this->success))[_i1200].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7613,14 +7613,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1174;
-            ::apache::thrift::protocol::TType _etype1177;
-            xfer += iprot->readListBegin(_etype1177, _size1174);
-            this->success.resize(_size1174);
-            uint32_t _i1178;
-            for (_i1178 = 0; _i1178 < _size1174; ++_i1178)
+            uint32_t _size1201;
+            ::apache::thrift::protocol::TType _etype1204;
+            xfer += iprot->readListBegin(_etype1204, _size1201);
+            this->success.resize(_size1201);
+            uint32_t _i1205;
+            for (_i1205 = 0; _i1205 < _size1201; ++_i1205)
             {
-              xfer += iprot->readString(this->success[_i1178]);
+              xfer += iprot->readString(this->success[_i1205]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7659,10 +7659,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1179;
-      for (_iter1179 = this->success.begin(); _iter1179 != this->success.end(); ++_iter1179)
+      std::vector<std::string> ::const_iterator _iter1206;
+      for (_iter1206 = this->success.begin(); _iter1206 != this->success.end(); ++_iter1206)
       {
-        xfer += oprot->writeString((*_iter1179));
+        xfer += oprot->writeString((*_iter1206));
       }
       xfer += oprot->writeListEnd();
     }
@@ -7707,14 +7707,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1180;
-            ::apache::thrift::protocol::TType _etype1183;
-            xfer += iprot->readListBegin(_etype1183, _size1180);
-            (*(this->success)).resize(_size1180);
-            uint32_t _i1184;
-            for (_i1184 = 0; _i1184 < _size1180; ++_i1184)
+            uint32_t _size1207;
+            ::apache::thrift::protocol::TType _etype1210;
+            xfer += iprot->readListBegin(_etype1210, _size1207);
+            (*(this->success)).resize(_size1207);
+            uint32_t _i1211;
+            for (_i1211 = 0; _i1211 < _size1207; ++_i1211)
             {
-              xfer += iprot->readString((*(this->success))[_i1184]);
+              xfer += iprot->readString((*(this->success))[_i1211]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8024,14 +8024,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_names.clear();
-            uint32_t _size1185;
-            ::apache::thrift::protocol::TType _etype1188;
-            xfer += iprot->readListBegin(_etype1188, _size1185);
-            this->tbl_names.resize(_size1185);
-            uint32_t _i1189;
-            for (_i1189 = 0; _i1189 < _size1185; ++_i1189)
+            uint32_t _size1212;
+            ::apache::thrift::protocol::TType _etype1215;
+            xfer += iprot->readListBegin(_etype1215, _size1212);
+            this->tbl_names.resize(_size1212);
+            uint32_t _i1216;
+            for (_i1216 = 0; _i1216 < _size1212; ++_i1216)
             {
-              xfer += iprot->readString(this->tbl_names[_i1189]);
+              xfer += iprot->readString(this->tbl_names[_i1216]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8064,10 +8064,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter1190;
-    for (_iter1190 = this->tbl_names.begin(); _iter1190 != this->tbl_names.end(); ++_iter1190)
+    std::vector<std::string> ::const_iterator _iter1217;
+    for (_iter1217 = this->tbl_names.begin(); _iter1217 != this->tbl_names.end(); ++_iter1217)
     {
-      xfer += oprot->writeString((*_iter1190));
+      xfer += oprot->writeString((*_iter1217));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8095,10 +8095,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1191;
-    for (_iter1191 = (*(this->tbl_names)).begin(); _iter1191 != (*(this->tbl_names)).end(); ++_iter1191)
+    std::vector<std::string> ::const_iterator _iter1218;
+    for (_iter1218 = (*(this->tbl_names)).begin(); _iter1218 != (*(this->tbl_names)).end(); ++_iter1218)
     {
-      xfer += oprot->writeString((*_iter1191));
+      xfer += oprot->writeString((*_iter1218));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8139,14 +8139,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1192;
-            ::apache::thrift::protocol::TType _etype1195;
-            xfer += iprot->readListBegin(_etype1195, _size1192);
-            this->success.resize(_size1192);
-            uint32_t _i1196;
-            for (_i1196 = 0; _i1196 < _size1192; ++_i1196)
+            uint32_t _size1219;
+            ::apache::thrift::protocol::TType _etype1222;
+            xfer += iprot->readListBegin(_etype1222, _size1219);
+            this->success.resize(_size1219);
+            uint32_t _i1223;
+            for (_i1223 = 0; _i1223 < _size1219; ++_i1223)
             {
-              xfer += this->success[_i1196].read(iprot);
+              xfer += this->success[_i1223].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8177,10 +8177,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Table> ::const_iterator _iter1197;
-      for (_iter1197 = this->success.begin(); _iter1197 != this->success.end(); ++_iter1197)
+      std::vector<Table> ::const_iterator _iter1224;
+      for (_iter1224 = this->success.begin(); _iter1224 != this->success.end(); ++_iter1224)
       {
-        xfer += (*_iter1197).write(oprot);
+        xfer += (*_iter1224).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -8221,14 +8221,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1198;
-            ::apache::thrift::protocol::TType _etype1201;
-            xfer += iprot->readListBegin(_etype1201, _size1198);
-            (*(this->success)).resize(_size1198);
-            uint32_t _i1202;
-            for (_i1202 = 0; _i1202 < _size1198; ++_i1202)
+            uint32_t _size1225;
+            ::apache::thrift::protocol::TType _etype1228;
+            xfer += iprot->readListBegin(_etype1228, _size1225);
+            (*(this->success)).resize(_size1225);
+            uint32_t _i1229;
+            for (_i1229 = 0; _i1229 < _size1225; ++_i1229)
             {
-              xfer += (*(this->success))[_i1202].read(iprot);
+              xfer += (*(this->success))[_i1229].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8864,14 +8864,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1203;
-            ::apache::thrift::protocol::TType _etype1206;
-            xfer += iprot->readListBegin(_etype1206, _size1203);
-            this->success.resize(_size1203);
-            uint32_t _i1207;
-            for (_i1207 = 0; _i1207 < _size1203; ++_i1207)
+            uint32_t _size1230;
+            ::apache::thrift::protocol::TType _etype1233;
+            xfer += iprot->readListBegin(_etype1233, _size1230);
+            this->success.resize(_size1230);
+            uint32_t _i1234;
+            for (_i1234 = 0; _i1234 < _size1230; ++_i1234)
             {
-              xfer += iprot->readString(this->success[_i1207]);
+              xfer += iprot->readString(this->success[_i1234]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8926,10 +8926,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1208;
-      for (_iter1208 = this->success.begin(); _iter1208 != this->success.end(); ++_iter1208)
+      std::vector<std::string> ::const_iterator _iter1235;
+      for (_iter1235 = this->success.begin(); _iter1235 != this->success.end(); ++_iter1235)
       {
-        xfer += oprot->writeString((*_iter1208));
+        xfer += oprot->writeString((*_iter1235));
       }
       xfer += oprot->writeListEnd();
     }
@@ -8982,14 +8982,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1209;
-            ::apache::thrift::protocol::TType _etype1212;
-            xfer += iprot->readListBegin(_etype1212, _size1209);
-            (*(this->success)).resize(_size1209);
-            uint32_t _i1213;
-            for (_i1213 = 0; _i1213 < _size1209; ++_i1213)
+            uint32_t _size1236;
+            ::apache::thrift::protocol::TType _etype1239;
+            xfer += iprot->readListBegin(_etype1239, _size1236);
+            (*(this->success)).resize(_size1236);
+            uint32_t _i1240;
+            for (_i1240 = 0; _i1240 < _size1236; ++_i1240)
             {
-              xfer += iprot->readString((*(this->success))[_i1213]);
+              xfer += iprot->readString((*(this->success))[_i1240]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10323,14 +10323,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1214;
-            ::apache::thrift::protocol::TType _etype1217;
-            xfer += iprot->readListBegin(_etype1217, _size1214);
-            this->new_parts.resize(_size1214);
-            uint32_t _i1218;
-            for (_i1218 = 0; _i1218 < _size1214; ++_i1218)
+            uint32_t _size1241;
+            ::apache::thrift::protocol::TType _etype1244;
+            xfer += iprot->readListBegin(_etype1244, _size1241);
+            this->new_parts.resize(_size1241);
+            uint32_t _i1245;
+            for (_i1245 = 0; _i1245 < _size1241; ++_i1245)
             {
-              xfer += this->new_parts[_i1218].read(iprot);
+              xfer += this->new_parts[_i1245].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10359,10 +10359,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1219;
-    for (_iter1219 = this->new_parts.begin(); _iter1219 != this->new_parts.end(); ++_iter1219)
+    std::vector<Partition> ::const_iterator _iter1246;
+    for (_iter1246 = this->new_parts.begin(); _iter1246 != this->new_parts.end(); ++_iter1246)
     {
-      xfer += (*_iter1219).write(oprot);
+      xfer += (*_iter1246).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10386,10 +10386,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1220;
-    for (_iter1220 = (*(this->new_parts)).begin(); _iter1220 != (*(this->new_parts)).end(); ++_iter1220)
+    std::vector<Partition> ::const_iterator _iter1247;
+    for (_iter1247 = (*(this->new_parts)).begin(); _iter1247 != (*(this->new_parts)).end(); ++_iter1247)
     {
-      xfer += (*_iter1220).write(oprot);
+      xfer += (*_iter1247).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10598,14 +10598,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1221;
-            ::apache::thrift::protocol::TType _etype1224;
-            xfer += iprot->readListBegin(_etype1224, _size1221);
-            this->new_parts.resize(_size1221);
-            uint32_t _i1225;
-            for (_i1225 = 0; _i1225 < _size1221; ++_i1225)
+            uint32_t _size1248;
+            ::apache::thrift::protocol::TType _etype1251;
+            xfer += iprot->readListBegin(_etype1251, _size1248);
+            this->new_parts.resize(_size1248);
+            uint32_t _i1252;
+            for (_i1252 = 0; _i1252 < _size1248; ++_i1252)
             {
-              xfer += this->new_parts[_i1225].read(iprot);
+              xfer += this->new_parts[_i1252].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10634,10 +10634,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1226;
-    for (_iter1226 = this->new_parts.begin(); _iter1226 != this->new_parts.end(); ++_iter1226)
+    std::vector<PartitionSpec> ::const_iterator _iter1253;
+    for (_iter1253 = this->new_parts.begin(); _iter1253 != this->new_parts.end(); ++_iter1253)
     {
-      xfer += (*_iter1226).write(oprot);
+      xfer += (*_iter1253).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10661,10 +10661,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1227;
-    for (_iter1227 = (*(this->new_parts)).begin(); _iter1227 != (*(this->new_parts)).end(); ++_iter1227)
+    std::vector<PartitionSpec> ::const_iterator _iter1254;
+    for (_iter1254 = (*(this->new_parts)).begin(); _iter1254 != (*(this->new_parts)).end(); ++_iter1254)
     {
-      xfer += (*_iter1227).write(oprot);
+      xfer += (*_iter1254).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -10889,14 +10889,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1228;
-            ::apache::thrift::protocol::TType _etype1231;
-            xfer += iprot->readListBegin(_etype1231, _size1228);
-            this->part_vals.resize(_size1228);
-            uint32_t _i1232;
-            for (_i1232 = 0; _i1232 < _size1228; ++_i1232)
+            uint32_t _size1255;
+            ::apache::thrift::protocol::TType _etype1258;
+            xfer += iprot->readListBegin(_etype1258, _size1255);
+            this->part_vals.resize(_size1255);
+            uint32_t _i1259;
+            for (_i1259 = 0; _i1259 < _size1255; ++_i1259)
             {
-              xfer += iprot->readString(this->part_vals[_i1232]);
+              xfer += iprot->readString(this->part_vals[_i1259]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10933,10 +10933,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1233;
-    for (_iter1233 = this->part_vals.begin(); _iter1233 != this->part_vals.end(); ++_iter1233)
+    std::vector<std::string> ::const_iterator _iter1260;
+    for (_iter1260 = this->part_vals.begin(); _iter1260 != this->part_vals.end(); ++_iter1260)
     {
-      xfer += oprot->writeString((*_iter1233));
+      xfer += oprot->writeString((*_iter1260));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10968,10 +10968,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1234;
-    for (_iter1234 = (*(this->part_vals)).begin(); _iter1234 != (*(this->part_vals)).end(); ++_iter1234)
+    std::vector<std::string> ::const_iterator _iter1261;
+    for (_iter1261 = (*(this->part_vals)).begin(); _iter1261 != (*(this->part_vals)).end(); ++_iter1261)
     {
-      xfer += oprot->writeString((*_iter1234));
+      xfer += oprot->writeString((*_iter1261));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11443,14 +11443,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1235;
-            ::apache::thrift::protocol::TType _etype1238;
-            xfer += iprot->readListBegin(_etype1238, _size1235);
-            this->part_vals.resize(_size1235);
-            uint32_t _i1239;
-            for (_i1239 = 0; _i1239 < _size1235; ++_i1239)
+            uint32_t _size1262;
+            ::apache::thrift::protocol::TType _etype1265;
+            xfer += iprot->readListBegin(_etype1265, _size1262);
+            this->part_vals.resize(_size1262);
+            uint32_t _i1266;
+            for (_i1266 = 0; _i1266 < _size1262; ++_i1266)
             {
-              xfer += iprot->readString(this->part_vals[_i1239]);
+              xfer += iprot->readString(this->part_vals[_i1266]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11495,10 +11495,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1240;
-    for (_iter1240 = this->part_vals.begin(); _iter1240 != this->part_vals.end(); ++_iter1240)
+    std::vector<std::string> ::const_iterator _iter1267;
+    for (_iter1267 = this->part_vals.begin(); _iter1267 != this->part_vals.end(); ++_iter1267)
     {
-      xfer += oprot->writeString((*_iter1240));
+      xfer += oprot->writeString((*_iter1267));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11534,10 +11534,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1241;
-    for (_iter1241 = (*(this->part_vals)).begin(); _iter1241 != (*(this->part_vals)).end(); ++_iter1241)
+    std::vector<std::string> ::const_iterator _iter1268;
+    for (_iter1268 = (*(this->part_vals)).begin(); _iter1268 != (*(this->part_vals)).end(); ++_iter1268)
     {
-      xfer += oprot->writeString((*_iter1241));
+      xfer += oprot->writeString((*_iter1268));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12340,14 +12340,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1242;
-            ::apache::thrift::protocol::TType _etype1245;
-            xfer += iprot->readListBegin(_etype1245, _size1242);
-            this->part_vals.resize(_size1242);
-            uint32_t _i1246;
-            for (_i1246 = 0; _i1246 < _size1242; ++_i1246)
+            uint32_t _size1269;
+            ::apache::thrift::protocol::TType _etype1272;
+            xfer += iprot->readListBegin(_etype1272, _size1269);
+            this->part_vals.resize(_size1269);
+            uint32_t _i1273;
+            for (_i1273 = 0; _i1273 < _size1269; ++_i1273)
             {
-              xfer += iprot->readString(this->part_vals[_i1246]);
+              xfer += iprot->readString(this->part_vals[_i1273]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12392,10 +12392,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1247;
-    for (_iter1247 = this->part_vals.begin(); _iter1247 != this->part_vals.end(); ++_iter1247)
+    std::vector<std::string> ::const_iterator _iter1274;
+    for (_iter1274 = this->part_vals.begin(); _iter1274 != this->part_vals.end(); ++_iter1274)
     {
-      xfer += oprot->writeString((*_iter1247));
+      xfer += oprot->writeString((*_iter1274));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12431,10 +12431,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1248;
-    for (_iter1248 = (*(this->part_vals)).begin(); _iter1248 != (*(this->part_vals)).end(); ++_iter1248)
+    std::vector<std::string> ::const_iterator _iter1275;
+    for (_iter1275 = (*(this->part_vals)).begin(); _iter1275 != (*(this->part_vals)).end(); ++_iter1275)
     {
-      xfer += oprot->writeString((*_iter1248));
+      xfer += oprot->writeString((*_iter1275));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12643,14 +12643,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1249;
-            ::apache::thrift::protocol::TType _etype1252;
-            xfer += iprot->readListBegin(_etype1252, _size1249);
-            this->part_vals.resize(_size1249);
-            uint32_t _i1253;
-            for (_i1253 = 0; _i1253 < _size1249; ++_i1253)
+            uint32_t _size1276;
+            ::apache::thrift::protocol::TType _etype1279;
+            xfer += iprot->readListBegin(_etype1279, _size1276);
+            this->part_vals.resize(_size1276);
+            uint32_t _i1280;
+            for (_i1280 = 0; _i1280 < _size1276; ++_i1280)
             {
-              xfer += iprot->readString(this->part_vals[_i1253]);
+              xfer += iprot->readString(this->part_vals[_i1280]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12703,10 +12703,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1254;
-    for (_iter1254 = this->part_vals.begin(); _iter1254 != this->part_vals.end(); ++_iter1254)
+    std::vector<std::string> ::const_iterator _iter1281;
+    for (_iter1281 = this->part_vals.begin(); _iter1281 != this->part_vals.end(); ++_iter1281)
     {
-      xfer += oprot->writeString((*_iter1254));
+      xfer += oprot->writeString((*_iter1281));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12746,10 +12746,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1255;
-    for (_iter1255 = (*(this->part_vals)).begin(); _iter1255 != (*(this->part_vals)).end(); ++_iter1255)
+    std::vector<std::string> ::const_iterator _iter1282;
+    for (_iter1282 = (*(this->part_vals)).begin(); _iter1282 != (*(this->part_vals)).end(); ++_iter1282)
     {
-      xfer += oprot->writeString((*_iter1255));
+      xfer += oprot->writeString((*_iter1282));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13755,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1256;
-            ::apache::thrift::protocol::TType _etype1259;
-            xfer += iprot->readListBegin(_etype1259, _size1256);
-            this->part_vals.resize(_size1256);
-            uint32_t _i1260;
-            for (_i1260 = 0; _i1260 < _size1256; ++_i1260)
+            uint32_t _size1283;
+            ::apache::thrift::protocol::TType _etype1286;
+            xfer += iprot->readListBegin(_etype1286, _size1283);
+            this->part_vals.resize(_size1283);
+            uint32_t _i1287;
+            for (_i1287 = 0; _i1287 < _size1283; ++_i1287)
             {
-              xfer += iprot->readString(this->part_vals[_i1260]);
+              xfer += iprot->readString(this->part_vals[_i1287]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13799,10 +13799,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1261;
-    for (_iter1261 = this->part_vals.begin(); _iter1261 != this->part_vals.end(); ++_iter1261)
+    std::vector<std::string> ::const_iterator _iter1288;
+    for (_iter1288 = this->part_vals.begin(); _iter1288 != this->part_vals.end(); ++_iter1288)
     {
-      xfer += oprot->writeString((*_iter1261));
+      xfer += oprot->writeString((*_iter1288));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13834,10 +13834,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1262;
-    for (_iter1262 = (*(this->part_vals)).begin(); _iter1262 != (*(this->part_vals)).end(); ++_iter1262)
+    std::vector<std::string> ::const_iterator _iter1289;
+    for (_iter1289 = (*(this->part_vals)).begin(); _iter1289 != (*(this->part_vals)).end(); ++_iter1289)
     {
-      xfer += oprot->writeString((*_iter1262));
+      xfer += oprot->writeString((*_iter1289));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14026,17 +14026,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1263;
-            ::apache::thrift::protocol::TType _ktype1264;
-            ::apache::thrift::protocol::TType _vtype1265;
-            xfer += iprot->readMapBegin(_ktype1264, _vtype1265, _size1263);
-            uint32_t _i1267;
-            for (_i1267 = 0; _i1267 < _size1263; ++_i1267)
+            uint32_t _size1290;
+            ::apache::thrift::protocol::TType _ktype1291;
+            ::apache::thrift::protocol::TType _vtype1292;
+            xfer += iprot->readMapBegin(_ktype1291, _vtype1292, _size1290);
+            uint32_t _i1294;
+            for (_i1294 = 0; _i1294 < _size1290; ++_i1294)
             {
-              std::string _key1268;
-              xfer += iprot->readString(_key1268);
-              std::string& _val1269 = this->partitionSpecs[_key1268];
-              xfer += iprot->readString(_val1269);
+              std::string _key1295;
+              xfer += iprot->readString(_key1295);
+              std::string& _val1296 = this->partitionSpecs[_key1295];
+              xfer += iprot->readString(_val1296);
             }
             xfer += iprot->readMapEnd();
           }
@@ -14097,11 +14097,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1270;
-    for (_iter1270 = this->partitionSpecs.begin(); _iter1270 != this->partitionSpecs.end(); ++_iter1270)
+    std::map<std::string, std::string> ::const_iterator _iter1297;
+    for (_iter1297 = this->partitionSpecs.begin(); _iter1297 != this->partitionSpecs.end(); ++_iter1297)
     {
-      xfer += oprot->writeString(_iter1270->first);
-      xfer += oprot->writeString(_iter1270->second);
+      xfer += oprot->writeString(_iter1297->first);
+      xfer += oprot->writeString(_iter1297->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14141,11 +14141,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1271;
-    for (_iter1271 = (*(this->partitionSpecs)).begin(); _iter1271 != (*(this->partitionSpecs)).end(); ++_iter1271)
+    std::map<std::string, std::string> ::const_iterator _iter1298;
+    for (_iter1298 = (*(this->partitionSpecs)).begin(); _iter1298 != (*(this->partitionSpecs)).end(); ++_iter1298)
     {
-      xfer += oprot->writeString(_iter1271->first);
-      xfer += oprot->writeString(_iter1271->second);
+      xfer += oprot->writeString(_iter1298->first);
+      xfer += oprot->writeString(_iter1298->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14390,17 +14390,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1272;
-            ::apache::thrift::protocol::TType _ktype1273;
-            ::apache::thrift::protocol::TType _vtype1274;
-            xfer += iprot->readMapBegin(_ktype1273, _vtype1274, _size1272);
-            uint32_t _i1276;
-            for (_i1276 = 0; _i1276 < _size1272; ++_i1276)
+            uint32_t _size1299;
+            ::apache::thrift::protocol::TType _ktype1300;
+            ::apache::thrift::protocol::TType _vtype1301;
+            xfer += iprot->readMapBegin(_ktype1300, _vtype1301, _size1299);
+            uint32_t _i1303;
+            for (_i1303 = 0; _i1303 < _size1299; ++_i1303)
             {
-              std::string _key1277;
-              xfer += iprot->readString(_key1277);
-              std::string& _val1278 = this->partitionSpecs[_key1277];
-              xfer += iprot->readString(_val1278);
+              std::string _key1304;
+              xfer += iprot->readString(_key1304);
+              std::string& _val1305 = this->partitionSpecs[_key1304];
+              xfer += iprot->readString(_val1305);
             }
             xfer += iprot->readMapEnd();
           }
@@ -14461,11 +14461,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1279;
-    for (_iter1279 = this->partitionSpecs.begin(); _iter1279 != this->partitionSpecs.end(); ++_iter1279)
+    std::map<std::string, std::string> ::const_iterator _iter1306;
+    for (_iter1306 = this->partitionSpecs.begin(); _iter1306 != this->partitionSpecs.end(); ++_iter1306)
     {
-      xfer += oprot->writeString(_iter1279->first);
-      xfer += oprot->writeString(_iter1279->second);
+      xfer += oprot->writeString(_iter1306->first);
+      xfer += oprot->writeString(_iter1306->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14505,11 +14505,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1280;
-    for (_iter1280 = (*(this->partitionSpecs)).begin(); _iter1280 != (*(this->partitionSpecs)).end(); ++_iter1280)
+    std::map<std::string, std::string> ::const_iterator _iter1307;
+    for (_iter1307 = (*(this->partitionSpecs)).begin(); _iter1307 != (*(this->partitionSpecs)).end(); ++_iter1307)
     {
-      xfer += oprot->writeString(_iter1280->first);
-      xfer += oprot->writeString(_iter1280->second);
+      xfer += oprot->writeString(_iter1307->first);
+      xfer += oprot->writeString(_iter1307->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -14566,14 +14566,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1281;
-            ::apache::thrift::protocol::TType _etype1284;
-            xfer += iprot->readListBegin(_etype1284, _size1281);
-            this->success.resize(_size1281);
-            uint32_t _i1285;
-            for (_i1285 = 0; _i1285 < _size1281; ++_i1285)
+            uint32_t _size1308;
+            ::apache::thrift::protocol::TType _etype1311;
+            xfer += iprot->readListBegin(_etype1311, _size1308);
+            this->success.resize(_size1308);
+            uint32_t _i1312;
+            for (_i1312 = 0; _i1312 < _size1308; ++_i1312)
             {
-              xfer += this->success[_i1285].read(iprot);
+              xfer += this->success[_i1312].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14636,10 +14636,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1286;
-      for (_iter1286 = this->success.begin(); _iter1286 != this->success.end(); ++_iter1286)
+      std::vector<Partition> ::const_iterator _iter1313;
+      for (_iter1313 = this->success.begin(); _iter1313 != this->success.end(); ++_iter1313)
       {
-        xfer += (*_iter1286).write(oprot);
+        xfer += (*_iter1313).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -14696,14 +14696,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1287;
-            ::apache::thrift::protocol::TType _etype1290;
-            xfer += iprot->readListBegin(_etype1290, _size1287);
-            (*(this->success)).resize(_size1287);
-            uint32_t _i1291;
-            for (_i1291 = 0; _i1291 < _size1287; ++_i1291)
+            uint32_t _size1314;
+            ::apache::thrift::protocol::TType _etype1317;
+            xfer += iprot->readListBegin(_etype1317, _size1314);
+            (*(this->success)).resize(_size1314);
+            uint32_t _i1318;
+            for (_i1318 = 0; _i1318 < _size1314; ++_i1318)
             {
-              xfer += (*(this->success))[_i1291].read(iprot);
+              xfer += (*(this->success))[_i1318].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14802,14 +14802,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1292;
-            ::apache::thrift::protocol::TType _etype1295;
-            xfer += iprot->readListBegin(_etype1295, _size1292);
-            this->part_vals.resize(_size1292);
-            uint32_t _i1296;
-            for (_i1296 = 0; _i1296 < _size1292; ++_i1296)
+            uint32_t _size1319;
+            ::apache::thrift::protocol::TType _etype1322;
+            xfer += iprot->readListBegin(_etype1322, _size1319);
+            this->part_vals.resize(_size1319);
+            uint32_t _i1323;
+            for (_i1323 = 0; _i1323 < _size1319; ++_i1323)
             {
-              xfer += iprot->readString(this->part_vals[_i1296]);
+              xfer += iprot->readString(this->part_vals[_i1323]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14830,14 +14830,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1297;
-            ::apache::thrift::protocol::TType _etype1300;
-            xfer += iprot->readListBegin(_etype1300, _size1297);
-            this->group_names.resize(_size1297);
-            uint32_t _i1301;
-            for (_i1301 = 0; _i1301 < _size1297; ++_i1301)
+            uint32_t _size1324;
+            ::apache::thrift::protocol::TType _etype1327;
+            xfer += iprot->readListBegin(_etype1327, _size1324);
+            this->group_names.resize(_size1324);
+            uint32_t _i1328;
+            for (_i1328 = 0; _i1328 < _size1324; ++_i1328)
             {
-              xfer += iprot->readString(this->group_names[_i1301]);
+              xfer += iprot->readString(this->group_names[_i1328]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14874,10 +14874,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1302;
-    for (_iter1302 = this->part_vals.begin(); _iter1302 != this->part_vals.end(); ++_iter1302)
+    std::vector<std::string> ::const_iterator _iter1329;
+    for (_iter1329 = this->part_vals.begin(); _iter1329 != this->part_vals.end(); ++_iter1329)
     {
-      xfer += oprot->writeString((*_iter1302));
+      xfer += oprot->writeString((*_iter1329));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14890,10 +14890,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1303;
-    for (_iter1303 = this->group_names.begin(); _iter1303 != this->group_names.end(); ++_iter1303)
+    std::vector<std::string> ::const_iterator _iter1330;
+    for (_iter1330 = this->group_names.begin(); _iter1330 != this->group_names.end(); ++_iter1330)
     {
-      xfer += oprot->writeString((*_iter1303));
+      xfer += oprot->writeString((*_iter1330));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14925,10 +14925,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1304;
-    for (_iter1304 = (*(this->part_vals)).begin(); _iter1304 != (*(this->part_vals)).end(); ++_iter1304)
+    std::vector<std::string> ::const_iterator _iter1331;
+    for (_iter1331 = (*(this->part_vals)).begin(); _iter1331 != (*(this->part_vals)).end(); ++_iter1331)
     {
-      xfer += oprot->writeString((*_iter1304));
+      xfer += oprot->writeString((*_iter1331));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14941,10 +14941,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1305;
-    for (_iter1305 = (*(this->group_names)).begin(); _iter1305 != (*(this->group_names)).end(); ++_iter1305)
+    std::vector<std::string> ::const_iterator _iter1332;
+    for (_iter1332 = (*(this->group_names)).begin(); _iter1332 != (*(this->gro

<TRUNCATED>
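
The renumbered hunks above are mechanical: every generated Thrift reader follows the same list protocol (readListBegin yields an element type and count, the container is resized, each element is read in a counted loop, readListEnd closes the frame), and the suffix shifts (_size1192 to _size1219, and so on, a wholesale shift of 27) are just the generator's monotonically assigned temporaries moving after the file was regenerated. A hand-written Java analogue of that loop shape, assuming a hypothetical Reader interface with the obvious primitives (this is a sketch for orientation, not Thrift's actual TProtocol API):

    import java.util.ArrayList;
    import java.util.List;

    // Hand-written analogue of the generated Thrift list-read loop.
    // Reader is a hypothetical stand-in, not Thrift's TProtocol.
    public class ListReadSketch {
      interface Reader {
        int readListBegin();   // returns the element count (element type elided here)
        String readString();
        void readListEnd();
      }

      static List<String> readStringList(Reader in) {
        int size = in.readListBegin();      // the _sizeNNNN temporary in generated code
        List<String> out = new ArrayList<>(size);
        for (int i = 0; i < size; ++i) {    // the _iNNNN temporary in generated code
          out.add(in.readString());
        }
        in.readListEnd();
        return out;
      }
    }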

[03/50] [abbrv] hive git commit: HIVE-17710 LockManager should only lock Managed tables (Eugene Koifman, reviewed by Alan Gates)

Posted by ga...@apache.org.
HIVE-17710 LockManager should only lock Managed tables (Eugene Koifman, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b7be4acc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b7be4acc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b7be4acc

Branch: refs/heads/standalone-metastore
Commit: b7be4acc05fb79033832422100715344e34376a7
Parents: 7ea263c
Author: Eugene Koifman <ek...@hortonworks.com>
Authored: Wed Dec 13 10:00:11 2017 -0800
Committer: Eugene Koifman <ek...@hortonworks.com>
Committed: Wed Dec 13 10:00:11 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    | 28 +++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b7be4acc/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index fdb3603..48ac22d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.plan.LockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.LockTableDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
-import org.apache.hadoop.hive.ql.plan.api.Query;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.slf4j.Logger;
@@ -343,6 +342,28 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     //todo: handle Insert Overwrite as well: HIVE-18154
     return false;
   }
+  private boolean needsLock(Entity entity) {
+    switch (entity.getType()) {
+    case TABLE:
+      return isLockableTable(entity.getTable());
+    case PARTITION:
+      return isLockableTable(entity.getPartition().getTable());
+    default:
+      return true;
+    }
+  }
+  private boolean isLockableTable(Table t) {
+    if(t.isTemporary()) {
+      return false;
+    }
+    switch (t.getTableType()) {
+    case MANAGED_TABLE:
+    case MATERIALIZED_VIEW:
+      return true;
+    default:
+      return false;
+    }
+  }
   /**
    * Normally client should call {@link #acquireLocks(org.apache.hadoop.hive.ql.QueryPlan, org.apache.hadoop.hive.ql.Context, String)}
    * @param isBlocking if false, the method will return immediately; thus the locks may be in LockState.WAITING
@@ -371,8 +392,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
 
     // For each source to read, get a shared lock
     for (ReadEntity input : plan.getInputs()) {
-      if (!input.needsLock() || input.isUpdateOrDelete() ||
-          (input.getType() == Entity.Type.TABLE && input.getTable().isTemporary())) {
+      if (!input.needsLock() || input.isUpdateOrDelete() || !needsLock(input)) {
         // We don't want to acquire read locks during update or delete as we'll be acquiring write
         // locks instead. Also, there's no need to lock temp tables since they're session wide
         continue;
@@ -421,7 +441,7 @@ public final class DbTxnManager extends HiveTxnManagerImpl {
     for (WriteEntity output : plan.getOutputs()) {
       LOG.debug("output is null " + (output == null));
       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR ||
-          (output.getType() == Entity.Type.TABLE && output.getTable().isTemporary())) {
+          !needsLock(output)) {
         // We don't lock files or directories. We also skip locking temp tables.
         continue;
       }
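
The heart of HIVE-17710 above is the new needsLock()/isLockableTable() pair: temporary tables stay unlocked because they are session-scoped, and beyond that only managed tables and materialized views are handed to the lock manager. A minimal standalone sketch of that rule, assuming stand-in types (SimpleTable and this trimmed TableType enum are illustrative, not Hive's real classes):

    // Sketch of the HIVE-17710 lock-eligibility rule; SimpleTable and this
    // pared-down TableType enum are stand-ins, not Hive's actual classes.
    enum TableType { MANAGED_TABLE, MATERIALIZED_VIEW, EXTERNAL_TABLE, VIRTUAL_VIEW }

    public class LockEligibilitySketch {
      static class SimpleTable {
        final TableType type;
        final boolean temporary;
        SimpleTable(TableType type, boolean temporary) {
          this.type = type;
          this.temporary = temporary;
        }
      }

      // Temp tables are session-wide, so they never need metastore locks;
      // otherwise only managed tables and materialized views qualify.
      static boolean isLockable(SimpleTable t) {
        if (t.temporary) {
          return false;
        }
        switch (t.type) {
          case MANAGED_TABLE:
          case MATERIALIZED_VIEW:
            return true;
          default:
            return false;
        }
      }

      public static void main(String[] args) {
        System.out.println(isLockable(new SimpleTable(TableType.MANAGED_TABLE, false)));  // true
        System.out.println(isLockable(new SimpleTable(TableType.EXTERNAL_TABLE, false))); // false
        System.out.println(isLockable(new SimpleTable(TableType.MANAGED_TABLE, true)));   // false
      }
    }

In the patch itself the same predicate is applied to both the ReadEntity and WriteEntity loops, which is why the non-managed cases no longer take read locks either.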


[02/50] [abbrv] hive git commit: HIVE-18250: CBO gets turned off with duplicates in RR error (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by ga...@apache.org.
HIVE-18250: CBO gets turned off with duplicates in RR error (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7ea263cb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7ea263cb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7ea263cb

Branch: refs/heads/standalone-metastore
Commit: 7ea263cbe20eab04a813d270fc8cddc9ad80e7dc
Parents: e86c77a
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Mon Dec 11 18:29:54 2017 -0800
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Dec 13 09:27:22 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/CalcitePlanner.java    | 164 +++++++++----------
 .../queries/clientpositive/groupby_multialias.q |   7 +
 .../results/clientpositive/complex_alias.q.out  |  63 +++----
 .../clientpositive/groupby_multialias.q.out     |  66 ++++++++
 ql/src/test/results/clientpositive/order3.q.out |  58 ++++---
 .../clientpositive/perf/spark/query19.q.out     |   8 +-
 .../clientpositive/perf/spark/query55.q.out     |  16 +-
 .../clientpositive/perf/spark/query71.q.out     |   6 +-
 .../clientpositive/perf/tez/query19.q.out       |  28 ++--
 .../clientpositive/perf/tez/query55.q.out       |  88 +++++-----
 .../clientpositive/perf/tez/query71.q.out       |  30 ++--
 11 files changed, 302 insertions(+), 232 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 20386f1..efd5f7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -3902,102 +3902,92 @@ public class CalcitePlanner extends SemanticAnalyzer {
           }
         }
 
-          Map<ASTNode, RelNode> subQueryToRelNode = new HashMap<>();
-          boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false,
-                  subQueryToRelNode);
-          if(isSubQuery) {
-            ExprNodeDesc subQueryExpr = genExprNodeDesc(expr, relToHiveRR.get(srcRel),
-                    outerRR, subQueryToRelNode, true);
-            col_list.add(subQueryExpr);
+        Map<ASTNode, RelNode> subQueryToRelNode = new HashMap<>();
+        boolean isSubQuery = genSubQueryRelNode(qb, expr, srcRel, false,
+                subQueryToRelNode);
+        if(isSubQuery) {
+          ExprNodeDesc subQueryExpr = genExprNodeDesc(expr, relToHiveRR.get(srcRel),
+                  outerRR, subQueryToRelNode, true);
+          col_list.add(subQueryExpr);
+
+          ColumnInfo colInfo = new ColumnInfo(SemanticAnalyzer.getColumnInternalName(pos),
+                  subQueryExpr.getWritableObjectInspector(), tabAlias, false);
+          if (!out_rwsch.putWithCheck(tabAlias, colAlias, null, colInfo)) {
+            throw new CalciteSemanticException("Cannot add column to RR: " + tabAlias + "."
+                    + colAlias + " => " + colInfo + " due to duplication, see previous warnings",
+                    UnsupportedFeature.Duplicates_in_RR);
+          }
+          pos = Integer.valueOf(pos.intValue() + 1);
+        } else {
+
+          // 6.4 Build ExprNode corresponding to colums
+          if (expr.getType() == HiveParser.TOK_ALLCOLREF) {
+            pos = genColListRegex(".*", expr.getChildCount() == 0 ? null : SemanticAnalyzer
+                            .getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(), expr, col_list,
+                    excludedColumns, inputRR, starRR, pos, out_rwsch, qb.getAliases(), true);
+            selectStar = true;
+          } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL
+                  && !hasAsClause
+                  && !inputRR.getIsExprResolver()
+                  && SemanticAnalyzer.isRegex(
+                  SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getText()), conf)) {
+            // In case the expression is a regex COL.
+            // This can only happen without AS clause
+            // We don't allow this for ExprResolver - the Group By case
+            pos = genColListRegex(SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getText()),
+                    null, expr, col_list, excludedColumns, inputRR, starRR, pos, out_rwsch,
+                    qb.getAliases(), true);
+          } else if (expr.getType() == HiveParser.DOT
+                  && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL
+                  && inputRR.hasTableAlias(SemanticAnalyzer.unescapeIdentifier(expr.getChild(0)
+                  .getChild(0).getText().toLowerCase()))
+                  && !hasAsClause
+                  && !inputRR.getIsExprResolver()
+                  && SemanticAnalyzer.isRegex(
+                  SemanticAnalyzer.unescapeIdentifier(expr.getChild(1).getText()), conf)) {
+            // In case the expression is TABLE.COL (col can be regex).
+            // This can only happen without AS clause
+            // We don't allow this for ExprResolver - the Group By case
+            pos = genColListRegex(
+                    SemanticAnalyzer.unescapeIdentifier(expr.getChild(1).getText()),
+                    SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getChild(0).getText()
+                            .toLowerCase()), expr, col_list, excludedColumns, inputRR, starRR, pos,
+                    out_rwsch, qb.getAliases(), true);
+          } else if (ParseUtils.containsTokenOfType(expr, HiveParser.TOK_FUNCTIONDI)
+                  && !(srcRel instanceof HiveAggregate)) {
+            // Likely a malformed query eg, select hash(distinct c1) from t1;
+            throw new CalciteSemanticException("Distinct without an aggregation.",
+                    UnsupportedFeature.Distinct_without_an_aggreggation);
+          } else {
+            // Case when this is an expression
+            TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
+            // We allow stateful functions in the SELECT list (but nowhere else)
+            tcCtx.setAllowStatefulFunctions(true);
+            if (!qbp.getDestToGroupBy().isEmpty()) {
+              // Special handling of grouping function
+              expr = rewriteGroupingFunctionAST(getGroupByForClause(qbp, selClauseName), expr,
+                      !cubeRollupGrpSetPresent);
+            }
+            ExprNodeDesc exp = genExprNodeDesc(expr, inputRR, tcCtx);
+            String recommended = recommendName(exp, colAlias);
+            if (recommended != null && out_rwsch.get(null, recommended) == null) {
+              colAlias = recommended;
+            }
+            col_list.add(exp);
 
             ColumnInfo colInfo = new ColumnInfo(SemanticAnalyzer.getColumnInternalName(pos),
-                    subQueryExpr.getWritableObjectInspector(), tabAlias, false);
+                    exp.getWritableObjectInspector(), tabAlias, false);
+            colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp)
+                    .isSkewedCol() : false);
             if (!out_rwsch.putWithCheck(tabAlias, colAlias, null, colInfo)) {
               throw new CalciteSemanticException("Cannot add column to RR: " + tabAlias + "."
                       + colAlias + " => " + colInfo + " due to duplication, see previous warnings",
                       UnsupportedFeature.Duplicates_in_RR);
             }
-            pos = Integer.valueOf(pos.intValue() + 1);
-          } else {
 
-            // 6.4 Build ExprNode corresponding to colums
-            if (expr.getType() == HiveParser.TOK_ALLCOLREF) {
-              pos = genColListRegex(".*", expr.getChildCount() == 0 ? null : SemanticAnalyzer
-                              .getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(), expr, col_list,
-                      excludedColumns, inputRR, starRR, pos, out_rwsch, qb.getAliases(), true);
-              selectStar = true;
-            } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL
-                    && !hasAsClause
-                    && !inputRR.getIsExprResolver()
-                    && SemanticAnalyzer.isRegex(
-                    SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getText()), conf)) {
-              // In case the expression is a regex COL.
-              // This can only happen without AS clause
-              // We don't allow this for ExprResolver - the Group By case
-              pos = genColListRegex(SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getText()),
-                      null, expr, col_list, excludedColumns, inputRR, starRR, pos, out_rwsch,
-                      qb.getAliases(), true);
-            } else if (expr.getType() == HiveParser.DOT
-                    && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL
-                    && inputRR.hasTableAlias(SemanticAnalyzer.unescapeIdentifier(expr.getChild(0)
-                    .getChild(0).getText().toLowerCase()))
-                    && !hasAsClause
-                    && !inputRR.getIsExprResolver()
-                    && SemanticAnalyzer.isRegex(
-                    SemanticAnalyzer.unescapeIdentifier(expr.getChild(1).getText()), conf)) {
-              // In case the expression is TABLE.COL (col can be regex).
-              // This can only happen without AS clause
-              // We don't allow this for ExprResolver - the Group By case
-              pos = genColListRegex(
-                      SemanticAnalyzer.unescapeIdentifier(expr.getChild(1).getText()),
-                      SemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getChild(0).getText()
-                              .toLowerCase()), expr, col_list, excludedColumns, inputRR, starRR, pos,
-                      out_rwsch, qb.getAliases(), true);
-            } else if (ParseUtils.containsTokenOfType(expr, HiveParser.TOK_FUNCTIONDI)
-                    && !(srcRel instanceof HiveAggregate)) {
-              // Likely a malformed query eg, select hash(distinct c1) from t1;
-              throw new CalciteSemanticException("Distinct without an aggregation.",
-                      UnsupportedFeature.Distinct_without_an_aggreggation);
-            }
-              else {
-              // Case when this is an expression
-              TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
-              // We allow stateful functions in the SELECT list (but nowhere else)
-              tcCtx.setAllowStatefulFunctions(true);
-              if (!qbp.getDestToGroupBy().isEmpty()) {
-                // Special handling of grouping function
-                expr = rewriteGroupingFunctionAST(getGroupByForClause(qbp, selClauseName), expr,
-                        !cubeRollupGrpSetPresent);
-              }
-              ExprNodeDesc exp = genExprNodeDesc(expr, inputRR, tcCtx);
-              String recommended = recommendName(exp, colAlias);
-              if (recommended != null && out_rwsch.get(null, recommended) == null) {
-                colAlias = recommended;
-              }
-              col_list.add(exp);
-
-              ColumnInfo colInfo = new ColumnInfo(SemanticAnalyzer.getColumnInternalName(pos),
-                      exp.getWritableObjectInspector(), tabAlias, false);
-              colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp)
-                      .isSkewedCol() : false);
-              if (!out_rwsch.putWithCheck(tabAlias, colAlias, null, colInfo)) {
-                throw new CalciteSemanticException("Cannot add column to RR: " + tabAlias + "."
-                        + colAlias + " => " + colInfo + " due to duplication, see previous warnings",
-                        UnsupportedFeature.Duplicates_in_RR);
-              }
-
-              if (exp instanceof ExprNodeColumnDesc) {
-                ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exp;
-                String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn());
-                if (altMapping != null) {
-                  // TODO: this can overwrite the mapping. Should this be allowed?
-                  out_rwsch.put(altMapping[0], altMapping[1], colInfo);
-                }
-              }
-
-              pos = Integer.valueOf(pos.intValue() + 1);
-            }
+            pos = Integer.valueOf(pos.intValue() + 1);
           }
+        }
       }
       selectStar = selectStar && exprList.getChildCount() == posn + 1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/queries/clientpositive/groupby_multialias.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/groupby_multialias.q b/ql/src/test/queries/clientpositive/groupby_multialias.q
new file mode 100644
index 0000000..b0a0171
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/groupby_multialias.q
@@ -0,0 +1,7 @@
+create table t1 (a int);
+
+explain
+select t1.a as a1, min(t1.a) as a
+from t1
+group by t1.a;
+
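
The new groupby_multialias.q case is the regression trigger: the projection list produces two result columns competing for the name a in the row resolver, and before this patch that collision surfaced as the "Cannot add column to RR ... due to duplication" CalciteSemanticException visible in the CalcitePlanner hunk above, which turned CBO off for the query. A toy model of the putWithCheck() contract that error comes from (RowResolverSketch below is an illustration, not Hive's actual RowResolver):

    import java.util.HashMap;
    import java.util.Map;

    // Toy model of duplicate detection in a row-resolver-like schema map.
    // RowResolverSketch is illustrative only; Hive's RowResolver is far richer.
    public class RowResolverSketch {
      private final Map<String, String> schema = new HashMap<>();

      // Refuse to overwrite an existing (tableAlias, columnAlias) entry;
      // in Hive a false return here escalates to a Duplicates_in_RR error.
      public boolean putWithCheck(String tabAlias, String colAlias, String typeInfo) {
        String key = tabAlias + "." + colAlias;
        if (schema.containsKey(key)) {
          return false;
        }
        schema.put(key, typeInfo);
        return true;
      }

      public static void main(String[] args) {
        RowResolverSketch rr = new RowResolverSketch();
        // select t1.a as a1, min(t1.a) as a from t1 group by t1.a
        System.out.println(rr.putWithCheck("t1", "a1", "int")); // true
        System.out.println(rr.putWithCheck("t1", "a", "int"));  // true
        System.out.println(rr.putWithCheck("t1", "a", "int"));  // false: duplicate alias
      }
    }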

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/complex_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/complex_alias.q.out b/ql/src/test/results/clientpositive/complex_alias.q.out
index 5182153..64e1f37 100644
--- a/ql/src/test/results/clientpositive/complex_alias.q.out
+++ b/ql/src/test/results/clientpositive/complex_alias.q.out
@@ -17,7 +17,7 @@ POSTHOOK: Output: default@agg1
 POSTHOOK: Lineage: agg1.col0 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: agg1.col1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: agg1.col2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-Warning: Shuffle Join JOIN[20][tables = [single_use_subq12, single_use_subq11]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT single_use_subq11.a1 AS a1,
        single_use_subq11.a2 AS a2
@@ -76,27 +76,24 @@ STAGE PLANS:
             alias: agg1
             Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: col0 (type: int), col2 (type: double)
-              outputColumnNames: col0, col2
+              expressions: col0 (type: int)
+              outputColumnNames: col0
               Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(col2)
                 keys: col0 (type: int)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: sum(VALUE._col0)
           keys: KEY._col0 (type: int)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
@@ -151,24 +148,20 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: '42' (type: string), col0 (type: int)
-                outputColumnNames: _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col1 (type: string), _col2 (type: int)
-                    outputColumnNames: _col1, _col2
+                  Group By Operator
+                    keys: _col0 (type: string), _col1 (type: int)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col2 (type: int), _col1 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: int)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                       Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                        Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: agg1
             Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
@@ -177,32 +170,28 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: '41' (type: string), col0 (type: int)
-                outputColumnNames: _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col1 (type: string), _col2 (type: int)
-                    outputColumnNames: _col1, _col2
+                  Group By Operator
+                    keys: _col0 (type: string), _col1 (type: int)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col2 (type: int), _col1 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: int)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                       Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                        Statistics: Num rows: 2 Data size: 34 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: int), KEY._col1 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col1 (type: string)
+            expressions: _col0 (type: string), _col0 (type: string)
             outputColumnNames: _col1, _col2
             Statistics: Num rows: 1 Data size: 17 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -218,7 +207,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[20][tables = [single_use_subq12, single_use_subq11]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT single_use_subq11.a1 AS a1,
        single_use_subq11.a2 AS a2
 FROM   (SELECT Sum(agg1.col2) AS a1

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/groupby_multialias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multialias.q.out b/ql/src/test/results/clientpositive/groupby_multialias.q.out
new file mode 100644
index 0000000..cee790a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/groupby_multialias.q.out
@@ -0,0 +1,66 @@
+PREHOOK: query: create table t1 (a int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: create table t1 (a int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: explain
+select t1.a as a1, min(t1.a) as a
+from t1
+group by t1.a
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select t1.a as a1, min(t1.a) as a
+from t1
+group by t1.a
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: t1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Select Operator
+              expressions: a (type: int)
+              outputColumnNames: a
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Group By Operator
+                aggregations: min(a)
+                keys: a (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  value expressions: _col1 (type: int)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/order3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/order3.q.out b/ql/src/test/results/clientpositive/order3.q.out
index d3db1b9..85ee858 100644
--- a/ql/src/test/results/clientpositive/order3.q.out
+++ b/ql/src/test/results/clientpositive/order3.q.out
@@ -58,19 +58,23 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0
           Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          Select Operator
+            expressions: _col0 (type: int)
+            outputColumnNames: _col1
+            Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
+              key expressions: _col1 (type: int)
               sort order: +
               Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
@@ -92,7 +96,7 @@ STAGE PLANS:
 
   Stage: Stage-0
     Fetch Operator
-      limit: 3
+      limit: -1
       Processor Tree:
         ListSink
 
@@ -147,19 +151,23 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          Select Operator
+            expressions: _col1 (type: int), _col0 (type: int)
+            outputColumnNames: _col1, _col2
+            Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
+              key expressions: _col2 (type: int)
               sort order: +
               Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1
@@ -182,7 +190,7 @@ STAGE PLANS:
 
   Stage: Stage-0
     Fetch Operator
-      limit: 3
+      limit: -1
       Processor Tree:
         ListSink
 
@@ -237,19 +245,23 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col1, _col2
+            Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: int)
+              key expressions: _col2 (type: int)
               sort order: +
               Statistics: Num rows: 7 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               TopN Hash Memory Usage: 0.1

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/spark/query19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/spark/query19.q.out b/ql/src/test/results/clientpositive/perf/spark/query19.q.out
index 6a70ddc..281445c 100644
--- a/ql/src/test/results/clientpositive/perf/spark/query19.q.out
+++ b/ql/src/test/results/clientpositive/perf/spark/query19.q.out
@@ -241,11 +241,11 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 421657640 Data size: 37198759433 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: int), _col0 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: decimal(17,2))
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  expressions: _col2 (type: int), _col3 (type: string), _col4 (type: decimal(17,2)), _col0 (type: string), _col1 (type: int)
+                  outputColumnNames: _col2, _col3, _col4, _col5, _col6
                   Statistics: Num rows: 421657640 Data size: 37198759433 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col4 (type: decimal(17,2)), _col1 (type: string), _col0 (type: int), _col2 (type: int), _col3 (type: string)
+                    key expressions: _col4 (type: decimal(17,2)), _col5 (type: string), _col6 (type: int), _col2 (type: int), _col3 (type: string)
                     sort order: -++++
                     Statistics: Num rows: 421657640 Data size: 37198759433 Basic stats: COMPLETE Column stats: NONE
                     TopN Hash Memory Usage: 0.1
@@ -300,7 +300,7 @@ STAGE PLANS:
 
   Stage: Stage-0
     Fetch Operator
-      limit: 100
+      limit: -1
       Processor Tree:
         ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/spark/query55.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/spark/query55.q.out b/ql/src/test/results/clientpositive/perf/spark/query55.q.out
index c611918..44eb24e 100644
--- a/ql/src/test/results/clientpositive/perf/spark/query55.q.out
+++ b/ql/src/test/results/clientpositive/perf/spark/query55.q.out
@@ -137,12 +137,16 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 348477374 Data size: 30742775095 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col2 (type: decimal(17,2)), _col0 (type: int)
-                  sort order: -+
+                Select Operator
+                  expressions: _col1 (type: string), _col2 (type: decimal(17,2)), _col0 (type: int)
+                  outputColumnNames: _col1, _col2, _col3
                   Statistics: Num rows: 348477374 Data size: 30742775095 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: string)
+                  Reduce Output Operator
+                    key expressions: _col2 (type: decimal(17,2)), _col3 (type: int)
+                    sort order: -+
+                    Statistics: Num rows: 348477374 Data size: 30742775095 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: string)
         Reducer 5 
             Reduce Operator Tree:
               Select Operator
@@ -162,7 +166,7 @@ STAGE PLANS:
 
   Stage: Stage-0
     Fetch Operator
-      limit: 100
+      limit: -1
       Processor Tree:
         ListSink
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/spark/query71.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/spark/query71.q.out b/ql/src/test/results/clientpositive/perf/spark/query71.q.out
index 73af356..92b7915 100644
--- a/ql/src/test/results/clientpositive/perf/spark/query71.q.out
+++ b/ql/src/test/results/clientpositive/perf/spark/query71.q.out
@@ -296,11 +296,11 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 670816149 Data size: 72801917486 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: int), _col3 (type: string), _col1 (type: int), _col2 (type: int), _col4 (type: decimal(17,2))
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  expressions: _col3 (type: string), _col1 (type: int), _col2 (type: int), _col4 (type: decimal(17,2)), _col0 (type: int)
+                  outputColumnNames: _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 670816149 Data size: 72801917486 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col4 (type: decimal(17,2)), _col0 (type: int)
+                    key expressions: _col4 (type: decimal(17,2)), _col5 (type: int)
                     sort order: -+
                     Statistics: Num rows: 670816149 Data size: 72801917486 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/tez/query19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query19.q.out b/ql/src/test/results/clientpositive/perf/tez/query19.q.out
index 363425f..73bb6d9 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query19.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query19.q.out
@@ -59,10 +59,10 @@ Reducer 9 <- Map 11 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
       Reducer 6
-      File Output Operator [FS_44]
+      File Output Operator [FS_45]
         Limit [LIM_43] (rows=100 width=88)
           Number of rows:100
           Select Operator [SEL_42] (rows=421657640 width=88)
@@ -70,7 +70,7 @@ Stage-0
           <-Reducer 5 [SIMPLE_EDGE]
             SHUFFLE [RS_41]
               Select Operator [SEL_39] (rows=421657640 width=88)
-                Output:["_col0","_col1","_col2","_col3","_col4"]
+                Output:["_col2","_col3","_col4","_col5","_col6"]
                 Group By Operator [GBY_38] (rows=421657640 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
                 <-Reducer 4 [SIMPLE_EDGE]
@@ -82,47 +82,47 @@ Stage-0
                         Output:["_col8","_col13","_col14","_col15","_col16"]
                         Filter Operator [FIL_34] (rows=843315281 width=88)
                           predicate:(substr(_col3, 1, 5) <> substr(_col19, 1, 5))
-                          Merge Join Operator [MERGEJOIN_73] (rows=843315281 width=88)
+                          Merge Join Operator [MERGEJOIN_74] (rows=843315281 width=88)
                             Conds:RS_31._col7=RS_32._col0(Inner),Output:["_col3","_col8","_col13","_col14","_col15","_col16","_col19"]
                           <-Map 13 [SIMPLE_EDGE]
                             SHUFFLE [RS_32]
                               PartitionCols:_col0
                               Select Operator [SEL_24] (rows=1704 width=1910)
                                 Output:["_col0","_col1"]
-                                Filter Operator [FIL_68] (rows=1704 width=1910)
+                                Filter Operator [FIL_69] (rows=1704 width=1910)
                                   predicate:s_store_sk is not null
                                   TableScan [TS_22] (rows=1704 width=1910)
                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_zip"]
                           <-Reducer 3 [SIMPLE_EDGE]
                             SHUFFLE [RS_31]
                               PartitionCols:_col7
-                              Merge Join Operator [MERGEJOIN_72] (rows=766650239 width=88)
+                              Merge Join Operator [MERGEJOIN_73] (rows=766650239 width=88)
                                 Conds:RS_28._col0=RS_29._col2(Inner),Output:["_col3","_col7","_col8","_col13","_col14","_col15","_col16"]
                               <-Reducer 10 [SIMPLE_EDGE]
                                 SHUFFLE [RS_29]
                                   PartitionCols:_col2
-                                  Merge Join Operator [MERGEJOIN_71] (rows=696954748 width=88)
+                                  Merge Join Operator [MERGEJOIN_72] (rows=696954748 width=88)
                                     Conds:RS_18._col1=RS_19._col0(Inner),Output:["_col2","_col3","_col4","_col9","_col10","_col11","_col12"]
                                   <-Map 12 [SIMPLE_EDGE]
                                     SHUFFLE [RS_19]
                                       PartitionCols:_col0
                                       Select Operator [SEL_14] (rows=231000 width=1436)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
-                                        Filter Operator [FIL_67] (rows=231000 width=1436)
+                                        Filter Operator [FIL_68] (rows=231000 width=1436)
                                           predicate:((i_manager_id = 7) and i_item_sk is not null)
                                           TableScan [TS_12] (rows=462000 width=1436)
                                             default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_brand","i_manufact_id","i_manufact","i_manager_id"]
                                   <-Reducer 9 [SIMPLE_EDGE]
                                     SHUFFLE [RS_18]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_70] (rows=633595212 width=88)
+                                      Merge Join Operator [MERGEJOIN_71] (rows=633595212 width=88)
                                         Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                                       <-Map 11 [SIMPLE_EDGE]
                                         SHUFFLE [RS_16]
                                           PartitionCols:_col0
                                           Select Operator [SEL_11] (rows=18262 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_66] (rows=18262 width=1119)
+                                            Filter Operator [FIL_67] (rows=18262 width=1119)
                                               predicate:((d_moy = 11) and (d_year = 1999) and d_date_sk is not null)
                                               TableScan [TS_9] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
@@ -131,21 +131,21 @@ Stage-0
                                           PartitionCols:_col0
                                           Select Operator [SEL_8] (rows=575995635 width=88)
                                             Output:["_col0","_col1","_col2","_col3","_col4"]
-                                            Filter Operator [FIL_65] (rows=575995635 width=88)
+                                            Filter Operator [FIL_66] (rows=575995635 width=88)
                                               predicate:(ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                               TableScan [TS_6] (rows=575995635 width=88)
                                                 default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ext_sales_price"]
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_28]
                                   PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_69] (rows=88000001 width=860)
+                                  Merge Join Operator [MERGEJOIN_70] (rows=88000001 width=860)
                                     Conds:RS_25._col1=RS_26._col0(Inner),Output:["_col0","_col3"]
                                   <-Map 1 [SIMPLE_EDGE]
                                     SHUFFLE [RS_25]
                                       PartitionCols:_col1
                                       Select Operator [SEL_2] (rows=80000000 width=860)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_63] (rows=80000000 width=860)
+                                        Filter Operator [FIL_64] (rows=80000000 width=860)
                                           predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
                                           TableScan [TS_0] (rows=80000000 width=860)
                                             default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
@@ -154,7 +154,7 @@ Stage-0
                                       PartitionCols:_col0
                                       Select Operator [SEL_5] (rows=40000000 width=1014)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_64] (rows=40000000 width=1014)
+                                        Filter Operator [FIL_65] (rows=40000000 width=1014)
                                           predicate:ca_address_sk is not null
                                           TableScan [TS_3] (rows=40000000 width=1014)
                                             default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_zip"]

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/tez/query55.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query55.q.out b/ql/src/test/results/clientpositive/perf/tez/query55.q.out
index 27ea1c3..5a5e33a 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query55.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query55.q.out
@@ -34,55 +34,57 @@ Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
-    limit:100
+    limit:-1
     Stage-1
       Reducer 5
-      File Output Operator [FS_24]
+      File Output Operator [FS_25]
         Limit [LIM_23] (rows=100 width=88)
           Number of rows:100
           Select Operator [SEL_22] (rows=348477374 width=88)
             Output:["_col0","_col1","_col2"]
           <-Reducer 4 [SIMPLE_EDGE]
             SHUFFLE [RS_21]
-              Group By Operator [GBY_18] (rows=348477374 width=88)
-                Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
-              <-Reducer 3 [SIMPLE_EDGE]
-                SHUFFLE [RS_17]
-                  PartitionCols:_col0, _col1
-                  Group By Operator [GBY_16] (rows=696954748 width=88)
-                    Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)"],keys:_col7, _col8
-                    Merge Join Operator [MERGEJOIN_34] (rows=696954748 width=88)
-                      Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7","_col8"]
-                    <-Map 7 [SIMPLE_EDGE]
-                      SHUFFLE [RS_13]
-                        PartitionCols:_col0
-                        Select Operator [SEL_8] (rows=231000 width=1436)
-                          Output:["_col0","_col1","_col2"]
-                          Filter Operator [FIL_32] (rows=231000 width=1436)
-                            predicate:((i_manager_id = 36) and i_item_sk is not null)
-                            TableScan [TS_6] (rows=462000 width=1436)
-                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_brand","i_manager_id"]
-                    <-Reducer 2 [SIMPLE_EDGE]
-                      SHUFFLE [RS_12]
-                        PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_33] (rows=633595212 width=88)
-                          Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
-                        <-Map 1 [SIMPLE_EDGE]
-                          SHUFFLE [RS_9]
-                            PartitionCols:_col0
-                            Select Operator [SEL_2] (rows=575995635 width=88)
-                              Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_30] (rows=575995635 width=88)
-                                predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
-                                TableScan [TS_0] (rows=575995635 width=88)
-                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ext_sales_price"]
-                        <-Map 6 [SIMPLE_EDGE]
-                          SHUFFLE [RS_10]
-                            PartitionCols:_col0
-                            Select Operator [SEL_5] (rows=18262 width=1119)
-                              Output:["_col0"]
-                              Filter Operator [FIL_31] (rows=18262 width=1119)
-                                predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
-                                TableScan [TS_3] (rows=73049 width=1119)
-                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+              Select Operator [SEL_19] (rows=348477374 width=88)
+                Output:["_col1","_col2","_col3"]
+                Group By Operator [GBY_18] (rows=348477374 width=88)
+                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
+                <-Reducer 3 [SIMPLE_EDGE]
+                  SHUFFLE [RS_17]
+                    PartitionCols:_col0, _col1
+                    Group By Operator [GBY_16] (rows=696954748 width=88)
+                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)"],keys:_col7, _col8
+                      Merge Join Operator [MERGEJOIN_35] (rows=696954748 width=88)
+                        Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7","_col8"]
+                      <-Map 7 [SIMPLE_EDGE]
+                        SHUFFLE [RS_13]
+                          PartitionCols:_col0
+                          Select Operator [SEL_8] (rows=231000 width=1436)
+                            Output:["_col0","_col1","_col2"]
+                            Filter Operator [FIL_33] (rows=231000 width=1436)
+                              predicate:((i_manager_id = 36) and i_item_sk is not null)
+                              TableScan [TS_6] (rows=462000 width=1436)
+                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_brand","i_manager_id"]
+                      <-Reducer 2 [SIMPLE_EDGE]
+                        SHUFFLE [RS_12]
+                          PartitionCols:_col1
+                          Merge Join Operator [MERGEJOIN_34] (rows=633595212 width=88)
+                            Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
+                          <-Map 1 [SIMPLE_EDGE]
+                            SHUFFLE [RS_9]
+                              PartitionCols:_col0
+                              Select Operator [SEL_2] (rows=575995635 width=88)
+                                Output:["_col0","_col1","_col2"]
+                                Filter Operator [FIL_31] (rows=575995635 width=88)
+                                  predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
+                                  TableScan [TS_0] (rows=575995635 width=88)
+                                    default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ext_sales_price"]
+                          <-Map 6 [SIMPLE_EDGE]
+                            SHUFFLE [RS_10]
+                              PartitionCols:_col0
+                              Select Operator [SEL_5] (rows=18262 width=1119)
+                                Output:["_col0"]
+                                Filter Operator [FIL_32] (rows=18262 width=1119)
+                                  predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
+                                  TableScan [TS_3] (rows=73049 width=1119)
+                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7ea263cb/ql/src/test/results/clientpositive/perf/tez/query71.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query71.q.out b/ql/src/test/results/clientpositive/perf/tez/query71.q.out
index bd48e56..6635b2e 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query71.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query71.q.out
@@ -90,13 +90,13 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 7
-      File Output Operator [FS_52]
+      File Output Operator [FS_53]
         Select Operator [SEL_51] (rows=670816149 width=108)
           Output:["_col0","_col1","_col2","_col3","_col4"]
         <-Reducer 6 [SIMPLE_EDGE]
           SHUFFLE [RS_50]
             Select Operator [SEL_48] (rows=670816149 width=108)
-              Output:["_col0","_col1","_col2","_col3","_col4"]
+              Output:["_col1","_col2","_col3","_col4","_col5"]
               Group By Operator [GBY_47] (rows=670816149 width=108)
                 Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
               <-Reducer 5 [SIMPLE_EDGE]
@@ -104,28 +104,28 @@ Stage-0
                   PartitionCols:_col0, _col1, _col2, _col3
                   Group By Operator [GBY_45] (rows=1341632299 width=108)
                     Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col0)"],keys:_col4, _col8, _col9, _col5
-                    Merge Join Operator [MERGEJOIN_86] (rows=1341632299 width=108)
+                    Merge Join Operator [MERGEJOIN_87] (rows=1341632299 width=108)
                       Conds:RS_41._col2=RS_42._col0(Inner),Output:["_col0","_col4","_col5","_col8","_col9"]
                     <-Map 16 [SIMPLE_EDGE]
                       SHUFFLE [RS_42]
                         PartitionCols:_col0
                         Select Operator [SEL_37] (rows=86400 width=471)
                           Output:["_col0","_col1","_col2"]
-                          Filter Operator [FIL_81] (rows=86400 width=471)
+                          Filter Operator [FIL_82] (rows=86400 width=471)
                             predicate:(((t_meal_time = 'breakfast') or (t_meal_time = 'dinner')) and t_time_sk is not null)
                             TableScan [TS_35] (rows=86400 width=471)
                               default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute","t_meal_time"]
                     <-Reducer 4 [SIMPLE_EDGE]
                       SHUFFLE [RS_41]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_85] (rows=1219665700 width=108)
+                        Merge Join Operator [MERGEJOIN_86] (rows=1219665700 width=108)
                           Conds:Union 3._col1=RS_39._col0(Inner),Output:["_col0","_col2","_col4","_col5"]
                         <-Map 15 [SIMPLE_EDGE]
                           SHUFFLE [RS_39]
                             PartitionCols:_col0
                             Select Operator [SEL_34] (rows=231000 width=1436)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_80] (rows=231000 width=1436)
+                              Filter Operator [FIL_81] (rows=231000 width=1436)
                                 predicate:((i_manager_id = 1) and i_item_sk is not null)
                                 TableScan [TS_32] (rows=462000 width=1436)
                                   default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_brand","i_manager_id"]
@@ -135,14 +135,14 @@ Stage-0
                               PartitionCols:_col1
                               Select Operator [SEL_19] (rows=316788826 width=135)
                                 Output:["_col0","_col1","_col2"]
-                                Merge Join Operator [MERGEJOIN_83] (rows=316788826 width=135)
+                                Merge Join Operator [MERGEJOIN_84] (rows=316788826 width=135)
                                   Conds:RS_16._col0=RS_17._col0(Inner),Output:["_col1","_col2","_col3"]
                                 <-Map 11 [SIMPLE_EDGE]
                                   SHUFFLE [RS_17]
                                     PartitionCols:_col0
                                     Select Operator [SEL_15] (rows=18262 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_77] (rows=18262 width=1119)
+                                      Filter Operator [FIL_78] (rows=18262 width=1119)
                                         predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_13] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
@@ -151,7 +151,7 @@ Stage-0
                                     PartitionCols:_col0
                                     Select Operator [SEL_12] (rows=287989836 width=135)
                                       Output:["_col0","_col1","_col2","_col3"]
-                                      Filter Operator [FIL_76] (rows=287989836 width=135)
+                                      Filter Operator [FIL_77] (rows=287989836 width=135)
                                         predicate:(cs_item_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null)
                                         TableScan [TS_10] (rows=287989836 width=135)
                                           default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_item_sk","cs_ext_sales_price"]
@@ -160,14 +160,14 @@ Stage-0
                               PartitionCols:_col1
                               Select Operator [SEL_30] (rows=633595212 width=88)
                                 Output:["_col0","_col1","_col2"]
-                                Merge Join Operator [MERGEJOIN_84] (rows=633595212 width=88)
+                                Merge Join Operator [MERGEJOIN_85] (rows=633595212 width=88)
                                   Conds:RS_27._col0=RS_28._col0(Inner),Output:["_col1","_col2","_col3"]
                                 <-Map 12 [SIMPLE_EDGE]
                                   SHUFFLE [RS_27]
                                     PartitionCols:_col0
                                     Select Operator [SEL_23] (rows=575995635 width=88)
                                       Output:["_col0","_col1","_col2","_col3"]
-                                      Filter Operator [FIL_78] (rows=575995635 width=88)
+                                      Filter Operator [FIL_79] (rows=575995635 width=88)
                                         predicate:(ss_item_sk is not null and ss_sold_date_sk is not null and ss_sold_time_sk is not null)
                                         TableScan [TS_21] (rows=575995635 width=88)
                                           default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_sold_time_sk","ss_item_sk","ss_ext_sales_price"]
@@ -176,7 +176,7 @@ Stage-0
                                     PartitionCols:_col0
                                     Select Operator [SEL_26] (rows=18262 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_79] (rows=18262 width=1119)
+                                      Filter Operator [FIL_80] (rows=18262 width=1119)
                                         predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_24] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
@@ -185,14 +185,14 @@ Stage-0
                               PartitionCols:_col1
                               Select Operator [SEL_9] (rows=158402938 width=135)
                                 Output:["_col0","_col1","_col2"]
-                                Merge Join Operator [MERGEJOIN_82] (rows=158402938 width=135)
+                                Merge Join Operator [MERGEJOIN_83] (rows=158402938 width=135)
                                   Conds:RS_6._col0=RS_7._col0(Inner),Output:["_col1","_col2","_col3"]
                                 <-Map 1 [SIMPLE_EDGE]
                                   SHUFFLE [RS_6]
                                     PartitionCols:_col0
                                     Select Operator [SEL_2] (rows=144002668 width=135)
                                       Output:["_col0","_col1","_col2","_col3"]
-                                      Filter Operator [FIL_74] (rows=144002668 width=135)
+                                      Filter Operator [FIL_75] (rows=144002668 width=135)
                                         predicate:(ws_item_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null)
                                         TableScan [TS_0] (rows=144002668 width=135)
                                           default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_item_sk","ws_ext_sales_price"]
@@ -201,7 +201,7 @@ Stage-0
                                     PartitionCols:_col0
                                     Select Operator [SEL_5] (rows=18262 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_75] (rows=18262 width=1119)
+                                      Filter Operator [FIL_76] (rows=18262 width=1119)
                                         predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_3] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]


[07/50] [abbrv] hive git commit: HIVE-18272: Fix check-style violations in subquery code (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by ga...@apache.org.
HIVE-18272: Fix check-style violations in subquery code (Vineet Garg, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ca96613d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ca96613d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ca96613d

Branch: refs/heads/standalone-metastore
Commit: ca96613da5705c8dfd1a3269315551fee225444a
Parents: a96564c
Author: Vineet Garg <vg...@apache.org>
Authored: Thu Dec 14 12:24:32 2017 -0800
Committer: Vineet Garg <vg...@apache.org>
Committed: Thu Dec 14 12:24:32 2017 -0800

----------------------------------------------------------------------
 .../calcite/HiveSubQRemoveRelBuilder.java       | 448 ++++-----
 .../calcite/rules/HiveRelDecorrelator.java      | 637 +++++++------
 .../calcite/rules/HiveSubQueryRemoveRule.java   | 933 +++++++++----------
 3 files changed, 1005 insertions(+), 1013 deletions(-)
----------------------------------------------------------------------
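The bulk of this change re-indents wrapped lines from an 8-space to a
4-space continuation indent. As a minimal before/after sketch of the
convention being enforced (assuming Checkstyle's Indentation check with
lineWrappingIndentation set to 4; the fragment is adapted from the
constructor in the diff below):

  // Before: wrapped lines drift to an 8-space (or deeper) continuation indent.
  this.filterFactory =
          Util.first(context.unwrap(RelFactories.FilterFactory.class),
                  HiveRelFactories.HIVE_FILTER_FACTORY);

  // After: every wrapped line uses a uniform 4-space continuation indent.
  this.filterFactory =
      Util.first(context.unwrap(RelFactories.FilterFactory.class),
          HiveRelFactories.HIVE_FILTER_FACTORY);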


http://git-wip-us.apache.org/repos/asf/hive/blob/ca96613d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveSubQRemoveRelBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveSubQRemoveRelBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveSubQRemoveRelBuilder.java
index c6a5ce2..a8b408a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveSubQRemoveRelBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveSubQRemoveRelBuilder.java
@@ -100,6 +100,9 @@ import java.util.TreeSet;
  *  because CALCITE-1493 hasn't been fixed yet
  *  This should be deleted and replaced with RelBuilder in SubqueryRemoveRule
  *  once CALCITE-1493 is fixed.
+ *  EDIT: Although CALCITE-1493 has been fixed and released, Hive now has special handling
+ *    in join (it takes a flag indicating whether a semi join should be created), so we
+ *    still cannot replace this with Calcite's RelBuilder.
  *
  * <p>{@code RelBuilder} does not make possible anything that you could not
  * also accomplish by calling the factory methods of the particular relational
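The special handling referred to in the EDIT comment above is the extra
semi-join flag Hive threads through its join construction. A hypothetical
sketch of that dispatch, using the joinFactory and semiJoinFactory fields
this class declares (the method name and exact factory signatures here are
illustrative assumptions, not the actual Hive API):

  // Illustrative only: the builder picks the semi-join factory when the
  // caller asks for one, which Calcite's stock RelBuilder did not expose.
  private RelNode makeJoin(RelNode left, RelNode right, RexNode condition,
                           JoinRelType joinType, boolean createSemiJoin) {
    if (createSemiJoin) {
      // Assumed to follow the RelFactories.SemiJoinFactory contract.
      return semiJoinFactory.createSemiJoin(left, right, condition);
    }
    return joinFactory.createJoin(left, right, condition,
        ImmutableSet.<CorrelationId>of(), joinType, false);
  }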
@@ -116,14 +119,14 @@ import java.util.TreeSet;
  */
 public class HiveSubQRemoveRelBuilder {
   private static final Function<RexNode, String> FN_TYPE =
-          new Function<RexNode, String>() {
-            public String apply(RexNode input) {
-              return input + ": " + input.getType();
-            }
-          };
+      new Function<RexNode, String>() {
+        public String apply(RexNode input) {
+          return input + ": " + input.getType();
+        }
+      };
 
-  protected final RelOptCluster cluster;
-  protected final RelOptSchema relOptSchema;
+  private final RelOptCluster cluster;
+  private final RelOptSchema relOptSchema;
   private final RelFactories.FilterFactory filterFactory;
   private final RelFactories.ProjectFactory projectFactory;
   private final RelFactories.AggregateFactory aggregateFactory;
@@ -137,57 +140,57 @@ public class HiveSubQRemoveRelBuilder {
   private final Deque<Frame> stack = new ArrayDeque<>();
 
   public HiveSubQRemoveRelBuilder(Context context, RelOptCluster cluster,
-                       RelOptSchema relOptSchema) {
+                                  RelOptSchema relOptSchema) {
     this.cluster = cluster;
     this.relOptSchema = relOptSchema;
     if (context == null) {
       context = Contexts.EMPTY_CONTEXT;
     }
     this.aggregateFactory =
-            Util.first(context.unwrap(RelFactories.AggregateFactory.class),
-                    HiveRelFactories.HIVE_AGGREGATE_FACTORY);
+        Util.first(context.unwrap(RelFactories.AggregateFactory.class),
+            HiveRelFactories.HIVE_AGGREGATE_FACTORY);
     this.filterFactory =
-            Util.first(context.unwrap(RelFactories.FilterFactory.class),
-                    HiveRelFactories.HIVE_FILTER_FACTORY);
+        Util.first(context.unwrap(RelFactories.FilterFactory.class),
+            HiveRelFactories.HIVE_FILTER_FACTORY);
     this.projectFactory =
-            Util.first(context.unwrap(RelFactories.ProjectFactory.class),
-                    HiveRelFactories.HIVE_PROJECT_FACTORY);
+        Util.first(context.unwrap(RelFactories.ProjectFactory.class),
+            HiveRelFactories.HIVE_PROJECT_FACTORY);
     this.sortFactory =
-            Util.first(context.unwrap(RelFactories.SortFactory.class),
-                    HiveRelFactories.HIVE_SORT_FACTORY);
+        Util.first(context.unwrap(RelFactories.SortFactory.class),
+            HiveRelFactories.HIVE_SORT_FACTORY);
     this.setOpFactory =
-            Util.first(context.unwrap(RelFactories.SetOpFactory.class),
-                    HiveRelFactories.HIVE_SET_OP_FACTORY);
+        Util.first(context.unwrap(RelFactories.SetOpFactory.class),
+            HiveRelFactories.HIVE_SET_OP_FACTORY);
     this.joinFactory =
-            Util.first(context.unwrap(RelFactories.JoinFactory.class),
-                    HiveRelFactories.HIVE_JOIN_FACTORY);
+        Util.first(context.unwrap(RelFactories.JoinFactory.class),
+            HiveRelFactories.HIVE_JOIN_FACTORY);
     this.semiJoinFactory =
-            Util.first(context.unwrap(RelFactories.SemiJoinFactory.class),
-                    HiveRelFactories.HIVE_SEMI_JOIN_FACTORY);
+        Util.first(context.unwrap(RelFactories.SemiJoinFactory.class),
+            HiveRelFactories.HIVE_SEMI_JOIN_FACTORY);
     this.correlateFactory =
-            Util.first(context.unwrap(RelFactories.CorrelateFactory.class),
-                    RelFactories.DEFAULT_CORRELATE_FACTORY);
+        Util.first(context.unwrap(RelFactories.CorrelateFactory.class),
+            RelFactories.DEFAULT_CORRELATE_FACTORY);
     this.valuesFactory =
-            Util.first(context.unwrap(RelFactories.ValuesFactory.class),
-                    RelFactories.DEFAULT_VALUES_FACTORY);
+        Util.first(context.unwrap(RelFactories.ValuesFactory.class),
+            RelFactories.DEFAULT_VALUES_FACTORY);
     this.scanFactory =
-            Util.first(context.unwrap(RelFactories.TableScanFactory.class),
-                    RelFactories.DEFAULT_TABLE_SCAN_FACTORY);
+        Util.first(context.unwrap(RelFactories.TableScanFactory.class),
+            RelFactories.DEFAULT_TABLE_SCAN_FACTORY);
   }
 
-    /** Creates a RelBuilder. */
+  /** Creates a RelBuilder. */
   public static HiveSubQRemoveRelBuilder create(FrameworkConfig config) {
     final RelOptCluster[] clusters = {null};
     final RelOptSchema[] relOptSchemas = {null};
     Frameworks.withPrepare(
-            new Frameworks.PrepareAction<Void>(config) {
-              public Void apply(RelOptCluster cluster, RelOptSchema relOptSchema,
-                                SchemaPlus rootSchema, CalciteServerStatement statement) {
-                clusters[0] = cluster;
-                relOptSchemas[0] = relOptSchema;
-                return null;
-              }
-            });
+        new Frameworks.PrepareAction<Void>(config) {
+          public Void apply(RelOptCluster cluster, RelOptSchema relOptSchema,
+                            SchemaPlus rootSchema, CalciteServerStatement statement) {
+            clusters[0] = cluster;
+            relOptSchemas[0] = relOptSchema;
+            return null;
+          }
+        });
     return new HiveSubQRemoveRelBuilder(config.getContext(), clusters[0], relOptSchemas[0]);
   }
 
@@ -286,15 +289,15 @@ public class HiveSubQRemoveRelBuilder {
       return rexBuilder.makeExactLiteral((BigDecimal) value);
     } else if (value instanceof Float || value instanceof Double) {
       return rexBuilder.makeApproxLiteral(
-              BigDecimal.valueOf(((Number) value).doubleValue()));
+          BigDecimal.valueOf(((Number) value).doubleValue()));
     } else if (value instanceof Number) {
       return rexBuilder.makeExactLiteral(
-              BigDecimal.valueOf(((Number) value).longValue()));
+          BigDecimal.valueOf(((Number) value).longValue()));
     } else if (value instanceof String) {
       return rexBuilder.makeLiteral((String) value);
     } else {
       throw new IllegalArgumentException("cannot convert " + value
-              + " (" + value.getClass() + ") to a constant");
+          + " (" + value.getClass() + ") to a constant");
     }
   }
 
@@ -323,7 +326,7 @@ public class HiveSubQRemoveRelBuilder {
       return field(inputCount, inputOrdinal, i);
     } else {
       throw new IllegalArgumentException("field [" + fieldName
-              + "] not found; input fields are: " + fieldNames);
+          + "] not found; input fields are: " + fieldNames);
     }
   }
 
@@ -359,12 +362,12 @@ public class HiveSubQRemoveRelBuilder {
     final RelDataType rowType = input.getRowType();
     if (fieldOrdinal < 0 || fieldOrdinal > rowType.getFieldCount()) {
       throw new IllegalArgumentException("field ordinal [" + fieldOrdinal
-              + "] out of range; input fields are: " + rowType.getFieldNames());
+          + "] out of range; input fields are: " + rowType.getFieldNames());
     }
     final RelDataTypeField field = rowType.getFieldList().get(fieldOrdinal);
     final int offset = inputOffset(inputCount, inputOrdinal);
     final RexInputRef ref = cluster.getRexBuilder()
-            .makeInputRef(field.getType(), offset + fieldOrdinal);
+        .makeInputRef(field.getType(), offset + fieldOrdinal);
     final RelDataTypeField aliasField = frame.fields().get(fieldOrdinal);
     if (!alias || field.getName().equals(aliasField.getName())) {
       return ref;
@@ -388,15 +391,15 @@ public class HiveSubQRemoveRelBuilder {
           return field(offset + i);
         } else {
           throw new IllegalArgumentException("no field '" + fieldName
-                  + "' in relation '" + alias
-                  + "'; fields are: " + pair.right.getFieldNames());
+              + "' in relation '" + alias
+              + "'; fields are: " + pair.right.getFieldNames());
         }
       }
       aliases.add(pair.left);
       offset += pair.right.getFieldCount();
     }
     throw new IllegalArgumentException("no relation wtih alias '" + alias
-            + "'; aliases are: " + aliases);
+        + "'; aliases are: " + aliases);
   }
 
   /** Returns references to the fields of the top input. */
@@ -421,16 +424,16 @@ public class HiveSubQRemoveRelBuilder {
     for (RelFieldCollation fieldCollation : collation.getFieldCollations()) {
       RexNode node = field(fieldCollation.getFieldIndex());
       switch (fieldCollation.direction) {
-        case DESCENDING:
-          node = desc(node);
+      case DESCENDING:
+        node = desc(node);
       }
       switch (fieldCollation.nullDirection) {
-        case FIRST:
-          node = nullsFirst(node);
-          break;
-        case LAST:
-          node = nullsLast(node);
-          break;
+      case FIRST:
+        node = nullsFirst(node);
+        break;
+      case LAST:
+        node = nullsLast(node);
+        break;
       }
       nodes.add(node);
     }
@@ -480,7 +483,7 @@ public class HiveSubQRemoveRelBuilder {
     final RelDataType type = builder.deriveReturnType(operator, operandList);
     if (type == null) {
       throw new IllegalArgumentException("cannot derive type: " + operator
-              + "; operands: " + Lists.transform(operandList, FN_TYPE));
+          + "; operands: " + Lists.transform(operandList, FN_TYPE));
     }
     return builder.makeCall(type, operator, operandList);
   }
@@ -489,7 +492,7 @@ public class HiveSubQRemoveRelBuilder {
   public RexNode call(SqlOperator operator,
                       Iterable<? extends RexNode> operands) {
     return cluster.getRexBuilder().makeCall(operator,
-            ImmutableList.copyOf(operands));
+        ImmutableList.copyOf(operands));
   }
 
   /** Creates an AND. */
@@ -546,7 +549,7 @@ public class HiveSubQRemoveRelBuilder {
    * and precision or length. */
   public RexNode cast(RexNode expr, SqlTypeName typeName, int precision) {
     final RelDataType type =
-            cluster.getTypeFactory().createSqlType(typeName, precision);
+        cluster.getTypeFactory().createSqlType(typeName, precision);
     return cluster.getRexBuilder().makeCast(type, expr);
   }
 
@@ -555,7 +558,7 @@ public class HiveSubQRemoveRelBuilder {
   public RexNode cast(RexNode expr, SqlTypeName typeName, int precision,
                       int scale) {
     final RelDataType type =
-            cluster.getTypeFactory().createSqlType(typeName, precision, scale);
+        cluster.getTypeFactory().createSqlType(typeName, precision, scale);
     return cluster.getRexBuilder().makeCast(type, expr);
   }
 
@@ -604,7 +607,7 @@ public class HiveSubQRemoveRelBuilder {
   public GroupKey groupKey(Iterable<? extends RexNode> nodes, boolean indicator,
                            Iterable<? extends Iterable<? extends RexNode>> nodeLists) {
     final ImmutableList.Builder<ImmutableList<RexNode>> builder =
-            ImmutableList.builder();
+        ImmutableList.builder();
     for (Iterable<? extends RexNode> nodeList : nodeLists) {
       builder.add(ImmutableList.copyOf(nodeList));
     }
@@ -636,14 +639,14 @@ public class HiveSubQRemoveRelBuilder {
       groupSets = ImmutableList.of(groupSet);
     }
     final ImmutableList<RexNode> nodes =
-            fields(ImmutableIntList.of(groupSet.toArray()));
+        fields(ImmutableIntList.of(groupSet.toArray()));
     final List<ImmutableList<RexNode>> nodeLists =
-            Lists.transform(groupSets,
-                    new Function<ImmutableBitSet, ImmutableList<RexNode>>() {
-                      public ImmutableList<RexNode> apply(ImmutableBitSet input) {
-                        return fields(ImmutableIntList.of(input.toArray()));
-                      }
-                    });
+        Lists.transform(groupSets,
+            new Function<ImmutableBitSet, ImmutableList<RexNode>>() {
+              public ImmutableList<RexNode> apply(ImmutableBitSet input) {
+                return fields(ImmutableIntList.of(input.toArray()));
+              }
+            });
     return groupKey(nodes, indicator, nodeLists);
   }
 
@@ -651,7 +654,7 @@ public class HiveSubQRemoveRelBuilder {
   public AggCall aggregateCall(SqlAggFunction aggFunction, boolean distinct,
                                RexNode filter, String alias, RexNode... operands) {
     return aggregateCall(aggFunction, distinct, filter, alias,
-            ImmutableList.copyOf(operands));
+        ImmutableList.copyOf(operands));
   }
 
   /** Creates a call to an aggregate function. */
@@ -666,13 +669,13 @@ public class HiveSubQRemoveRelBuilder {
       }
     }
     return new AggCallImpl(aggFunction, distinct, filter, alias,
-            ImmutableList.copyOf(operands));
+        ImmutableList.copyOf(operands));
   }
 
   /** Creates a call to the COUNT aggregate function. */
   public AggCall count(boolean distinct, String alias, RexNode... operands) {
     return aggregateCall(SqlStdOperatorTable.COUNT, distinct, null, alias,
-            operands);
+        operands);
   }
 
   /** Creates a call to the COUNT(*) aggregate function. */
@@ -683,13 +686,13 @@ public class HiveSubQRemoveRelBuilder {
   /** Creates a call to the SUM aggregate function. */
   public AggCall sum(boolean distinct, String alias, RexNode operand) {
     return aggregateCall(SqlStdOperatorTable.SUM, distinct, null, alias,
-            operand);
+        operand);
   }
 
   /** Creates a call to the AVG aggregate function. */
   public AggCall avg(boolean distinct, String alias, RexNode operand) {
     return aggregateCall(
-            SqlStdOperatorTable.AVG, distinct, null, alias, operand);
+        SqlStdOperatorTable.AVG, distinct, null, alias, operand);
   }
 
   /** Creates a call to the MIN aggregate function. */
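The aggregate helpers in the hunks above mirror Calcite's RelBuilder surface,
which this class replicates. A minimal sketch of how they compose, roughly
"SELECT deptno, COUNT(*) AS c, SUM(sal) AS s FROM emp GROUP BY deptno";
scan(...), groupKey(String...) and field(String) are assumed from the Calcite
API, and the emp table and its columns are invented for illustration:

  import org.apache.calcite.rel.RelNode;

  // Hedged sketch, not part of this patch.
  static RelNode aggregateSketch(HiveSubQRemoveRelBuilder b) {
    return b.scan("emp")
        .aggregate(b.groupKey("deptno"),
            b.countStar("c"),
            b.sum(false, "s", b.field("sal")))
        .build();
  }
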
@@ -789,7 +792,7 @@ public class HiveSubQRemoveRelBuilder {
    * @param fieldNames field names for expressions
    */
   public HiveSubQRemoveRelBuilder project(Iterable<? extends RexNode> nodes,
-                            Iterable<String> fieldNames) {
+                                          Iterable<String> fieldNames) {
     return project(nodes, fieldNames, false);
   }
 
@@ -817,9 +820,9 @@ public class HiveSubQRemoveRelBuilder {
    * @param force create project even if it is identity
    */
   public HiveSubQRemoveRelBuilder project(
-          Iterable<? extends RexNode> nodes,
-          Iterable<String> fieldNames,
-          boolean force) {
+      Iterable<? extends RexNode> nodes,
+      Iterable<String> fieldNames,
+      boolean force) {
     final List<String> names = new ArrayList<>();
     final List<RexNode> exprList = Lists.newArrayList(nodes);
     final Iterator<String> nameIterator = fieldNames.iterator();
@@ -837,17 +840,17 @@ public class HiveSubQRemoveRelBuilder {
         // create "virtual" row type for project only rename fields
         final Frame frame = stack.pop();
         final RelDataType rowType =
-                RexUtil.createStructType(cluster.getTypeFactory(), exprList,
-                        names, SqlValidatorUtil.F_SUGGESTER);
+            RexUtil.createStructType(cluster.getTypeFactory(), exprList,
+                names, SqlValidatorUtil.F_SUGGESTER);
         stack.push(
-                new Frame(frame.rel,
-                        ImmutableList.of(Pair.of(frame.right.get(0).left, rowType))));
+            new Frame(frame.rel,
+                ImmutableList.of(Pair.of(frame.right.get(0).left, rowType))));
         return this;
       }
     }
     final RelNode project =
-            projectFactory.createProject(build(), ImmutableList.copyOf(exprList),
-                    names);
+        projectFactory.createProject(build(), ImmutableList.copyOf(exprList),
+            names);
     push(project);
     return this;
   }
@@ -865,24 +868,24 @@ public class HiveSubQRemoveRelBuilder {
    */
   private String inferAlias(List<RexNode> exprList, RexNode expr) {
     switch (expr.getKind()) {
-      case INPUT_REF:
-        final RexInputRef ref = (RexInputRef) expr;
-        return peek(0).getRowType().getFieldNames().get(ref.getIndex());
-      case CAST:
-        return inferAlias(exprList, ((RexCall) expr).getOperands().get(0));
-      case AS:
-        final RexCall call = (RexCall) expr;
-        for (;;) {
-          final int i = exprList.indexOf(expr);
-          if (i < 0) {
-            break;
-          }
-          exprList.set(i, call.getOperands().get(0));
+    case INPUT_REF:
+      final RexInputRef ref = (RexInputRef) expr;
+      return peek(0).getRowType().getFieldNames().get(ref.getIndex());
+    case CAST:
+      return inferAlias(exprList, ((RexCall) expr).getOperands().get(0));
+    case AS:
+      final RexCall call = (RexCall) expr;
+      for (;;) {
+        final int i = exprList.indexOf(expr);
+        if (i < 0) {
+          break;
         }
-        return ((NlsString) ((RexLiteral) call.getOperands().get(1)).getValue())
-                .getValue();
-      default:
-        return null;
+        exprList.set(i, call.getOperands().get(0));
+      }
+      return ((NlsString) ((RexLiteral) call.getOperands().get(1)).getValue())
+          .getValue();
+    default:
+      return null;
     }
   }
 
@@ -905,26 +908,26 @@ public class HiveSubQRemoveRelBuilder {
     final List<RexNode> extraNodes = projects(inputRowType);
     final GroupKeyImpl groupKey_ = (GroupKeyImpl) groupKey;
     final ImmutableBitSet groupSet =
-            ImmutableBitSet.of(registerExpressions(extraNodes, groupKey_.nodes));
+        ImmutableBitSet.of(registerExpressions(extraNodes, groupKey_.nodes));
     final ImmutableList<ImmutableBitSet> groupSets;
     if (groupKey_.nodeLists != null) {
       final int sizeBefore = extraNodes.size();
       final SortedSet<ImmutableBitSet> groupSetSet =
-              new TreeSet<>(ImmutableBitSet.ORDERING);
+          new TreeSet<>(ImmutableBitSet.ORDERING);
       for (ImmutableList<RexNode> nodeList : groupKey_.nodeLists) {
         final ImmutableBitSet groupSet2 =
-                ImmutableBitSet.of(registerExpressions(extraNodes, nodeList));
+            ImmutableBitSet.of(registerExpressions(extraNodes, nodeList));
         if (!groupSet.contains(groupSet2)) {
           throw new IllegalArgumentException("group set element " + nodeList
-                  + " must be a subset of group key");
+              + " must be a subset of group key");
         }
         groupSetSet.add(groupSet2);
       }
       groupSets = ImmutableList.copyOf(groupSetSet);
       if (extraNodes.size() > sizeBefore) {
         throw new IllegalArgumentException(
-                "group sets contained expressions not in group key: "
-                        + extraNodes.subList(sizeBefore, extraNodes.size()));
+            "group sets contained expressions not in group key: "
+                + extraNodes.subList(sizeBefore, extraNodes.size()));
       }
     } else {
       groupSets = ImmutableList.of(groupSet);
@@ -949,10 +952,10 @@ public class HiveSubQRemoveRelBuilder {
         final AggCallImpl aggCall1 = (AggCallImpl) aggCall;
         final List<Integer> args = registerExpressions(extraNodes, aggCall1.operands);
         final int filterArg = aggCall1.filter == null ? -1
-                : registerExpression(extraNodes, aggCall1.filter);
+            : registerExpression(extraNodes, aggCall1.filter);
         aggregateCall =
-                AggregateCall.create(aggCall1.aggFunction, aggCall1.distinct, args,
-                        filterArg, groupSet.cardinality(), r, null, aggCall1.alias);
+            AggregateCall.create(aggCall1.aggFunction, aggCall1.distinct, args,
+                filterArg, groupSet.cardinality(), r, null, aggCall1.alias);
       } else {
         aggregateCall = ((AggCallImpl2) aggCall).aggregateCall;
       }
@@ -964,7 +967,7 @@ public class HiveSubQRemoveRelBuilder {
       assert groupSet.contains(set);
     }
     RelNode aggregate = aggregateFactory.createAggregate(r,
-            groupKey_.indicator, groupSet, groupSets, aggregateCalls);
+        groupKey_.indicator, groupSet, groupSets, aggregateCalls);
     push(aggregate);
     return this;
   }
@@ -1002,22 +1005,22 @@ public class HiveSubQRemoveRelBuilder {
       inputs.add(0, build());
     }
     switch (kind) {
-      case UNION:
-      case INTERSECT:
-      case EXCEPT:
+    case UNION:
+    case INTERSECT:
+    case EXCEPT:
       if (n < 1) {
         throw new IllegalArgumentException(
             "bad INTERSECT/UNION/EXCEPT input count");
-        }
-        break;
-      default:
-        throw new AssertionError("bad setOp " + kind);
+      }
+      break;
+    default:
+      throw new AssertionError("bad setOp " + kind);
     }
     switch (n) {
-      case 1:
-        return push(inputs.get(0));
-      default:
-        return push(setOpFactory.createSetOp(kind, inputs, all));
+    case 1:
+      return push(inputs.get(0));
+    default:
+      return push(setOpFactory.createSetOp(kind, inputs, all));
     }
   }
 
@@ -1079,16 +1082,16 @@ public class HiveSubQRemoveRelBuilder {
 
   /** Creates a {@link org.apache.calcite.rel.core.Join}. */
   public HiveSubQRemoveRelBuilder join(JoinRelType joinType, RexNode condition0,
-                         RexNode... conditions) {
+                                       RexNode... conditions) {
     return join(joinType, Lists.asList(condition0, conditions));
   }
 
   /** Creates a {@link org.apache.calcite.rel.core.Join} with multiple
    * conditions. */
   public HiveSubQRemoveRelBuilder join(JoinRelType joinType,
-                         Iterable<? extends RexNode> conditions) {
+                                       Iterable<? extends RexNode> conditions) {
     return join(joinType, and(conditions),
-            ImmutableSet.<CorrelationId>of());
+        ImmutableSet.<CorrelationId>of());
   }
 
   public HiveSubQRemoveRelBuilder join(JoinRelType joinType, RexNode condition) {
@@ -1099,8 +1102,8 @@ public class HiveSubQRemoveRelBuilder {
    * a Holder. */
   public HiveSubQRemoveRelBuilder variable(Holder<RexCorrelVariable> v) {
     v.set((RexCorrelVariable)
-            getRexBuilder().makeCorrel(peek().getRowType(),
-                    cluster.createCorrel()));
+        getRexBuilder().makeCorrel(peek().getRowType(),
+            cluster.createCorrel()));
     return this;
   }
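The variable(...) method above, together with the join overload further down
that takes a variablesSet, is how callers build the correlated joins that
HiveRelDecorrelator later rewrites away. A hedged sketch of that call pattern;
scan(...) and field(String) are assumed from the Calcite RelBuilder API this
class replicates, and the table and column names are invented:

  import com.google.common.collect.ImmutableSet;
  import org.apache.calcite.rel.RelNode;
  import org.apache.calcite.rel.core.JoinRelType;
  import org.apache.calcite.rex.RexCorrelVariable;
  import org.apache.calcite.sql.fun.SqlStdOperatorTable;
  import org.apache.calcite.util.Holder;

  // Hedged sketch, not part of this patch.
  static RelNode correlateSketch(HiveSubQRemoveRelBuilder b) {
    final Holder<RexCorrelVariable> v = Holder.of(null);
    return b.scan("emp")
        .variable(v)                      // capture $cor over emp's row type
        .scan("dept")
        .filter(                          // dept.deptno = $cor.deptno
            b.call(SqlStdOperatorTable.EQUALS,
                b.field("deptno"),
                b.getRexBuilder().makeFieldAccess(v.get(), "deptno", false)))
        .join(JoinRelType.INNER, b.literal(true),
            ImmutableSet.of(v.get().id))  // register the correlation id
        .build();
  }
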
 
@@ -1125,22 +1128,21 @@ public class HiveSubQRemoveRelBuilder {
             + " must not be used by left input to correlation");
       }
       switch (joinType) {
-        case LEFT:
-          // Correlate does not have an ON clause.
-          // For a LEFT correlate, predicate must be evaluated first.
-          // For INNER, we can defer.
-          stack.push(right);
-          filter(condition.accept(new Shifter(left.rel, id, right.rel)));
-          right = stack.pop();
-          break;
-        default:
-          postCondition = condition;
+      case LEFT:
+        // Correlate does not have an ON clause.
+        // For a LEFT correlate, predicate must be evaluated first.
+        // For INNER, we can defer.
+        stack.push(right);
+        filter(condition.accept(new Shifter(left.rel, id, right.rel)));
+        right = stack.pop();
+        break;
+      default:
+        postCondition = condition;
       }
       if(createSemiJoin) {
         join = correlateFactory.createCorrelate(left.rel, right.rel, id,
             requiredColumns, SemiJoinType.SEMI);
-      }
-      else {
+      } else {
         join = correlateFactory.createCorrelate(left.rel, right.rel, id,
             requiredColumns, SemiJoinType.of(joinType));
 
@@ -1160,8 +1162,8 @@ public class HiveSubQRemoveRelBuilder {
   /** Creates a {@link org.apache.calcite.rel.core.Join} with correlating
    * variables. */
   public HiveSubQRemoveRelBuilder join(JoinRelType joinType, RexNode condition,
-                         Set<CorrelationId> variablesSet) {
-    return join(joinType, condition, variablesSet, false) ;
+                                       Set<CorrelationId> variablesSet) {
+    return join(joinType, condition, variablesSet, false);
   }
 
   /** Creates a {@link org.apache.calcite.rel.core.Join} using USING syntax.
@@ -1177,9 +1179,9 @@ public class HiveSubQRemoveRelBuilder {
     final List<RexNode> conditions = new ArrayList<>();
     for (String fieldName : fieldNames) {
       conditions.add(
-              call(SqlStdOperatorTable.EQUALS,
-                      field(2, 0, fieldName),
-                      field(2, 1, fieldName)));
+          call(SqlStdOperatorTable.EQUALS,
+              field(2, 0, fieldName),
+              field(2, 1, fieldName)));
     }
     return join(joinType, conditions);
   }
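The USING overload above expands each field name into
call(EQUALS, field(2, 0, name), field(2, 1, name)) across the two top inputs.
A hedged sketch of the call pattern, roughly
"SELECT ... FROM emp JOIN dept USING (deptno)"; scan(...) is assumed from the
Calcite RelBuilder API this class replicates, and the names are invented:

  import org.apache.calcite.rel.RelNode;
  import org.apache.calcite.rel.core.JoinRelType;

  // Hedged sketch, not part of this patch.
  static RelNode joinUsingSketch(HiveSubQRemoveRelBuilder b) {
    return b.scan("emp")
        .scan("dept")
        .join(JoinRelType.INNER, "deptno")
        .build();
  }
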
@@ -1189,7 +1191,7 @@ public class HiveSubQRemoveRelBuilder {
     final Frame right = stack.pop();
     final Frame left = stack.pop();
     final RelNode semiJoin =
-            semiJoinFactory.createSemiJoin(left.rel, right.rel, and(conditions));
+        semiJoinFactory.createSemiJoin(left.rel, right.rel, and(conditions));
     stack.push(new Frame(semiJoin, left.right));
     return this;
   }
@@ -1203,8 +1205,8 @@ public class HiveSubQRemoveRelBuilder {
   public HiveSubQRemoveRelBuilder as(String alias) {
     final Frame pair = stack.pop();
     stack.push(
-            new Frame(pair.rel,
-                    ImmutableList.of(Pair.of(alias, pair.right.get(0).right))));
+        new Frame(pair.rel,
+            ImmutableList.of(Pair.of(alias, pair.right.get(0).right))));
     return this;
   }
 
@@ -1223,36 +1225,36 @@ public class HiveSubQRemoveRelBuilder {
    */
   public HiveSubQRemoveRelBuilder values(String[] fieldNames, Object... values) {
     if (fieldNames == null
-            || fieldNames.length == 0
-            || values.length % fieldNames.length != 0
-            || values.length < fieldNames.length) {
+        || fieldNames.length == 0
+        || values.length % fieldNames.length != 0
+        || values.length < fieldNames.length) {
       throw new IllegalArgumentException(
-              "Value count must be a positive multiple of field count");
+          "Value count must be a positive multiple of field count");
     }
     final int rowCount = values.length / fieldNames.length;
     for (Ord<String> fieldName : Ord.zip(fieldNames)) {
       if (allNull(values, fieldName.i, fieldNames.length)) {
         throw new IllegalArgumentException("All values of field '" + fieldName.e
-                + "' are null; cannot deduce type");
+            + "' are null; cannot deduce type");
       }
     }
     final ImmutableList<ImmutableList<RexLiteral>> tupleList =
-            tupleList(fieldNames.length, values);
+        tupleList(fieldNames.length, values);
     final RelDataTypeFactory.FieldInfoBuilder rowTypeBuilder =
-            cluster.getTypeFactory().builder();
+        cluster.getTypeFactory().builder();
     for (final Ord<String> fieldName : Ord.zip(fieldNames)) {
       final String name =
-              fieldName.e != null ? fieldName.e : "expr$" + fieldName.i;
+          fieldName.e != null ? fieldName.e : "expr$" + fieldName.i;
       final RelDataType type = cluster.getTypeFactory().leastRestrictive(
-              new AbstractList<RelDataType>() {
-                public RelDataType get(int index) {
-                  return tupleList.get(index).get(fieldName.i).getType();
-                }
-
-                public int size() {
-                  return rowCount;
-                }
-              });
+          new AbstractList<RelDataType>() {
+            public RelDataType get(int index) {
+              return tupleList.get(index).get(fieldName.i).getType();
+            }
+
+            public int size() {
+              return rowCount;
+            }
+          });
       rowTypeBuilder.add(name, type);
     }
     final RelDataType rowType = rowTypeBuilder.build();
@@ -1262,7 +1264,7 @@ public class HiveSubQRemoveRelBuilder {
   private ImmutableList<ImmutableList<RexLiteral>> tupleList(int columnCount,
                                                              Object[] values) {
     final ImmutableList.Builder<ImmutableList<RexLiteral>> listBuilder =
-            ImmutableList.builder();
+        ImmutableList.builder();
     final List<RexLiteral> valueList = new ArrayList<>();
     for (int i = 0; i < values.length; i++) {
       Object value = values[i];
@@ -1296,7 +1298,7 @@ public class HiveSubQRemoveRelBuilder {
   public HiveSubQRemoveRelBuilder empty() {
     final RelNode input = build();
     final RelNode sort = HiveRelFactories.HIVE_SORT_FACTORY.createSort(
-            input, RelCollations.of(), null, literal(0));
+        input, RelCollations.of(), null, literal(0));
     return this.push(sort);
   }
 
@@ -1312,9 +1314,9 @@ public class HiveSubQRemoveRelBuilder {
    */
   public HiveSubQRemoveRelBuilder values(RelDataType rowType, Object... columnValues) {
     final ImmutableList<ImmutableList<RexLiteral>> tupleList =
-            tupleList(rowType.getFieldCount(), columnValues);
+        tupleList(rowType.getFieldCount(), columnValues);
     RelNode values = valuesFactory.createValues(cluster, rowType,
-            ImmutableList.copyOf(tupleList));
+        ImmutableList.copyOf(tupleList));
     push(values);
     return this;
   }
@@ -1329,9 +1331,9 @@ public class HiveSubQRemoveRelBuilder {
    * @param rowType Row type
    */
   public HiveSubQRemoveRelBuilder values(Iterable<? extends List<RexLiteral>> tupleList,
-                           RelDataType rowType) {
+                                         RelDataType rowType) {
     RelNode values =
-            valuesFactory.createValues(cluster, rowType, copy(tupleList));
+        valuesFactory.createValues(cluster, rowType, copy(tupleList));
     push(values);
     return this;
   }
@@ -1347,14 +1349,13 @@ public class HiveSubQRemoveRelBuilder {
 
   /** Converts an iterable of lists into an immutable list of immutable lists
    * with the same contents. Returns the same object if possible. */
-  private static <E> ImmutableList<ImmutableList<E>>
-  copy(Iterable<? extends List<E>> tupleList) {
+  private static <E> ImmutableList<ImmutableList<E>> copy(Iterable<? extends List<E>> tupleList) {
     final ImmutableList.Builder<ImmutableList<E>> builder =
-            ImmutableList.builder();
+        ImmutableList.builder();
     int changeCount = 0;
     for (List<E> literals : tupleList) {
       final ImmutableList<E> literals2 =
-              ImmutableList.copyOf(literals);
+          ImmutableList.copyOf(literals);
       builder.add(literals2);
       if (literals != literals2) {
         ++changeCount;
@@ -1408,15 +1409,15 @@ public class HiveSubQRemoveRelBuilder {
    * @param nodes Sort expressions
    */
   public HiveSubQRemoveRelBuilder sortLimit(int offset, int fetch,
-                              Iterable<? extends RexNode> nodes) {
+                                            Iterable<? extends RexNode> nodes) {
     final List<RelFieldCollation> fieldCollations = new ArrayList<>();
     final RelDataType inputRowType = peek().getRowType();
     final List<RexNode> extraNodes = projects(inputRowType);
     final List<RexNode> originalExtraNodes = ImmutableList.copyOf(extraNodes);
     for (RexNode node : nodes) {
       fieldCollations.add(
-              collation(node, RelFieldCollation.Direction.ASCENDING, null,
-                      extraNodes));
+          collation(node, RelFieldCollation.Direction.ASCENDING, null,
+              extraNodes));
     }
     final RexNode offsetNode = offset <= 0 ? null : literal(offset);
     final RexNode fetchNode = fetch < 0 ? null : literal(fetch);
@@ -1437,8 +1438,8 @@ public class HiveSubQRemoveRelBuilder {
           stack.pop();
           push(sort2.getInput());
           final RelNode sort =
-                  sortFactory.createSort(build(), sort2.collation,
-                          offsetNode, fetchNode);
+              sortFactory.createSort(build(), sort2.collation,
+                  offsetNode, fetchNode);
           push(sort);
           return this;
         }
@@ -1451,8 +1452,8 @@ public class HiveSubQRemoveRelBuilder {
             stack.pop();
             push(sort2.getInput());
             final RelNode sort =
-                    sortFactory.createSort(build(), sort2.collation,
-                            offsetNode, fetchNode);
+                sortFactory.createSort(build(), sort2.collation,
+                    offsetNode, fetchNode);
             push(sort);
             project(project.getProjects());
             return this;
@@ -1464,8 +1465,8 @@ public class HiveSubQRemoveRelBuilder {
       project(extraNodes);
     }
     final RelNode sort =
-            sortFactory.createSort(build(), RelCollations.of(fieldCollations),
-                    offsetNode, fetchNode);
+        sortFactory.createSort(build(), RelCollations.of(fieldCollations),
+            offsetNode, fetchNode);
     push(sort);
     if (addedFields) {
       project(originalExtraNodes);
@@ -1475,26 +1476,27 @@ public class HiveSubQRemoveRelBuilder {
 
   private static RelFieldCollation collation(RexNode node,
                                              RelFieldCollation.Direction direction,
-                                             RelFieldCollation.NullDirection nullDirection, List<RexNode> extraNodes) {
+                                             RelFieldCollation.NullDirection nullDirection,
+                                             List<RexNode> extraNodes) {
     switch (node.getKind()) {
-      case INPUT_REF:
-        return new RelFieldCollation(((RexInputRef) node).getIndex(), direction,
-                Util.first(nullDirection, direction.defaultNullDirection()));
-      case DESCENDING:
-        return collation(((RexCall) node).getOperands().get(0),
-                RelFieldCollation.Direction.DESCENDING,
-                nullDirection, extraNodes);
-      case NULLS_FIRST:
-        return collation(((RexCall) node).getOperands().get(0), direction,
-                RelFieldCollation.NullDirection.FIRST, extraNodes);
-      case NULLS_LAST:
-        return collation(((RexCall) node).getOperands().get(0), direction,
-                RelFieldCollation.NullDirection.LAST, extraNodes);
-      default:
-        final int fieldIndex = extraNodes.size();
-        extraNodes.add(node);
-        return new RelFieldCollation(fieldIndex, direction,
-                Util.first(nullDirection, direction.defaultNullDirection()));
+    case INPUT_REF:
+      return new RelFieldCollation(((RexInputRef) node).getIndex(), direction,
+          Util.first(nullDirection, direction.defaultNullDirection()));
+    case DESCENDING:
+      return collation(((RexCall) node).getOperands().get(0),
+          RelFieldCollation.Direction.DESCENDING,
+          nullDirection, extraNodes);
+    case NULLS_FIRST:
+      return collation(((RexCall) node).getOperands().get(0), direction,
+          RelFieldCollation.NullDirection.FIRST, extraNodes);
+    case NULLS_LAST:
+      return collation(((RexCall) node).getOperands().get(0), direction,
+          RelFieldCollation.NullDirection.LAST, extraNodes);
+    default:
+      final int fieldIndex = extraNodes.size();
+      extraNodes.add(node);
+      return new RelFieldCollation(fieldIndex, direction,
+          Util.first(nullDirection, direction.defaultNullDirection()));
     }
   }
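The collation(...) helper above unwinds nested DESCENDING / NULLS_FIRST /
NULLS_LAST wrappers, produced by desc(...) and nullsFirst(...), into a single
RelFieldCollation. A hedged sketch of how a caller exercises it through
sortLimit, roughly "ORDER BY name DESC NULLS FIRST LIMIT 10"; field(String)
is assumed from the Calcite RelBuilder API and the column name is invented:

  import com.google.common.collect.ImmutableList;
  import org.apache.calcite.rel.RelNode;

  // Hedged sketch, not part of this patch; assumes the builder already
  // holds a scanned input with a "name" column.
  static RelNode sortSketch(HiveSubQRemoveRelBuilder b) {
    return b.sortLimit(0, 10,
            ImmutableList.of(b.nullsFirst(b.desc(b.field("name")))))
        .build();
  }
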
 
@@ -1509,7 +1511,7 @@ public class HiveSubQRemoveRelBuilder {
   public HiveSubQRemoveRelBuilder convert(RelDataType castRowType, boolean rename) {
     final RelNode r = build();
     final RelNode r2 =
-            RelOptUtil.createCastRel(r, castRowType, rename, projectFactory);
+        RelOptUtil.createCastRel(r, castRowType, rename, projectFactory);
     push(r2);
     return this;
   }
@@ -1528,14 +1530,14 @@ public class HiveSubQRemoveRelBuilder {
   }
 
   public HiveSubQRemoveRelBuilder aggregate(GroupKey groupKey,
-                              List<AggregateCall> aggregateCalls) {
+                                            List<AggregateCall> aggregateCalls) {
     return aggregate(groupKey,
-            Lists.transform(
-                    aggregateCalls, new Function<AggregateCall, AggCall>() {
-                      public AggCall apply(AggregateCall input) {
-                        return new AggCallImpl2(input);
-                      }
-                    }));
+        Lists.transform(
+            aggregateCalls, new Function<AggregateCall, AggCall>() {
+              public AggCall apply(AggregateCall input) {
+                return new AggCallImpl2(input);
+              }
+            }));
   }
 
   /** Clears the stack.
@@ -1548,8 +1550,8 @@ public class HiveSubQRemoveRelBuilder {
   protected String getAlias() {
     final Frame frame = stack.peek();
     return frame.right.size() == 1
-            ? frame.right.get(0).left
-            : null;
+        ? frame.right.get(0).left
+        : null;
   }
 
   /** Information necessary to create a call to an aggregate function.
@@ -1570,10 +1572,10 @@ public class HiveSubQRemoveRelBuilder {
 
   /** Implementation of {@link RelBuilder.GroupKey}. */
   protected static class GroupKeyImpl implements GroupKey {
-    final ImmutableList<RexNode> nodes;
-    final boolean indicator;
-    final ImmutableList<ImmutableList<RexNode>> nodeLists;
-    final String alias;
+    private final ImmutableList<RexNode> nodes;
+    private final boolean indicator;
+    private final ImmutableList<ImmutableList<RexNode>> nodeLists;
+    private final String alias;
 
     GroupKeyImpl(ImmutableList<RexNode> nodes, boolean indicator,
                  ImmutableList<ImmutableList<RexNode>> nodeLists, String alias) {
@@ -1589,8 +1591,8 @@ public class HiveSubQRemoveRelBuilder {
 
     public GroupKey alias(String alias) {
       return Objects.equals(this.alias, alias)
-              ? this
-              : new GroupKeyImpl(nodes, indicator, nodeLists, alias);
+          ? this
+          : new GroupKeyImpl(nodes, indicator, nodeLists, alias);
     }
   }
 
@@ -1626,16 +1628,16 @@ public class HiveSubQRemoveRelBuilder {
    *
    * <p>Describes a previously created relational expression and
    * information about how table aliases map into its row type. */
-  private static class Frame {
+  private static final class Frame {
     static final Function<Pair<String, RelDataType>, List<RelDataTypeField>> FN =
-            new Function<Pair<String, RelDataType>, List<RelDataTypeField>>() {
-              public List<RelDataTypeField> apply(Pair<String, RelDataType> input) {
-                return input.right.getFieldList();
-              }
-            };
+        new Function<Pair<String, RelDataType>, List<RelDataTypeField>>() {
+          public List<RelDataTypeField> apply(Pair<String, RelDataType> input) {
+            return input.right.getFieldList();
+          }
+        };
 
-    final RelNode rel;
-    final ImmutableList<Pair<String, RelDataType>> right;
+    private final RelNode rel;
+    private final ImmutableList<Pair<String, RelDataType>> right;
 
     private Frame(RelNode rel, ImmutableList<Pair<String, RelDataType>> pairs) {
       this.rel = rel;

http://git-wip-us.apache.org/repos/asf/hive/blob/ca96613d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
index 98d140f..c9e02ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java
@@ -136,10 +136,10 @@ import java.util.Stack;
 /**
  * NOTE: this whole logic is replicated from Calcite's RelDecorrelator
  *  and is extended to make it suitable for HIVE
- *  TODO:
  *    We should get rid of this and replace it with Calcite's RelDecorrelator
  *    once that works with Join, Project etc instead of LogicalJoin, LogicalProject.
- *    Also we need to have CALCITE-1511 fixed
+ *    At this point this has diverged from Calcite's version significantly,
+ *    so we cannot get rid of it.
  *
  * RelDecorrelator replaces all correlated expressions (corExp) in a relational
  * expression (RelNode) tree with non-correlated expressions that are produced
@@ -156,7 +156,7 @@ import java.util.Stack;
  *   de-correlator</li>
  * </ul>
  */
-public class HiveRelDecorrelator implements ReflectiveVisitor {
+public final class HiveRelDecorrelator implements ReflectiveVisitor {
   //~ Static fields/initializers ---------------------------------------------
 
   protected static final Logger LOG = LoggerFactory.getLogger(
@@ -191,7 +191,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   //~ Constructors -----------------------------------------------------------
 
-  private HiveRelDecorrelator (
+  private HiveRelDecorrelator(
           RelOptCluster cluster,
           CorelMap cm,
           Context context) {
@@ -698,225 +698,223 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   }
 
   public Frame decorrelateRel(HiveAggregate rel) throws SemanticException{
-    {
-      if (rel.getGroupType() != Aggregate.Group.SIMPLE) {
-        throw new AssertionError(Bug.CALCITE_461_FIXED);
-      }
-      //
-      // Rewrite logic:
-      //
-      // 1. Permute the group by keys to the front.
-      // 2. If the input of an aggregate produces correlated variables,
-      //    add them to the group list.
-      // 3. Change aggCalls to reference the new project.
-      //
+    if (rel.getGroupType() != Aggregate.Group.SIMPLE) {
+      throw new AssertionError(Bug.CALCITE_461_FIXED);
+    }
+    //
+    // Rewrite logic:
+    //
+    // 1. Permute the group by keys to the front.
+    // 2. If the input of an aggregate produces correlated variables,
+    //    add them to the group list.
+    // 3. Change aggCalls to reference the new project.
+    //
 
-      // Aggregate itself should not reference cor vars.
-      assert !cm.mapRefRelToCorRef.containsKey(rel);
+    // Aggregate itself should not reference cor vars.
+    assert !cm.mapRefRelToCorRef.containsKey(rel);
 
-      final RelNode oldInput = rel.getInput();
-      final Frame frame = getInvoke(oldInput, rel);
-      if (frame == null) {
-        // If input has not been rewritten, do not rewrite this rel.
-        return null;
-      }
-      //assert !frame.corVarOutputPos.isEmpty();
-      final RelNode newInput = frame.r;
+    final RelNode oldInput = rel.getInput();
+    final Frame frame = getInvoke(oldInput, rel);
+    if (frame == null) {
+      // If input has not been rewritten, do not rewrite this rel.
+      return null;
+    }
+    //assert !frame.corVarOutputPos.isEmpty();
+    final RelNode newInput = frame.r;
 
-      // map from newInput
-      Map<Integer, Integer> mapNewInputToProjOutputs =  new HashMap<>();
-      final int oldGroupKeyCount = rel.getGroupSet().cardinality();
+    // map from newInput
+    Map<Integer, Integer> mapNewInputToProjOutputs =  new HashMap<>();
+    final int oldGroupKeyCount = rel.getGroupSet().cardinality();
 
-      // Project projects the original expressions,
-      // plus any correlated variables the input wants to pass along.
-      final List<Pair<RexNode, String>> projects = Lists.newArrayList();
+    // Project projects the original expressions,
+    // plus any correlated variables the input wants to pass along.
+    final List<Pair<RexNode, String>> projects = Lists.newArrayList();
 
-      List<RelDataTypeField> newInputOutput =
-              newInput.getRowType().getFieldList();
+    List<RelDataTypeField> newInputOutput =
+        newInput.getRowType().getFieldList();
 
-      int newPos = 0;
+    int newPos = 0;
 
-      // oldInput has the original group by keys in the front.
-      final NavigableMap<Integer, RexLiteral> omittedConstants = new TreeMap<>();
-      for (int i = 0; i < oldGroupKeyCount; i++) {
-        final RexLiteral constant = projectedLiteral(newInput, i);
-        if (constant != null) {
-          // Exclude constants. Aggregate({true}) occurs because Aggregate({})
-          // would generate 1 row even when applied to an empty table.
-          omittedConstants.put(i, constant);
-          continue;
-        }
-        int newInputPos = frame.oldToNewOutputs.get(i);
-        projects.add(RexInputRef.of2(newInputPos, newInputOutput));
-        mapNewInputToProjOutputs.put(newInputPos, newPos);
-        newPos++;
+    // oldInput has the original group by keys in the front.
+    final NavigableMap<Integer, RexLiteral> omittedConstants = new TreeMap<>();
+    for (int i = 0; i < oldGroupKeyCount; i++) {
+      final RexLiteral constant = projectedLiteral(newInput, i);
+      if (constant != null) {
+        // Exclude constants. Aggregate({true}) occurs because Aggregate({})
+        // would generate 1 row even when applied to an empty table.
+        omittedConstants.put(i, constant);
+        continue;
       }
+      int newInputPos = frame.oldToNewOutputs.get(i);
+      projects.add(RexInputRef.of2(newInputPos, newInputOutput));
+      mapNewInputToProjOutputs.put(newInputPos, newPos);
+      newPos++;
+    }
 
-      final SortedMap<CorDef, Integer> corDefOutputs = new TreeMap<>();
-      if (!frame.corDefOutputs.isEmpty()) {
-        // If input produces correlated variables, move them to the front,
-        // right after any existing GROUP BY fields.
+    final SortedMap<CorDef, Integer> corDefOutputs = new TreeMap<>();
+    if (!frame.corDefOutputs.isEmpty()) {
+      // If input produces correlated variables, move them to the front,
+      // right after any existing GROUP BY fields.
 
-        // Now add the corVars from the input, starting from
-        // position oldGroupKeyCount.
-        for (Map.Entry<CorDef, Integer> entry
-                : frame.corDefOutputs.entrySet()) {
-          projects.add(RexInputRef.of2(entry.getValue(), newInputOutput));
+      // Now add the corVars from the input, starting from
+      // position oldGroupKeyCount.
+      for (Map.Entry<CorDef, Integer> entry
+          : frame.corDefOutputs.entrySet()) {
+        projects.add(RexInputRef.of2(entry.getValue(), newInputOutput));
 
-          corDefOutputs.put(entry.getKey(), newPos);
-          mapNewInputToProjOutputs.put(entry.getValue(), newPos);
-          newPos++;
-        }
+        corDefOutputs.put(entry.getKey(), newPos);
+        mapNewInputToProjOutputs.put(entry.getValue(), newPos);
+        newPos++;
       }
+    }
 
-      // add the remaining fields
-      final int newGroupKeyCount = newPos;
-      for (int i = 0; i < newInputOutput.size(); i++) {
-        if (!mapNewInputToProjOutputs.containsKey(i)) {
-          projects.add(RexInputRef.of2(i, newInputOutput));
-          mapNewInputToProjOutputs.put(i, newPos);
-          newPos++;
-        }
+    // add the remaining fields
+    final int newGroupKeyCount = newPos;
+    for (int i = 0; i < newInputOutput.size(); i++) {
+      if (!mapNewInputToProjOutputs.containsKey(i)) {
+        projects.add(RexInputRef.of2(i, newInputOutput));
+        mapNewInputToProjOutputs.put(i, newPos);
+        newPos++;
       }
+    }
 
-      assert newPos == newInputOutput.size();
+    assert newPos == newInputOutput.size();
 
-      // This Project will be what the old input maps to,
-      // replacing any previous mapping from old input).
-      RelNode newProject = HiveProject.create(newInput, Pair.left(projects), Pair.right(projects));
+    // This Project will be what the old input maps to,
+    // replacing any previous mapping from old input).
+    RelNode newProject = HiveProject.create(newInput, Pair.left(projects), Pair.right(projects));
 
-      // update mappings:
-      // oldInput ----> newInput
-      //
-      //                newProject
-      //                   |
-      // oldInput ----> newInput
-      //
-      // is transformed to
-      //
-      // oldInput ----> newProject
-      //                   |
-      //                newInput
-      Map<Integer, Integer> combinedMap = Maps.newHashMap();
+    // update mappings:
+    // oldInput ----> newInput
+    //
+    //                newProject
+    //                   |
+    // oldInput ----> newInput
+    //
+    // is transformed to
+    //
+    // oldInput ----> newProject
+    //                   |
+    //                newInput
+    Map<Integer, Integer> combinedMap = Maps.newHashMap();
 
-      for (Integer oldInputPos : frame.oldToNewOutputs.keySet()) {
-        combinedMap.put(oldInputPos,
-                mapNewInputToProjOutputs.get(
-                        frame.oldToNewOutputs.get(oldInputPos)));
-      }
+    for (Integer oldInputPos : frame.oldToNewOutputs.keySet()) {
+      combinedMap.put(oldInputPos,
+          mapNewInputToProjOutputs.get(
+              frame.oldToNewOutputs.get(oldInputPos)));
+    }
 
-      register(oldInput, newProject, combinedMap, corDefOutputs);
+    register(oldInput, newProject, combinedMap, corDefOutputs);
 
-      // now it's time to rewrite the Aggregate
-      final ImmutableBitSet newGroupSet = ImmutableBitSet.range(newGroupKeyCount);
-      List<AggregateCall> newAggCalls = Lists.newArrayList();
-      List<AggregateCall> oldAggCalls = rel.getAggCallList();
+    // now it's time to rewrite the Aggregate
+    final ImmutableBitSet newGroupSet = ImmutableBitSet.range(newGroupKeyCount);
+    List<AggregateCall> newAggCalls = Lists.newArrayList();
+    List<AggregateCall> oldAggCalls = rel.getAggCallList();
 
-      int oldInputOutputFieldCount = rel.getGroupSet().cardinality();
-      int newInputOutputFieldCount = newGroupSet.cardinality();
+    int oldInputOutputFieldCount = rel.getGroupSet().cardinality();
+    int newInputOutputFieldCount = newGroupSet.cardinality();
 
-      int i = -1;
-      for (AggregateCall oldAggCall : oldAggCalls) {
-        ++i;
-        List<Integer> oldAggArgs = oldAggCall.getArgList();
+    int i = -1;
+    for (AggregateCall oldAggCall : oldAggCalls) {
+      ++i;
+      List<Integer> oldAggArgs = oldAggCall.getArgList();
 
-        List<Integer> aggArgs = Lists.newArrayList();
+      List<Integer> aggArgs = Lists.newArrayList();
 
-        // Adjust the aggregator argument positions.
-        // Note aggregator does not change input ordering, so the input
-        // output position mapping can be used to derive the new positions
-        // for the argument.
-        for (int oldPos : oldAggArgs) {
-          aggArgs.add(combinedMap.get(oldPos));
-        }
-        final int filterArg = oldAggCall.filterArg < 0 ? oldAggCall.filterArg
-                : combinedMap.get(oldAggCall.filterArg);
+      // Adjust the aggregator argument positions.
+      // Note aggregator does not change input ordering, so the input
+      // output position mapping can be used to derive the new positions
+      // for the argument.
+      for (int oldPos : oldAggArgs) {
+        aggArgs.add(combinedMap.get(oldPos));
+      }
+      final int filterArg = oldAggCall.filterArg < 0 ? oldAggCall.filterArg
+          : combinedMap.get(oldAggCall.filterArg);
 
-        newAggCalls.add(
-                oldAggCall.adaptTo(newProject, aggArgs, filterArg,
-                        oldGroupKeyCount, newGroupKeyCount));
+      newAggCalls.add(
+          oldAggCall.adaptTo(newProject, aggArgs, filterArg,
+              oldGroupKeyCount, newGroupKeyCount));
 
-        // The old to new output position mapping will be the same as that
-        // of newProject, plus any aggregates that the oldAgg produces.
-        combinedMap.put(
-                oldInputOutputFieldCount + i,
-                newInputOutputFieldCount + i);
-      }
+      // The old to new output position mapping will be the same as that
+      // of newProject, plus any aggregates that the oldAgg produces.
+      combinedMap.put(
+          oldInputOutputFieldCount + i,
+          newInputOutputFieldCount + i);
+    }
 
-      relBuilder.push(
-              new HiveAggregate(rel.getCluster(), rel.getTraitSet(), newProject, newGroupSet, null, newAggCalls) );
+    relBuilder.push(
+        new HiveAggregate(rel.getCluster(), rel.getTraitSet(), newProject,
+            newGroupSet, null, newAggCalls));
 
-      if (!omittedConstants.isEmpty()) {
-        final List<RexNode> postProjects = new ArrayList<>(relBuilder.fields());
-        for (Map.Entry<Integer, RexLiteral> entry
-                : omittedConstants.descendingMap().entrySet()) {
-          postProjects.add(entry.getKey() + frame.corDefOutputs.size(),
-                  entry.getValue());
-        }
-        relBuilder.project(postProjects);
+    if (!omittedConstants.isEmpty()) {
+      final List<RexNode> postProjects = new ArrayList<>(relBuilder.fields());
+      for (Map.Entry<Integer, RexLiteral> entry
+          : omittedConstants.descendingMap().entrySet()) {
+        postProjects.add(entry.getKey() + frame.corDefOutputs.size(),
+            entry.getValue());
       }
-
-      // Aggregate does not change input ordering so corVars will be
-      // located at the same position as the input newProject.
-      return register(rel, relBuilder.build(), combinedMap, corDefOutputs);
+      relBuilder.project(postProjects);
     }
+
+    // Aggregate does not change input ordering so corVars will be
+    // located at the same position as the input newProject.
+    return register(rel, relBuilder.build(), combinedMap, corDefOutputs);
   }
 
   public Frame decorrelateRel(HiveProject rel) throws SemanticException{
-    {
-      //
-      // Rewrite logic:
-      //
-      // 1. Pass along any correlated variables coming from the input.
-      //
+    //
+    // Rewrite logic:
+    //
+    // 1. Pass along any correlated variables coming from the input.
+    //
 
-      final RelNode oldInput = rel.getInput();
-      Frame frame = getInvoke(oldInput, rel);
-      if (frame == null) {
-        // If input has not been rewritten, do not rewrite this rel.
-        return null;
-      }
-      final List<RexNode> oldProjects = rel.getProjects();
-      final List<RelDataTypeField> relOutput = rel.getRowType().getFieldList();
+    final RelNode oldInput = rel.getInput();
+    Frame frame = getInvoke(oldInput, rel);
+    if (frame == null) {
+      // If input has not been rewritten, do not rewrite this rel.
+      return null;
+    }
+    final List<RexNode> oldProjects = rel.getProjects();
+    final List<RelDataTypeField> relOutput = rel.getRowType().getFieldList();
 
-      // LogicalProject projects the original expressions,
-      // plus any correlated variables the input wants to pass along.
-      final List<Pair<RexNode, String>> projects = Lists.newArrayList();
+    // LogicalProject projects the original expressions,
+    // plus any correlated variables the input wants to pass along.
+    final List<Pair<RexNode, String>> projects = Lists.newArrayList();
 
-      // If this LogicalProject has correlated reference, create value generator
-      // and produce the correlated variables in the new output.
-      if (cm.mapRefRelToCorRef.containsKey(rel)) {
-        frame = decorrelateInputWithValueGenerator(rel);
-      }
+    // If this LogicalProject has correlated reference, create value generator
+    // and produce the correlated variables in the new output.
+    if (cm.mapRefRelToCorRef.containsKey(rel)) {
+      frame = decorrelateInputWithValueGenerator(rel);
+    }
 
-      // LogicalProject projects the original expressions
-      final Map<Integer, Integer> mapOldToNewOutputs =  new HashMap<>();
-      int newPos;
-      for (newPos = 0; newPos < oldProjects.size(); newPos++) {
-        projects.add(
-                newPos,
-                Pair.of(
-                        decorrelateExpr(oldProjects.get(newPos)),
-                        relOutput.get(newPos).getName()));
-        mapOldToNewOutputs.put(newPos, newPos);
-      }
+    // LogicalProject projects the original expressions
+    final Map<Integer, Integer> mapOldToNewOutputs =  new HashMap<>();
+    int newPos;
+    for (newPos = 0; newPos < oldProjects.size(); newPos++) {
+      projects.add(
+          newPos,
+          Pair.of(
+              decorrelateExpr(oldProjects.get(newPos)),
+              relOutput.get(newPos).getName()));
+      mapOldToNewOutputs.put(newPos, newPos);
+    }
 
 
-      // Project any correlated variables the input wants to pass along.
-      final SortedMap<CorDef, Integer> corDefOutputs = new TreeMap<>();
-      for (Map.Entry<CorDef, Integer> entry : frame.corDefOutputs.entrySet()) {
-        projects.add(
-                RexInputRef.of2(entry.getValue(),
-                        frame.r.getRowType().getFieldList()));
-        corDefOutputs.put(entry.getKey(), newPos);
-        newPos++;
-      }
+    // Project any correlated variables the input wants to pass along.
+    final SortedMap<CorDef, Integer> corDefOutputs = new TreeMap<>();
+    for (Map.Entry<CorDef, Integer> entry : frame.corDefOutputs.entrySet()) {
+      projects.add(
+          RexInputRef.of2(entry.getValue(),
+              frame.r.getRowType().getFieldList()));
+      corDefOutputs.put(entry.getKey(), newPos);
+      newPos++;
+    }
 
-      RelNode newProject = HiveProject.create(frame.r, Pair.left(projects), SqlValidatorUtil.uniquify(Pair.right(projects)));
+    RelNode newProject = HiveProject.create(frame.r, Pair.left(projects),
+        SqlValidatorUtil.uniquify(Pair.right(projects)));
 
-      return register(rel, newProject, mapOldToNewOutputs,
-              corDefOutputs);
-    }
+    return register(rel, newProject, mapOldToNewOutputs,
+        corDefOutputs);
   }
   /**
    * Rewrite LogicalProject.
@@ -1118,10 +1116,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
     // Try to populate correlation variables using local fields.
     // This means that we do not need a value generator.
     if (rel instanceof Filter) {
-      SortedMap<CorDef, Integer> map = new TreeMap<>();
+      SortedMap<CorDef, Integer> coreMap = new TreeMap<>();
       for (CorRef correlation : corVarList) {
         final CorDef def = correlation.def();
-        if (corDefOutputs.containsKey(def) || map.containsKey(def)) {
+        if (corDefOutputs.containsKey(def) || coreMap.containsKey(def)) {
           continue;
         }
         try {
@@ -1132,15 +1130,15 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
           // is generated
           def.setPredicateKind((SqlOperator) ((Pair)((Pair)e.getNode()).getValue()).getKey());
           def.setIsLeft((boolean)((Pair)((Pair) e.getNode()).getValue()).getValue());
-          map.put(def, (Integer)((Pair) e.getNode()).getKey());
+          coreMap.put(def, (Integer)((Pair) e.getNode()).getKey());
         }
       }
       // If all correlation variables are now satisfied, skip creating a value
       // generator.
-      if (map.size() == corVarList.size()) {
-        map.putAll(frame.corDefOutputs);
+      if (coreMap.size() == corVarList.size()) {
+        coreMap.putAll(frame.corDefOutputs);
         return register(oldInput, frame.r,
-                frame.oldToNewOutputs, map);
+                frame.oldToNewOutputs, coreMap);
       }
     }
 
@@ -1149,14 +1147,14 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
     // can directly add positions into corDefOutputs since join
     // does not change the output ordering from the inputs.
-    RelNode valueGen =
+    RelNode valueGenRel =
             createValueGenerator(
                     corVarList,
                     leftInputOutputCount,
                     corDefOutputs);
 
     RelNode join =
-            LogicalJoin.create(frame.r, valueGen, rexBuilder.makeLiteral(true),
+            LogicalJoin.create(frame.r, valueGenRel, rexBuilder.makeLiteral(true),
                     ImmutableSet.<CorrelationId>of(), JoinRelType.INNER);
 
     // LogicalJoin or LogicalFilter does not change the old input ordering. All
@@ -1208,23 +1206,23 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   private boolean references(RexNode e, CorRef correlation) {
     switch (e.getKind()) {
-      case CAST:
-        final RexNode operand = ((RexCall) e).getOperands().get(0);
-        if (isWidening(e.getType(), operand.getType())) {
-          return references(operand, correlation);
-        }
-        return false;
-      case FIELD_ACCESS:
-        final RexFieldAccess f = (RexFieldAccess) e;
-        if (f.getField().getIndex() == correlation.field
-                && f.getReferenceExpr() instanceof RexCorrelVariable) {
-          if (((RexCorrelVariable) f.getReferenceExpr()).id == correlation.corr) {
-            return true;
-          }
+    case CAST:
+      final RexNode operand = ((RexCall) e).getOperands().get(0);
+      if (isWidening(e.getType(), operand.getType())) {
+        return references(operand, correlation);
+      }
+      return false;
+    case FIELD_ACCESS:
+      final RexFieldAccess f = (RexFieldAccess) e;
+      if (f.getField().getIndex() == correlation.field
+          && f.getReferenceExpr() instanceof RexCorrelVariable) {
+        if (((RexCorrelVariable) f.getReferenceExpr()).id == correlation.corr) {
+          return true;
         }
-        // fall through
-      default:
-        return false;
+      }
+      // fall through
+    default:
+      return false;
     }
   }
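For intuition about the Filter rewrite in the next hunk: decorrelation trades
a per-outer-row subquery for a one-shot "value generator" that is joined back
on the correlation key. A self-contained toy model of that transformation in
plain Java (invented schema, no Calcite), for the query
"SELECT * FROM emp e WHERE e.sal > (SELECT AVG(sal) FROM emp i
 WHERE i.deptno = e.deptno)":

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  // Hedged sketch, not part of this patch.
  class DecorrelationToy {
    static class Emp {
      final int deptno;
      final double sal;
      Emp(int deptno, double sal) { this.deptno = deptno; this.sal = sal; }
    }

    static List<Emp> aboveDeptAverage(List<Emp> emps) {
      // "Value generator": one (deptno, SUM(sal), COUNT(*)) entry per deptno.
      Map<Integer, double[]> acc = new HashMap<>();
      for (Emp e : emps) {
        double[] sumCount = acc.computeIfAbsent(e.deptno, k -> new double[2]);
        sumCount[0] += e.sal;
        sumCount[1] += 1;
      }
      // Join back on deptno; the predicate no longer reaches the outer row
      // through a correlation variable.
      List<Emp> out = new ArrayList<>();
      for (Emp e : emps) {
        double[] sc = acc.get(e.deptno);
        if (e.sal > sc[0] / sc[1]) {
          out.add(e);
        }
      }
      return out;
    }
  }
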
 
@@ -1241,69 +1239,70 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   }
 
   public Frame decorrelateRel(HiveFilter rel) throws SemanticException {
-    {
-      //
-      // Rewrite logic:
-      //
-      // 1. If a LogicalFilter references a correlated field in its filter
-      // condition, rewrite the LogicalFilter to be
-      //   LogicalFilter
-      //     LogicalJoin(cross product)
-      //       OriginalFilterInput
-      //       ValueGenerator(produces distinct sets of correlated variables)
-      // and rewrite the correlated fieldAccess in the filter condition to
-      // reference the LogicalJoin output.
-      //
-      // 2. If LogicalFilter does not reference correlated variables, simply
-      // rewrite the filter condition using new input.
-      //
+    //
+    // Rewrite logic:
+    //
+    // 1. If a LogicalFilter references a correlated field in its filter
+    // condition, rewrite the LogicalFilter to be
+    //   LogicalFilter
+    //     LogicalJoin(cross product)
+    //       OriginalFilterInput
+    //       ValueGenerator(produces distinct sets of correlated variables)
+    // and rewrite the correlated fieldAccess in the filter condition to
+    // reference the LogicalJoin output.
+    //
+    // 2. If LogicalFilter does not reference correlated variables, simply
+    // rewrite the filter condition using new input.
+    //
 
-      final RelNode oldInput = rel.getInput();
-      Frame frame = getInvoke(oldInput, rel);
-      if (frame == null) {
-        // If input has not been rewritten, do not rewrite this rel.
-        return null;
-      }
+    final RelNode oldInput = rel.getInput();
+    Frame frame = getInvoke(oldInput, rel);
+    if (frame == null) {
+      // If input has not been rewritten, do not rewrite this rel.
+      return null;
+    }
+
+    Frame oldInputFrame = frame;
+    // If this LogicalFilter has correlated reference, create value generator
+    // and produce the correlated variables in the new output.
+    if (cm.mapRefRelToCorRef.containsKey(rel)) {
+      frame = decorrelateInputWithValueGenerator(rel);
+    }
+
+    boolean valueGenerator = true;
+    if(frame.r == oldInputFrame.r) {
+      // this means the correlated value generator wasn't generated
+      valueGenerator = false;
+    }
 
-      Frame oldInputFrame = frame;
-      // If this LogicalFilter has correlated reference, create value generator
-      // and produce the correlated variables in the new output.
-      if (cm.mapRefRelToCorRef.containsKey(rel)) {
-        frame = decorrelateInputWithValueGenerator(rel);
-      }
-
-      boolean valueGenerator = true;
-      if(frame.r == oldInputFrame.r) {
-        // this means correated value generator wasn't generated
-        valueGenerator = false;
-      }
-
-      if(oldInput instanceof LogicalCorrelate && ((LogicalCorrelate) oldInput).getJoinType() == SemiJoinType.SEMI
-          &&  !cm.mapRefRelToCorRef.containsKey(rel)) {
-        // this conditions need to be pushed into semi-join since this condition
-        // corresponds to IN
-        HiveSemiJoin join = ((HiveSemiJoin)frame.r);
-        final List<RexNode> conditions = new ArrayList<>();
-        RexNode joinCond = join.getCondition();
-        conditions.add(joinCond);
-        conditions.add(decorrelateExpr(rel.getCondition(), valueGenerator));
-        final RexNode condition =
-            RexUtil.composeConjunction(rexBuilder, conditions, false);
-        RelNode newRel = HiveSemiJoin.getSemiJoin(frame.r.getCluster(), frame.r.getTraitSet(), join.getLeft(), join.getRight(),
-            condition,join.getLeftKeys(), join.getRightKeys());
-        return register(rel, newRel, frame.oldToNewOutputs, frame.corDefOutputs);
-      }
-      // Replace the filter expression to reference output of the join
-        // Map filter to the new filter over join
-        relBuilder.push(frame.r).filter(
-            (decorrelateExpr(rel.getCondition(), valueGenerator)));
-      // Filter does not change the input ordering.
-      // Filter rel does not permute the input.
-      // All corvars produced by filter will have the same output positions in the
-      // input rel.
-      return register(rel, relBuilder.build(), frame.oldToNewOutputs,
-              frame.corDefOutputs);
+    if(oldInput instanceof LogicalCorrelate
+        && ((LogicalCorrelate) oldInput).getJoinType() == SemiJoinType.SEMI
+        &&  !cm.mapRefRelToCorRef.containsKey(rel)) {
+      // this condition needs to be pushed into the semi-join since it
+      // corresponds to IN
+      HiveSemiJoin join = ((HiveSemiJoin)frame.r);
+      final List<RexNode> conditions = new ArrayList<>();
+      RexNode joinCond = join.getCondition();
+      conditions.add(joinCond);
+      conditions.add(decorrelateExpr(rel.getCondition(), valueGenerator));
+      final RexNode condition =
+          RexUtil.composeConjunction(rexBuilder, conditions, false);
+
+      RelNode newRel = HiveSemiJoin.getSemiJoin(frame.r.getCluster(), frame.r.getTraitSet(),
+          join.getLeft(), join.getRight(), condition, join.getLeftKeys(), join.getRightKeys());
+
+      return register(rel, newRel, frame.oldToNewOutputs, frame.corDefOutputs);
     }
+    // Replace the filter expression to reference output of the join
+    // Map filter to the new filter over join
+    relBuilder.push(frame.r).filter(
+        (decorrelateExpr(rel.getCondition(), valueGenerator)));
+    // Filter does not change the input ordering.
+    // Filter rel does not permute the input.
+    // All corvars produced by filter will have the same output positions in the
+    // input rel.
+    return register(rel, relBuilder.build(), frame.oldToNewOutputs,
+        frame.corDefOutputs);
   }
 
     /**
@@ -1348,7 +1347,8 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       valueGenerator = false;
     }
 
-    if(oldInput instanceof LogicalCorrelate && ((LogicalCorrelate) oldInput).getJoinType() == SemiJoinType.SEMI
+    if(oldInput instanceof LogicalCorrelate
+        && ((LogicalCorrelate) oldInput).getJoinType() == SemiJoinType.SEMI
         &&  !cm.mapRefRelToCorRef.containsKey(rel)) {
       // this condition needs to be pushed into the semi-join since it
       // corresponds to IN
@@ -1359,8 +1359,8 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       conditions.add(decorrelateExpr(rel.getCondition(), valueGenerator));
       final RexNode condition =
           RexUtil.composeConjunction(rexBuilder, conditions, false);
-      RelNode newRel = HiveSemiJoin.getSemiJoin(frame.r.getCluster(), frame.r.getTraitSet(), join.getLeft(), join.getRight(),
-          condition,join.getLeftKeys(), join.getRightKeys());
+      RelNode newRel = HiveSemiJoin.getSemiJoin(frame.r.getCluster(), frame.r.getTraitSet(),
+          join.getLeft(), join.getRight(), condition, join.getLeftKeys(), join.getRightKeys());
       return register(rel, newRel, frame.oldToNewOutputs, frame.corDefOutputs);
     }
 
@@ -1443,8 +1443,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
                 RexInputRef.of(newLeftPos, newLeftOutput),
                 new RexInputRef(newLeftFieldCount + newRightPos,
                     newRightOutput.get(newRightPos).getType())));
-      }
-      else {
+      } else {
         conditions.add(
             rexBuilder.makeCall(callOp,
                 new RexInputRef(newLeftFieldCount + newRightPos,
@@ -1488,13 +1487,12 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       final List<Integer> leftKeys = new ArrayList<Integer>();
       final List<Integer> rightKeys = new ArrayList<Integer>();
 
-      RelNode[] inputRels = new RelNode[] { leftFrame.r, rightFrame.r};
-      newJoin = HiveSemiJoin.getSemiJoin(rel.getCluster(), rel.getCluster().traitSetOf(HiveRelNode.CONVENTION),
-          leftFrame.r, rightFrame.r, condition, ImmutableIntList.copyOf(leftKeys),
-          ImmutableIntList.copyOf(rightKeys));
+      RelNode[] inputRels = new RelNode[] {leftFrame.r, rightFrame.r};
+      newJoin = HiveSemiJoin.getSemiJoin(rel.getCluster(),
+          rel.getCluster().traitSetOf(HiveRelNode.CONVENTION), leftFrame.r, rightFrame.r,
+          condition, ImmutableIntList.copyOf(leftKeys), ImmutableIntList.copyOf(rightKeys));
 
-    }
-    else {
+    } else {
       // Right input positions are shifted by newLeftFieldCount.
       for (int i = 0; i < oldRightFieldCount; i++) {
         mapOldToNewOutputs.put(
@@ -1531,7 +1529,8 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       return null;
     }
 
-    final RelNode newJoin = HiveJoin.getJoin(rel.getCluster(), leftFrame.r, rightFrame.r, decorrelateExpr(rel.getCondition()), rel.getJoinType() );
+    final RelNode newJoin = HiveJoin.getJoin(rel.getCluster(), leftFrame.r, rightFrame.r,
+        decorrelateExpr(rel.getCondition()), rel.getJoinType());
 
     // Create the mapping between the output of the old correlation rel
     // and the new join rel
@@ -1589,7 +1588,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
     }
 
     final RelNode newJoin = HiveJoin.getJoin(rel.getCluster(), leftFrame.r,
-            rightFrame.r, decorrelateExpr(rel.getCondition()), rel.getJoinType() );
+            rightFrame.r, decorrelateExpr(rel.getCondition()), rel.getJoinType());
 
     // Create the mapping between the output of the old correlation rel
     // and the new join rel
@@ -1838,7 +1837,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   }
 
   /**
-   * Remove correlated variables from the tree at root corRel
+   * Remove correlated variables from the tree at root corRel.
    *
    * @param correlate Correlator
    */
@@ -1949,7 +1948,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
           final List<RexNode> newOperands = new ArrayList<>();
           newOperands.add(o0);
           newOperands.add(o1);
-          boolean[] update = { false };
+          boolean[] update = {false};
           List<RexNode> clonedOperands = visitList(newOperands, update);
 
           return relBuilder.call(call.getOperator(), clonedOperands);
@@ -2003,13 +2002,13 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   /** Shuttle that removes correlations. */
   private class RemoveCorrelationRexShuttle extends RexShuttle {
-    final RexBuilder rexBuilder;
-    final RelDataTypeFactory typeFactory;
-    final boolean projectPulledAboveLeftCorrelator;
-    final RexInputRef nullIndicator;
-    final ImmutableSet<Integer> isCount;
+    private final RexBuilder rexBuilder;
+    private final RelDataTypeFactory typeFactory;
+    private final boolean projectPulledAboveLeftCorrelator;
+    private final RexInputRef nullIndicator;
+    private final ImmutableSet<Integer> isCount;
 
-    public RemoveCorrelationRexShuttle(
+    RemoveCorrelationRexShuttle(
             RexBuilder rexBuilder,
             boolean projectPulledAboveLeftCorrelator,
             RexInputRef nullIndicator,
@@ -2204,7 +2203,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
    * AggRel single group</blockquote>
    */
   private final class RemoveSingleAggregateRule extends RelOptRule {
-    public RemoveSingleAggregateRule() {
+    RemoveSingleAggregateRule() {
       super(
               operand(
                       LogicalAggregate.class,
@@ -2257,7 +2256,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   /** Planner rule that removes correlations for scalar projects. */
   private final class RemoveCorrelationForScalarProjectRule extends RelOptRule {
-    public RemoveCorrelationForScalarProjectRule() {
+    RemoveCorrelationForScalarProjectRule() {
       super(
               operand(LogicalCorrelate.class,
                       operand(RelNode.class, any()),
@@ -2456,7 +2455,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   /** Planner rule that removes correlations for scalar aggregates. */
   private final class RemoveCorrelationForScalarAggregateRule
           extends RelOptRule {
-    public RemoveCorrelationForScalarAggregateRule() {
+    RemoveCorrelationForScalarAggregateRule() {
       super(
               operand(LogicalCorrelate.class,
                       operand(RelNode.class, any()),
@@ -2838,9 +2837,9 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   /** Planner rule that adjusts projects when counts are added. */
   private final class AdjustProjectForCountAggregateRule extends RelOptRule {
-    final boolean flavor;
+    private final boolean flavor;
 
-    public AdjustProjectForCountAggregateRule(boolean flavor) {
+    AdjustProjectForCountAggregateRule(boolean flavor) {
       super(
               flavor
                       ? operand(LogicalCorrelate.class,
@@ -2976,9 +2975,9 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
    * {@link CorRef#uniqueKey}.
    */
   static class CorRef implements Comparable<CorRef> {
-    public final int uniqueKey;
-    public final CorrelationId corr;
-    public final int field;
+    private final int uniqueKey;
+    private final CorrelationId corr;
+    private final int field;
 
     CorRef(CorrelationId corr, int field, int uniqueKey) {
       this.corr = corr;
@@ -3021,8 +3020,8 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
 
   /** A correlation and a field. */
   static class CorDef implements Comparable<CorDef> {
-    public final CorrelationId corr;
-    public final int field;
+    private final CorrelationId corr;
+    private final int field;
 
     private SqlOperator predicateKind;
     // this indicates if corr var is left operand of rex call or not
@@ -3100,7 +3099,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
    * updated.
    *
    * </ol> */
-  private static class CorelMap {
+  private static final class CorelMap {
     private final Multimap<RelNode, CorRef> mapRefRelToCorRef;
     private final SortedMap<CorrelationId, RelNode> mapCorToCorRel;
     private final Map<RexFieldAccess, CorRef> mapFieldAccessToCorRef;
@@ -3155,8 +3154,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   }
 
   private static class findIfValueGenRequired extends HiveRelShuttleImpl {
-    private boolean mightRequireValueGen ;
-    findIfValueGenRequired() { this.mightRequireValueGen = true; }
+    private boolean mightRequireValueGen;
+    findIfValueGenRequired() {
+      this.mightRequireValueGen = true;
+    }
 
     private boolean hasRexOver(List<RexNode> projects) {
       for(RexNode expr : projects) {
@@ -3200,8 +3201,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       if(!(hasRexOver(((HiveProject)rel).getProjects()))) {
         mightRequireValueGen = false;
         return super.visit(rel);
-      }
-      else {
+      } else {
         mightRequireValueGen = true;
         return rel;
       }
@@ -3210,8 +3210,7 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       if(!(hasRexOver(((LogicalProject)rel).getProjects()))) {
         mightRequireValueGen = false;
         return super.visit(rel);
-      }
-      else {
+      } else {
         mightRequireValueGen = true;
         return rel;
       }
@@ -3219,12 +3218,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
     @Override public RelNode visit(HiveAggregate rel) {
       // if there are aggregate functions or grouping sets we will need
       // value generator
-      if((((HiveAggregate)rel).getAggCallList().isEmpty() == true
-          && ((HiveAggregate)rel).indicator == false)) {
+      if(rel.getAggCallList().isEmpty() && !rel.indicator) {
         this.mightRequireValueGen = false;
         return super.visit(rel);
-      }
-      else {
+      } else {
         // need to reset to true in case previous aggregate/project
         // has set it to false
         this.mightRequireValueGen = true;
@@ -3232,12 +3229,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
       }
     }
     @Override public RelNode visit(LogicalAggregate rel) {
-      if((((LogicalAggregate)rel).getAggCallList().isEmpty() == true
-          && ((LogicalAggregate)rel).indicator == false)) {
+      if(rel.getAggCallList().isEmpty() && !rel.indicator) {
         this.mightRequireValueGen = false;
         return super.visit(rel);
-      }
-      else {
+      } else {
         // need to reset to true in case previous aggregate/project
         // has set it to false
         this.mightRequireValueGen = true;
@@ -3257,10 +3252,10 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
   }
   /** Builds a {@link org.apache.calcite.sql2rel.RelDecorrelator.CorelMap}. */
   private static class CorelMapBuilder extends HiveRelShuttleImpl {
-    final SortedMap<CorrelationId, RelNode> mapCorToCorRel =
+    private final SortedMap<CorrelationId, RelNode> mapCorToCorRel =
         new TreeMap<>();
 
-    final SortedSetMultimap<RelNode, CorRef> mapRefRelToCorRef =
+    private final SortedSetMultimap<RelNode, CorRef> mapRefRelToCorRef =
         Multimaps.newSortedSetMultimap(
             new HashMap<RelNode, Collection<CorRef>>(),
             new Supplier<TreeSet<CorRef>>() {
@@ -3270,12 +3265,12 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
               }
             });
 
-    final Map<RexFieldAccess, CorRef> mapFieldAccessToCorVar = new HashMap<>();
+    private final Map<RexFieldAccess, CorRef> mapFieldAccessToCorVar = new HashMap<>();
 
-    final Holder<Integer> offset = Holder.of(0);
-    int corrIdGenerator = 0;
+    private final Holder<Integer> offset = Holder.of(0);
+    private int corrIdGenerator = 0;
 
-    final List<RelNode> stack = new ArrayList<>();
+    private final List<RelNode> stack = new ArrayList<>();
 
     /** Creates a CorelMap by iterating over a {@link RelNode} tree. */
     CorelMap build(RelNode rel) {
@@ -3401,9 +3396,9 @@ public class HiveRelDecorrelator implements ReflectiveVisitor {
    * and where to find the output fields and correlation variables
    * among its output fields. */
   static class Frame {
-    final RelNode r;
-    final ImmutableSortedMap<CorDef, Integer> corDefOutputs;
-    final ImmutableSortedMap<Integer, Integer> oldToNewOutputs;
+    private final RelNode r;
+    private final ImmutableSortedMap<CorDef, Integer> corDefOutputs;
+    private final ImmutableSortedMap<Integer, Integer> oldToNewOutputs;
 
     Frame(RelNode oldRel, RelNode r, SortedMap<CorDef, Integer> corDefOutputs,
           Map<Integer, Integer> oldToNewOutputs) {

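Context for the decorrelator hunks above: when the input is a LogicalCorrelate with SEMI join type, the filter predicate corresponds to an IN condition and is folded into the semi-join rather than kept as a separate filter. A minimal sketch of the conjunction step, not taken from the patch, assuming Calcite's RexUtil and RexBuilder (the class and parameter names are illustrative only):

import java.util.ArrayList;
import java.util.List;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.rex.RexUtil;

class SemiJoinConditionSketch {
  // Combine the semi-join's own predicate with the decorrelated filter
  // predicate, mirroring the branch added above.
  static RexNode combine(RexBuilder rexBuilder, RexNode joinCond, RexNode filterCond) {
    final List<RexNode> conditions = new ArrayList<>();
    conditions.add(joinCond);
    conditions.add(filterCond);
    // nullOnEmpty=false: an empty list yields a TRUE literal, not null
    return RexUtil.composeConjunction(rexBuilder, conditions, false);
  }
}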

[23/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
deleted file mode 100644
index a19cc86..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidReadTxnList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.LockState;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-
-/**
- * Unit tests for {@link org.apache.hadoop.hive.metastore.HiveMetaStoreClient}.  For now this just has
- * transaction and locking tests.  The goal here is not to test all
- * functionality possible through the interface, as all permutations of DB
- * operations should be tested in the appropriate DB handler classes.  The
- * goal is to test that we can properly pass the messages through the thrift
- * service.
- *
- * This is in the ql directory rather than the metastore directory because it
- * required the hive-exec jar, and hive-exec jar already depends on
- * hive-metastore jar, thus I can't make hive-metastore depend on hive-exec.
- */
-public class TestHiveMetaStoreTxns {
-
-  private final HiveConf conf = new HiveConf();
-  private IMetaStoreClient client;
-
-  public TestHiveMetaStoreTxns() throws Exception {
-    TxnDbUtil.setConfValues(conf);
-    LogManager.getRootLogger().setLevel(Level.DEBUG);
-    tearDown();
-  }
-
-  @Test
-  public void testTxns() throws Exception {
-    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
-    Assert.assertEquals(1L, (long) tids.get(0));
-    Assert.assertEquals(2L, (long) tids.get(1));
-    Assert.assertEquals(3L, (long) tids.get(2));
-    client.rollbackTxn(1);
-    client.commitTxn(2);
-    ValidTxnList validTxns = client.getValidTxns();
-    Assert.assertFalse(validTxns.isTxnValid(1));
-    Assert.assertTrue(validTxns.isTxnValid(2));
-    Assert.assertFalse(validTxns.isTxnValid(3));
-    Assert.assertFalse(validTxns.isTxnValid(4));
-  }
-
-  @Test
-  public void testOpenTxnNotExcluded() throws Exception {
-    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
-    Assert.assertEquals(1L, (long) tids.get(0));
-    Assert.assertEquals(2L, (long) tids.get(1));
-    Assert.assertEquals(3L, (long) tids.get(2));
-    client.rollbackTxn(1);
-    client.commitTxn(2);
-    ValidTxnList validTxns = client.getValidTxns(3);
-    Assert.assertFalse(validTxns.isTxnValid(1));
-    Assert.assertTrue(validTxns.isTxnValid(2));
-    Assert.assertTrue(validTxns.isTxnValid(3));
-    Assert.assertFalse(validTxns.isTxnValid(4));
-  }
-
-  @Test
-  public void testTxnRange() throws Exception {
-    ValidTxnList validTxns = client.getValidTxns();
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(1L, 3L));
-    List<Long> tids = client.openTxns("me", 5).getTxn_ids();
-
-    HeartbeatTxnRangeResponse rsp = client.heartbeatTxnRange(1, 5);
-    Assert.assertEquals(0, rsp.getNosuch().size());
-    Assert.assertEquals(0, rsp.getAborted().size());
-
-    client.rollbackTxn(1L);
-    client.commitTxn(2L);
-    client.commitTxn(3L);
-    client.commitTxn(4L);
-    validTxns = client.getValidTxns();
-    System.out.println("validTxns = " + validTxns);
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 2L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 3L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 4L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(3L, 4L));
-
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1L, 4L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(2L, 5L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1L, 2L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(4L, 5L));
-
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(1L, 1L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(5L, 10L));
-
-    validTxns = new ValidReadTxnList("10:5:4,5,6:");
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(4,6));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(7, 10));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(7, 11));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(3, 6));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(4, 7));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1, 12));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(1, 3));
-  }
-
-  @Test
-  public void testLocks() throws Exception {
-    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("mytable")
-        .setPartitionName("mypartition")
-        .setExclusive()
-        .setOperationType(DataOperationType.NO_TXN)
-        .build());
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("yourtable")
-        .setSemiShared()
-        .setOperationType(DataOperationType.NO_TXN)
-        .build());
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("yourdb")
-        .setOperationType(DataOperationType.NO_TXN)
-        .setShared()
-        .build());
-    rqstBuilder.setUser("fred");
-
-    LockResponse res = client.lock(rqstBuilder.build());
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    res = client.checkLock(1);
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    client.heartbeat(0, 1);
-
-    client.unlock(1);
-  }
-
-  @Test
-  public void testLocksWithTxn() throws Exception {
-    long txnid = client.openTxn("me");
-
-    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
-    rqstBuilder.setTransactionId(txnid)
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("mytable")
-        .setPartitionName("mypartition")
-        .setSemiShared()
-        .setOperationType(DataOperationType.UPDATE)
-        .build())
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("yourtable")
-        .setSemiShared()
-        .setOperationType(DataOperationType.UPDATE)
-        .build())
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("yourdb")
-        .setShared()
-        .setOperationType(DataOperationType.SELECT)
-        .build())
-      .setUser("fred");
-
-    LockResponse res = client.lock(rqstBuilder.build());
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    res = client.checkLock(1);
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    client.heartbeat(txnid, 1);
-
-    client.commitTxn(txnid);
-  }
-
-  @Test
-  public void stringifyValidTxns() throws Exception {
-    // Test with just high water mark
-    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
-    String asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(1, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
-    asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(1, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
-
-    // Test with open transactions
-    validTxns = new ValidReadTxnList("10:3:5:3");
-    asString = validTxns.toString();
-    if (!asString.equals("10:3:3:5") && !asString.equals("10:3:5:3")) {
-      Assert.fail("Unexpected string value " + asString);
-    }
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(10, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(2, validTxns.getInvalidTransactions().length);
-    boolean sawThree = false, sawFive = false;
-    for (long tid : validTxns.getInvalidTransactions()) {
-      if (tid == 3)  sawThree = true;
-      else if (tid == 5) sawFive = true;
-      else  Assert.fail("Unexpected value " + tid);
-    }
-    Assert.assertTrue(sawThree);
-    Assert.assertTrue(sawFive);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    TxnDbUtil.prepDb(conf);
-    client = new HiveMetaStoreClient(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb(conf);
-  }
-}

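The stringifyValidTxns test above round-trips the compact string form of a transaction list: high-water mark, minimum open transaction, then the open and aborted transaction ids. A minimal round-trip sketch, assuming only the hive-common classes the test itself uses (the class name is illustrative):

import org.apache.hadoop.hive.common.ValidReadTxnList;
import org.apache.hadoop.hive.common.ValidTxnList;

class ValidTxnListSketch {
  public static void main(String[] args) {
    // "10:3:5:3": high-water mark 10, min open txn 3, open txn 5, aborted txn 3
    ValidTxnList txns = new ValidReadTxnList("10:3:5:3");
    String asString = txns.toString();            // back to the compact form
    ValidTxnList copy = new ValidReadTxnList(asString);
    assert copy.getHighWatermark() == 10;
    assert copy.getInvalidTransactions().length == 2;  // txns 3 and 5
  }
}
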
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
deleted file mode 100644
index c29a34d..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-
-/**
- * TestHiveMetaStoreWithEnvironmentContext. Test case for _with_environment_context
- * calls in {@link org.apache.hadoop.hive.metastore.HiveMetaStore}
- */
-public class TestHiveMetaStoreWithEnvironmentContext extends TestCase {
-
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private EnvironmentContext envContext;
-  private final Database db = new Database();
-  private Table table = new Table();
-  private final Partition partition = new Partition();
-
-  private static final String dbName = "hive3252";
-  private static final String tblName = "tmptbl";
-  private static final String renamed = "tmptbl2";
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-
-    msc.dropDatabase(dbName, true, true);
-
-    Map<String, String> envProperties = new HashMap<String, String>();
-    envProperties.put("hadoop.job.ugi", "test_user");
-    envContext = new EnvironmentContext(envProperties);
-
-    db.setName(dbName);
-
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tableParams.put("a", "string");
-    List<FieldSchema> partitionKeys = new ArrayList<FieldSchema>();
-    partitionKeys.add(new FieldSchema("b", "string", ""));
-
-    List<FieldSchema> cols = new ArrayList<FieldSchema>();
-    cols.add(new FieldSchema("a", "string", ""));
-    cols.add(new FieldSchema("b", "string", ""));
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setParameters(tableParams);
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tblName);
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-    sd.setInputFormat(HiveInputFormat.class.getName());
-    sd.setOutputFormat(HiveOutputFormat.class.getName());
-
-    table.setDbName(dbName);
-    table.setTableName(tblName);
-    table.setParameters(tableParams);
-    table.setPartitionKeys(partitionKeys);
-    table.setSd(sd);
-
-    List<String> partValues = new ArrayList<String>();
-    partValues.add("2011");
-    partition.setDbName(dbName);
-    partition.setTableName(tblName);
-    partition.setValues(partValues);
-    partition.setSd(table.getSd().deepCopy());
-    partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo().deepCopy());
-
-    DummyListener.notifyList.clear();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testEnvironmentContext() throws Exception {
-    int listSize = 0;
-
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    msc.createDatabase(db);
-    listSize++;
-    assertEquals(listSize, notifyList.size());
-    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
-    assert dbEvent.getStatus();
-
-    msc.createTable(table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
-    assert tblEvent.getStatus();
-    assertEquals(envContext, tblEvent.getEnvironmentContext());
-
-    table = msc.getTable(dbName, tblName);
-
-    partition.getSd().setLocation(table.getSd().getLocation() + "/part1");
-    msc.add_partition(partition, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert partEvent.getStatus();
-    assertEquals(envContext, partEvent.getEnvironmentContext());
-
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("2012");
-    msc.appendPartition(dbName, tblName, partVals, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert appendPartEvent.getStatus();
-    assertEquals(envContext, appendPartEvent.getEnvironmentContext());
-
-    table.setTableName(renamed);
-    msc.alter_table_with_environmentContext(dbName, tblName, table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableEvent.getStatus();
-    assertEquals(envContext, alterTableEvent.getEnvironmentContext());
-
-    table.setTableName(tblName);
-    msc.alter_table_with_environmentContext(dbName, renamed, table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    List<String> dropPartVals = new ArrayList<String>();
-    dropPartVals.add("2011");
-    msc.dropPartition(dbName, tblName, dropPartVals, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropPartitionEvent dropPartEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPartEvent.getStatus();
-    assertEquals(envContext, dropPartEvent.getEnvironmentContext());
-
-    msc.dropPartition(dbName, tblName, "b=2012", true, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropPartitionEvent dropPartByNameEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPartByNameEvent.getStatus();
-    assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext());
-
-    msc.dropTable(dbName, tblName, true, false, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1);
-    assert dropTblEvent.getStatus();
-    assertEquals(envContext, dropTblEvent.getEnvironmentContext());
-
-    msc.dropDatabase(dbName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
-    assert dropDB.getStatus();
-  }
-
-}

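The deleted test above exercises the *_with_environment_context metastore calls; an EnvironmentContext is simply a property map that travels with the request and is handed to event listeners unchanged. A minimal construction sketch, using only the API calls the test itself makes (the class and method names are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

class EnvContextSketch {
  static EnvironmentContext build() {
    // Properties placed here surface in each listener event's context
    Map<String, String> props = new HashMap<>();
    props.put("hadoop.job.ugi", "test_user");   // value from the test above
    return new EnvironmentContext(props);
  }
}
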
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
deleted file mode 100644
index 7cf351f..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.thrift.TException;
-
-public class TestMarkPartition extends TestCase{
-
-  protected HiveConf hiveConf;
-  private Driver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.event.clean.freq", "2");
-    System.setProperty("hive.metastore.event.expiry.duration", "5");
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-
-  }
-
-  public void testMarkingPartitionSet() throws CommandNeedRetryException, MetaException,
-  TException, NoSuchObjectException, UnknownDBException, UnknownTableException,
-  InvalidPartitionException, UnknownPartitionException, InterruptedException {
-    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-    driver.run("drop database if exists hive2215 cascade");
-    driver.run("create database hive2215");
-    driver.run("use hive2215");
-    driver.run("drop table if exists tmptbl");
-    driver.run("create table tmptbl (a string) partitioned by (b string)");
-    driver.run("alter table tmptbl add partition (b='2011')");
-    Map<String,String> kvs = new HashMap<String, String>();
-    kvs.put("b", "'2011'");
-    msc.markPartitionForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    assert msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    Thread.sleep(10000);
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-
-    kvs.put("b", "'2012'");
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    try{
-      msc.markPartitionForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof UnknownTableException;
-    }
-    try{
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof UnknownTableException;
-    }
-    kvs.put("a", "'2012'");
-    try{
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch(Exception e){
-      assert e instanceof InvalidPartitionException;
-    }
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    driver.run("drop database if exists hive2215 cascade");
-    super.tearDown();
-  }
-
-}

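TestMarkPartition above pairs markPartitionForEvent with isPartitionMarkedForEvent and relies on the mark expiring after hive.metastore.event.expiry.duration. A minimal sketch of that pairing, assuming an already connected HiveMetaStoreClient (the helper name is illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;

class MarkPartitionSketch {
  // Mark a partition as loaded, then ask whether the mark is still visible;
  // the answer flips to false once the expiry duration has elapsed.
  static boolean markAndCheck(HiveMetaStoreClient msc) throws Exception {
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("b", "'2011'");
    msc.markPartitionForEvent("hive2215", "tmptbl", partSpec,
        PartitionEventType.LOAD_DONE);
    return msc.isPartitionMarkedForEvent("hive2215", "tmptbl", partSpec,
        PartitionEventType.LOAD_DONE);
  }
}
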
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
deleted file mode 100644
index c541193..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-public class TestMarkPartitionRemote extends TestMarkPartition {
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + MetaStoreTestUtils.startMetaStore());
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-  }
-}

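The one-method subclass above is the whole remote variant: it reruns TestMarkPartition against a Thrift metastore by pointing METASTOREURIS at a freshly started server instead of using the embedded store. A minimal sketch of that wiring, using only the ConfVars the diff itself sets (the class name is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;

class RemoteMetastoreConfSketch {
  // Redirect a HiveConf from the embedded metastore to a remote Thrift one
  static HiveConf pointAtRemote(HiveConf conf, int port) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
    conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    return conf;
  }
}
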
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
deleted file mode 100644
index e44cfca..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-/**
- * TestMetaStoreEventListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}
- */
-public class TestMetaStoreEndFunctionListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private Driver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-    System.setProperty("hive.metastore.pre.event.listeners",
-        DummyPreListener.class.getName());
-    System.setProperty("hive.metastore.end.function.listeners",
-        DummyEndFunctionListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testEndFunctionListener() throws Exception {
-    /* Objective here is to ensure that when exceptions are thrown in HiveMetaStore in API methods
-     * they bubble up and are stored in the MetaStoreEndFunctionContext objects
-     */
-    String dbName = "hive3524";
-    String tblName = "tmptbl";
-    int listSize = 0;
-
-    driver.run("create database " + dbName);
-
-    try {
-      msc.getDatabase("UnknownDB");
-    }
-    catch (Exception e) {
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    String func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
-    MetaStoreEndFunctionContext context = DummyEndFunctionListener.contextList.get(listSize-1);
-    assertEquals(func_name,"get_database");
-    assertFalse(context.isSuccess());
-    Exception e = context.getException();
-    assertTrue((e!=null));
-    assertTrue((e instanceof NoSuchObjectException));
-    assertEquals(context.getInputTableName(), null);
-
-    driver.run("use " + dbName);
-    driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
-    String tableName = "Unknown";
-    try {
-      msc.getTable(dbName, tableName);
-    }
-    catch (Exception e1) {
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
-    context = DummyEndFunctionListener.contextList.get(listSize-1);
-    assertEquals(func_name,"get_table");
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertTrue((e!=null));
-    assertTrue((e instanceof NoSuchObjectException));
-    assertEquals(context.getInputTableName(), tableName);
-
-    try {
-      msc.getPartition("hive3524", tblName, "b=2012");
-    }
-    catch (Exception e2) {
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
-    context = DummyEndFunctionListener.contextList.get(listSize-1);
-    assertEquals(func_name,"get_partition_by_name");
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertTrue((e!=null));
-    assertTrue((e instanceof NoSuchObjectException));
-    assertEquals(context.getInputTableName(), tblName);
-    try {
-      driver.run("drop table Unknown");
-    }
-    catch (Exception e4) {
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
-    context = DummyEndFunctionListener.contextList.get(listSize-1);
-    assertEquals(func_name,"get_table");
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertTrue((e!=null));
-    assertTrue((e instanceof NoSuchObjectException));
-    assertEquals(context.getInputTableName(), "Unknown");
-
-  }
-
-}

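The test above asserts that exceptions thrown inside metastore API methods are captured in the MetaStoreEndFunctionContext handed to end-function listeners. A minimal listener sketch in the spirit of the test's DummyEndFunctionListener, assuming the listener API as it is used in the test (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionContext;
import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener;

public class LoggingEndFunctionListener extends MetaStoreEndFunctionListener {
  public LoggingEndFunctionListener(Configuration conf) {
    super(conf);
  }

  @Override
  public void onEndFunction(String functionName, MetaStoreEndFunctionContext context) {
    // Failed calls carry the exception and, where known, the table involved
    if (!context.isSuccess()) {
      System.err.println(functionName + " failed on table "
          + context.getInputTableName() + ": " + context.getException());
    }
  }
}
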
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
deleted file mode 100644
index e803106..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ /dev/null
@@ -1,524 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import com.google.common.collect.Lists;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreEventContext;
-import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.processors.SetProcessor;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-/**
- * TestMetaStoreEventListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
- * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
- */
-public class TestMetaStoreEventListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private Driver driver;
-
-  private static final String dbName = "hive2038";
-  private static final String tblName = "tmptbl";
-  private static final String renamed = "tmptbl2";
-  private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern";
-  private static final String metaConfVal = "";
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-    System.setProperty("hive.metastore.pre.event.listeners",
-        DummyPreListener.class.getName());
-
-    hiveConf = new HiveConf(this.getClass());
-
-    hiveConf.setVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-
-    driver.run("drop database if exists " + dbName + " cascade");
-
-    DummyListener.notifyList.clear();
-    DummyPreListener.notifyList.clear();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  private void validateCreateDb(Database expectedDb, Database actualDb) {
-    assertEquals(expectedDb.getName(), actualDb.getName());
-    assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
-  }
-
-  private void validateTable(Table expectedTable, Table actualTable) {
-    assertEquals(expectedTable.getTableName(), actualTable.getTableName());
-    assertEquals(expectedTable.getDbName(), actualTable.getDbName());
-    assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
-  }
-
-  private void validateCreateTable(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
-    assertEquals(expectedPartition, actualPartition);
-  }
-
-  private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
-    assertEquals(expectedTable, actualTable);
-  }
-
-  private void validatePartition(Partition expectedPartition, Partition actualPartition) {
-    assertEquals(expectedPartition.getValues(), actualPartition.getValues());
-    assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
-    assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
-  }
-
-  private void validateAlterPartition(Partition expectedOldPartition,
-      Partition expectedNewPartition, String actualOldPartitionDbName,
-      String actualOldPartitionTblName,List<String> actualOldPartitionValues,
-      Partition actualNewPartition) {
-    assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
-    assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
-    assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
-
-    validatePartition(expectedNewPartition, actualNewPartition);
-  }
-
-  private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
-      Table actualOldTable, Table actualNewTable) {
-    validateTable(expectedOldTable, actualOldTable);
-    validateTable(expectedNewTable, actualNewTable);
-  }
-
-  private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
-      Table actualOldTable, Table actualNewTable) {
-    validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
-
-    assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
-    assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
-  }
-
-  private void validateLoadPartitionDone(String expectedTableName,
-      Map<String,String> expectedPartitionName, String actualTableName,
-      Map<String,String> actualPartitionName) {
-    assertEquals(expectedPartitionName, actualPartitionName);
-    assertEquals(expectedTableName, actualTableName);
-  }
-
-  private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
-    while (expectedPartitions.hasNext()){
-      assertTrue(actualPartitions.hasNext());
-      validatePartition(expectedPartitions.next(), actualPartitions.next());
-    }
-    assertFalse(actualPartitions.hasNext());
-  }
-
-  private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateDropTable(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateDropDb(Database expectedDb, Database actualDb) {
-    assertEquals(expectedDb, actualDb);
-  }
-
-  private void validateIndex(Index expectedIndex, Index actualIndex) {
-    assertEquals(expectedIndex.getDbName(), actualIndex.getDbName());
-    assertEquals(expectedIndex.getIndexName(), actualIndex.getIndexName());
-    assertEquals(expectedIndex.getIndexHandlerClass(), actualIndex.getIndexHandlerClass());
-    assertEquals(expectedIndex.getOrigTableName(), actualIndex.getOrigTableName());
-    assertEquals(expectedIndex.getIndexTableName(), actualIndex.getIndexTableName());
-    assertEquals(expectedIndex.getSd().getLocation(), actualIndex.getSd().getLocation());
-  }
-
-  private void validateAddIndex(Index expectedIndex, Index actualIndex) {
-    validateIndex(expectedIndex, actualIndex);
-  }
-
-  private void validateAlterIndex(Index expectedOldIndex, Index actualOldIndex,
-      Index expectedNewIndex, Index actualNewIndex) {
-    validateIndex(expectedOldIndex, actualOldIndex);
-    validateIndex(expectedNewIndex, actualNewIndex);
-  }
-
-  private void validateDropIndex(Index expectedIndex, Index actualIndex) {
-    validateIndex(expectedIndex, actualIndex);
-  }
-
-  public void testListener() throws Exception {
-    int listSize = 0;
-
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), listSize);
-
-    driver.run("create database " + dbName);
-    listSize++;
-    PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    Database db = msc.getDatabase(dbName);
-    assertEquals(listSize, notifyList.size());
-    assertEquals(listSize + 1, preNotifyList.size());
-    validateCreateDb(db, preDbEvent.getDatabase());
-
-    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
-    assert dbEvent.getStatus();
-    validateCreateDb(db, dbEvent.getDatabase());
-
-
-    driver.run("use " + dbName);
-    driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
-    PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    listSize++;
-    Table tbl = msc.getTable(dbName, tblName);
-    validateCreateTable(tbl, preTblEvent.getTable());
-    assertEquals(notifyList.size(), listSize);
-
-    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
-    assert tblEvent.getStatus();
-    validateCreateTable(tbl, tblEvent.getTable());
-
-    driver.run("create index tmptbl_i on table tmptbl(a) as 'compact' " +
-        "WITH DEFERRED REBUILD IDXPROPERTIES ('prop1'='val1', 'prop2'='val2')");
-    listSize += 2;  // creates index table internally
-    assertEquals(notifyList.size(), listSize);
-
-    AddIndexEvent addIndexEvent = (AddIndexEvent)notifyList.get(listSize - 1);
-    assert addIndexEvent.getStatus();
-    PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 3));
-
-    Index oldIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i");
-
-    validateAddIndex(oldIndex, addIndexEvent.getIndex());
-
-    validateAddIndex(oldIndex, preAddIndexEvent.getIndex());
-
-    driver.run("alter index tmptbl_i on tmptbl set IDXPROPERTIES " +
-        "('prop1'='val1_new', 'prop3'='val3')");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    Index newIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i");
-
-    AlterIndexEvent alterIndexEvent = (AlterIndexEvent) notifyList.get(listSize - 1);
-    assert alterIndexEvent.getStatus();
-    validateAlterIndex(oldIndex, alterIndexEvent.getOldIndex(),
-        newIndex, alterIndexEvent.getNewIndex());
-
-    PreAlterIndexEvent preAlterIndexEvent = (PreAlterIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
-    validateAlterIndex(oldIndex, preAlterIndexEvent.getOldIndex(),
-        newIndex, preAlterIndexEvent.getNewIndex());
-
-    driver.run("drop index tmptbl_i on tmptbl");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    DropIndexEvent dropIndexEvent = (DropIndexEvent) notifyList.get(listSize - 1);
-    assert dropIndexEvent.getStatus();
-    validateDropIndex(newIndex, dropIndexEvent.getIndex());
-
-    PreDropIndexEvent preDropIndexEvent = (PreDropIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
-    validateDropIndex(newIndex, preDropIndexEvent.getIndex());
-
-    driver.run("alter table tmptbl add partition (b='2011')");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
-
-    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert partEvent.getStatus();
-    Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
-    Partition partAdded = partEvent.getPartitionIterator().next();
-    validateAddPartition(part, partAdded);
-    validateTableInAddPartition(tbl, partEvent.getTable());
-    validateAddPartition(part, prePartEvent.getPartitions().get(0));
-
-    // Test adding multiple partitions in a single partition-set, atomically.
-    int currentTime = (int)System.currentTimeMillis();
-    HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(hiveConf);
-    Table table = hmsClient.getTable(dbName, "tmptbl");
-    Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
-    ++listSize;
-    AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
-    List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
-    assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
-    assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
-    assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
-    assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
-
-    driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'"));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAlterPartitionEvent preAlterPartEvent =
-        (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    // The partition did not change, so the new partition in the event
-    // should match the original partition.
-    Partition origP = msc.getPartition(dbName, tblName, "b=2011");
-
-    AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
-    assert alterPartEvent.getStatus();
-    validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
-        alterPartEvent.getOldPartition().getTableName(),
-        alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
-
-
-    validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
-        preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
-        preAlterPartEvent.getNewPartition());
-
-    List<String> part_vals = new ArrayList<String>();
-    part_vals.add("c=2012");
-    int preEventListSize;
-    preEventListSize = preNotifyList.size() + 1;
-    Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
-
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), preEventListSize);
-
-    AddPartitionEvent appendPartEvent =
-        (AddPartitionEvent)(notifyList.get(listSize-1));
-    Partition partAppended = appendPartEvent.getPartitionIterator().next();
-    validateAddPartition(newPart, partAppended);
-
-    PreAddPartitionEvent preAppendPartEvent =
-        (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
-
-    driver.run(String.format("alter table %s rename to %s", tblName, renamed));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
-
-    Table renamedTable = msc.getTable(dbName, renamed);
-
-    AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableE.getStatus();
-    validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
-    validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
-        preAlterTableE.getNewTable());
-
-    //change the table name back
-    driver.run(String.format("alter table %s rename to %s", renamed, tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    driver.run(String.format("alter table %s ADD COLUMNS (c int)", tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
-
-    Table altTable = msc.getTable(dbName, tblName);
-
-    alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableE.getStatus();
-    validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
-    validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
-        preAlterTableE.getNewTable());
-
-    Map<String,String> kvs = new HashMap<String, String>(1);
-    kvs.put("b", "2011");
-    msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
-    assert partMarkEvent.getStatus();
-    validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
-        partMarkEvent.getPartitionName());
-
-    PreLoadPartitionDoneEvent prePartMarkEvent =
-        (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
-    validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
-        prePartMarkEvent.getPartitionName());
-
-    driver.run(String.format("alter table %s drop partition (b='2011')", tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
-        .size() - 1);
-
-    DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPart.getStatus();
-    validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
-    validateTableInDropPartition(tbl, dropPart.getTable());
-
-    validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
-    validateTableInDropPartition(tbl, preDropPart.getTable());
-
-    driver.run("drop table " + tblName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
-    assert dropTbl.getStatus();
-    validateDropTable(tbl, dropTbl.getTable());
-    validateDropTable(tbl, preDropTbl.getTable());
-
-    driver.run("drop database " + dbName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
-    assert dropDB.getStatus();
-    validateDropDb(db, dropDB.getDatabase());
-    validateDropDb(db, preDropDB.getDatabase());
-
-    SetProcessor.setVariable("metaconf:hive.metastore.try.direct.sql", "false");
-    ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
-    assertEquals("hive.metastore.try.direct.sql", event.getKey());
-    assertEquals("true", event.getOldValue());
-    assertEquals("false", event.getNewValue());
-  }
-
-  public void testMetaConfNotifyListenersClosingClient() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), metaConfVal);
-    assertEquals(event.getNewValue(), "[test pattern modified]");
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), "[test pattern modified]");
-    assertEquals(event.getNewValue(), metaConfVal);
-  }
-
-  public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
-    HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(hiveConf, null);
-    nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), metaConfVal);
-    assertEquals(event.getNewValue(), "[test pattern modified]");
-    // This should also trigger meta listener notification via TServerEventHandler#deleteContext
-    nonClosingClient.getTTransport().close();
-
-    Thread.sleep(5 * 1000);
-
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), "[test pattern modified]");
-    assertEquals(event.getNewValue(), metaConfVal);
-  }
-
-  public void testMetaConfDuplicateNotification() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, metaConfVal);
-    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-
-    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
-    // Setting the key to the same value should not trigger a configChange event during shutdown.
-    assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
-  }
-
-  public void testMetaConfSameHandler() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
-    IHMSHandler beforeHandler = event.getIHMSHandler();
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
-    IHMSHandler afterHandler = event.getIHMSHandler();
-    // Meta-conf cleanup should trigger an event to the listener.
-    assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
-    // Both handlers should be the same.
-    assertEquals(beforeHandler, afterHandler);
-  }
-}
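
For readers following these listener tests, here is a minimal sketch of a custom
MetaStoreEventListener, modeled on the DummyListener and ErrorEventListener classes
referenced in this thread. The class name LoggingListener and the println output are
illustrative, not part of Hive:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
    import org.apache.hadoop.hive.metastore.events.DropTableEvent;

    public class LoggingListener extends MetaStoreEventListener {

      public LoggingListener(Configuration config) {
        super(config);
      }

      @Override
      public void onCreateTable(CreateTableEvent event) throws MetaException {
        // getStatus() reflects whether the underlying RawStore commit succeeded.
        System.out.println("create table committed=" + event.getStatus());
      }

      @Override
      public void onDropTable(DropTableEvent event) throws MetaException {
        System.out.println("drop table committed=" + event.getStatus());
      }
    }

Such a listener is wired in through hive.metastore.event.listeners, exactly as the
setUp() methods below do for DummyListener.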

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
deleted file mode 100644
index 9623fed..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.List;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-/**
- * Ensure that the status of MetaStore events depends on the RawStore's commit status.
- */
-public class TestMetaStoreEventListenerOnlyOnCommit extends TestCase {
-
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private Driver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-
-    DummyRawStoreControlledCommit.setCommitSucceed(true);
-
-    System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
-            DummyListener.class.getName());
-    System.setProperty(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
-            DummyRawStoreControlledCommit.class.getName());
-
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-
-    DummyListener.notifyList.clear();
-  }
-
-  public void testEventStatus() throws Exception {
-    int listSize = 0;
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-
-    driver.run("CREATE DATABASE tmpDb");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("CREATE TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit (id INT) " +
-                "PARTITIONED BY (ds STRING)");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-                "ADD PARTITION(ds='foo1')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    DummyRawStoreControlledCommit.setCommitSucceed(false);
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-                "ADD PARTITION(ds='foo2')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertFalse(DummyListener.getLastEvent().getStatus());
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
deleted file mode 100644
index 4982313..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-/**
- * TestMetaStoreInitListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener}
- */
-public class TestMetaStoreInitListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private Driver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.init.hooks",
-        DummyMetaStoreInitListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testMetaStoreInitListener() throws Exception {
-    // DummyMetaStoreInitListener's onInit will be called at HMSHandler
-    // initialization, and will set this flag to true.
-    assertTrue(DummyMetaStoreInitListener.wasCalled);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
deleted file mode 100644
index 11ebf4d..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-/**
- * Test for unwrapping InvocationTargetException, which is thrown from the
- * constructor of the listener class.
- */
-public class TestMetaStoreListenersError extends TestCase {
-
-  public void testInitListenerException() throws Throwable {
-
-    System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName());
-    int port = MetaStoreTestUtils.findFreePort();
-    try {
-      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
-    } catch (Throwable throwable) {
-      Assert.assertEquals(MetaException.class, throwable.getClass());
-      Assert.assertEquals(
-          "Failed to instantiate listener named: " +
-              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorInitListener, " +
-              "reason: java.lang.IllegalArgumentException: exception on constructor",
-          throwable.getMessage());
-    }
-  }
-
-  public void testEventListenerException() throws Throwable {
-
-    System.setProperty("hive.metastore.init.hooks", "");
-    System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName());
-    int port = MetaStoreTestUtils.findFreePort();
-    try {
-      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
-    } catch (Throwable throwable) {
-      Assert.assertEquals(MetaException.class, throwable.getClass());
-      Assert.assertEquals(
-          "Failed to instantiate listener named: " +
-              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorEventListener, " +
-              "reason: java.lang.IllegalArgumentException: exception on constructor",
-          throwable.getMessage());
-    }
-  }
-
-  public static class ErrorInitListener extends MetaStoreInitListener {
-
-    public ErrorInitListener(Configuration config) {
-      super(config);
-      throw new IllegalArgumentException("exception on constructor");
-    }
-
-    public void onInit(MetaStoreInitContext context) throws MetaException {
-    }
-  }
-
-  public static class ErrorEventListener extends MetaStoreEventListener {
-
-    public ErrorEventListener(Configuration config) {
-      super(config);
-      throw new IllegalArgumentException("exception on constructor");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
deleted file mode 100644
index 1695bfd..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-import javax.jdo.JDOCanRetryException;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-
-public class TestObjectStoreInitRetry {
-
-  private static boolean noisy = true; // set to false to suppress line number debug traces for FakeDerby calls
-
-  private static int injectConnectFailure = 0;
-
-  public static void setInjectConnectFailure(int x){
-    injectConnectFailure = x;
-  }
-
-  public static int getInjectConnectFailure(){
-    return injectConnectFailure;
-  }
-
-  public static void decrementInjectConnectFailure(){
-    injectConnectFailure--;
-  }
-
-  @BeforeClass
-  public static void oneTimeSetup() throws SQLException {
-    // dummy instantiation to make sure any static/ctor code blocks of that
-    // driver are loaded and ready to go.
-    DriverManager.registerDriver(new FakeDerby());
-  }
-
-  @AfterClass
-  public static void oneTimeTearDown() throws SQLException {
-    DriverManager.deregisterDriver(new FakeDerby());
-  }
-
-  public static void misbehave() throws RuntimeException{
-    TestObjectStoreInitRetry.debugTrace();
-    if (TestObjectStoreInitRetry.getInjectConnectFailure() > 0){
-      TestObjectStoreInitRetry.decrementInjectConnectFailure();
-      RuntimeException re = new JDOCanRetryException();
-      if (noisy){
-        System.err.println("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure());
-        re.printStackTrace(System.err);
-      }
-      throw re;
-    }
-  }
-
-  // Debug instrumenter - useful for finding which functions get called, and how often.
-  public static void debugTrace() {
-    if (noisy){
-      Exception e = new Exception();
-      System.err.println("." + e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure());
-    }
-  }
-
-  protected static HiveConf hiveConf;
-
-  @Test
-  public void testObjStoreRetry() throws Exception {
-    hiveConf = new HiveConf(this.getClass());
-
-    hiveConf.setIntVar(ConfVars.HMSHANDLERATTEMPTS, 4);
-    hiveConf.setVar(ConfVars.HMSHANDLERINTERVAL, "1s");
-    hiveConf.setVar(ConfVars.METASTORE_CONNECTION_DRIVER,FakeDerby.class.getName());
-    hiveConf.setBoolVar(ConfVars.METASTORE_TRY_DIRECT_SQL,true);
-    String jdbcUrl = hiveConf.get(ConfVars.METASTORECONNECTURLKEY.varname);
-    jdbcUrl = jdbcUrl.replace("derby","fderby");
-    hiveConf.setVar(ConfVars.METASTORECONNECTURLKEY,jdbcUrl);
-
-    ObjectStore objStore = new ObjectStore();
-
-    Exception savE = null;
-    try {
-      setInjectConnectFailure(5);
-      objStore.setConf(hiveConf);
-    } catch (Exception e) {
-      e.printStackTrace(System.err);
-      savE = e;
-    }
-
-    /**
-     * A note on retries.
-     *
-     * We configured 4 handler attempts but injected 5 connect failures,
-     * so 5 - 4 == 1 simulated failure remains after setConf() gives up.
-     */
-
-    assertEquals(1, getInjectConnectFailure());
-    assertNotNull(savE);
-
-    setInjectConnectFailure(0);
-    objStore.setConf(hiveConf);
-    assertEquals(0, getInjectConnectFailure());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
deleted file mode 100644
index e3e175b..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.*;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-// Validate the metastore client call validatePartitionNameCharacters to ensure it throws
-// an exception if partition values contain non-ASCII characters or commas.
-
-public class TestPartitionNameWhitelistValidation {
-
-  private static final String partitionValidationPattern = "[\\x20-\\x7E&&[^,]]*";
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-
-  @BeforeClass
-  public static void setupBeforeClass() throws Exception {
-    System.setProperty(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname,
-        partitionValidationPattern);
-    hiveConf = new HiveConf();
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-
-  // Invokes validatePartitionNameCharacters on the metastore client and
-  // returns whether validation succeeded.
-  private boolean runValidation(List<String> partVals) {
-    try {
-      msc.validatePartitionNameCharacters(partVals);
-    } catch (Exception e) {
-      return false;
-    }
-
-    return true;
- }
-
-  // Sample data
-  private List<String> getPartValsWithUnicode() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("klâwen");
-    partVals.add("tägelîch");
-
-    return partVals;
-  }
-
-  private List<String> getPartValsWithCommas() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("a,b");
-    partVals.add("c,d,e,f");
-
-    return partVals;
-  }
-
-  private List<String> getPartValsWithValidCharacters() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("part1");
-    partVals.add("part2");
-
-    return partVals;
-  }
-
-  @Test
-  public void testAddPartitionWithCommas() {
-    assertFalse("Add a partition with commas in name",
-        runValidation(getPartValsWithCommas()));
-  }
-
-  @Test
-  public void testAddPartitionWithUnicode() {
-    assertFalse("Add a partition with unicode characters in name",
-        runValidation(getPartValsWithUnicode()));
-  }
-
-  @Test
-  public void testAddPartitionWithValidPartVal() {
-    assertTrue("Add a partition with unicode characters in name",
-        runValidation(getPartValsWithValidCharacters()));
-  }
-
-  @Test
-  public void testAppendPartitionWithUnicode() {
-    assertFalse("Append a partition with unicode characters in name",
-        runValidation(getPartValsWithUnicode()));
-  }
-
-  @Test
-  public void testAppendPartitionWithCommas() {
-    assertFalse("Append a partition with unicode characters in name",
-        runValidation(getPartValsWithCommas()));
-  }
-
-  @Test
-  public void testAppendPartitionWithValidCharacters() {
-    assertTrue("Append a partition with no unicode characters in name",
-        runValidation(getPartValsWithValidCharacters()));
-  }
-
-}
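
As an aside, the whitelist pattern above uses Java's character-class intersection
syntax: [\x20-\x7E&&[^,]]* matches any run of printable ASCII characters that are
not commas. A self-contained illustration (the sample strings echo the test data;
the class name is made up):

    import java.util.regex.Pattern;

    public class WhitelistPatternDemo {
      public static void main(String[] args) {
        // Printable ASCII (\x20-\x7E) intersected with "anything but a comma".
        Pattern p = Pattern.compile("[\\x20-\\x7E&&[^,]]*");
        System.out.println(p.matcher("part1").matches());     // true
        System.out.println(p.matcher("a,b").matches());       // false: comma
        System.out.println(p.matcher("tägelîch").matches());  // false: non-ASCII
      }
    }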

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
deleted file mode 100644
index ec84e66..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-
-public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
-  private static boolean isServerStarted = false;
-  protected static int port;
-
-  public TestRemoteHiveMetaStore() {
-    super();
-    isThriftClient = true;
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    if (isServerStarted) {
-      assertNotNull("Unable to connect to the MetaStore server", client);
-      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-      return;
-    }
-
-    port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-    System.out.println("Starting MetaStore Server on port " + port);
-    isServerStarted = true;
-
-    // This is the default case, with setugi off for both client and server.
-    client = createClient();
-  }
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, false);
-    return new HiveMetaStoreClient(hiveConf);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
deleted file mode 100644
index c7c35f3..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- *
- * TestRemoteHiveMetaStoreIpAddress.
- *
- * Test which checks that the remote Hive metastore stores the proper IP address using
- * IpAddressListener
- */
-public class TestRemoteHiveMetaStoreIpAddress extends TestCase {
-  private static boolean isServerStarted = false;
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf = new HiveConf(this.getClass());
-
-    if (isServerStarted) {
-      assertNotNull("Unable to connect to the MetaStore server", msc);
-      return;
-    }
-
-    System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        IpAddressListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    System.out.println("Started MetaStore Server on port " + port);
-    isServerStarted = true;
-
-    // This is the default case, with setugi off for both client and server.
-    createClient(port);
-  }
-
-  public void testIpAddress() throws Exception {
-    try {
-
-      Database db = new Database();
-      db.setName("testIpAddressIp");
-      msc.createDatabase(db);
-      msc.dropDatabase(db.getName());
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testIpAddress() failed.");
-      throw e;
-    }
-  }
-
-  protected void createClient(int port) throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
deleted file mode 100644
index 8658262..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
-public class TestRemoteUGIHiveMetaStoreIpAddress extends TestRemoteHiveMetaStoreIpAddress {
-  public TestRemoteUGIHiveMetaStoreIpAddress() {
-    super();
-    System.setProperty(ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
-  }
-
-}


[14/50] [abbrv] hive git commit: HIVE-18209: Fix API call in VectorizedListColumnReader to get value from BytesColumnVector (Colin Ma, reviewed by Ferdinand Xu)

Posted by ga...@apache.org.
HIVE-18209: Fix API call in VectorizedListColumnReader to get value from BytesColumnVector (Colin Ma, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/11227eba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/11227eba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/11227eba

Branch: refs/heads/standalone-metastore
Commit: 11227ebab390df10970fb8ef61f3e24421d6c66e
Parents: 7acc4ce
Author: Ferdinand Xu <ch...@intel.com>
Authored: Mon Dec 18 10:01:13 2017 +0800
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Mon Dec 18 10:01:13 2017 +0800

----------------------------------------------------------------------
 .../vector/VectorizedListColumnReader.java      |  3 +-
 .../parquet/TestVectorizedListColumnReader.java | 34 +++++++++++++++++++-
 .../parquet/VectorizedColumnReaderTestBase.java |  1 +
 3 files changed, 36 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/11227eba/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
index ea4f2f2..12af77c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/vector/VectorizedListColumnReader.java
@@ -258,7 +258,8 @@ public class VectorizedListColumnReader extends BaseVectorizedColumnReader {
         lcv.child = new BytesColumnVector(total);
         lcv.child.init();
         for (int i = 0; i < valueList.size(); i++) {
-          ((BytesColumnVector)lcv.child).setVal(i, ((List<byte[]>)valueList).get(i));
+          byte[] src = ((List<byte[]>)valueList).get(i);
+          ((BytesColumnVector)lcv.child).setRef(i, src, 0, src.length);
         }
         break;
       case FLOAT:
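
For context on the fix above: setVal() copies the source bytes into the vector's
shared internal buffer (which must be initialized first), while setRef() only records
a reference into the caller's array and avoids the copy. A minimal sketch with
made-up values:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;

    public class BytesColumnVectorSketch {
      public static void main(String[] args) {
        BytesColumnVector v = new BytesColumnVector(2);
        v.initBuffer(0); // required before setVal(), which copies into the buffer

        byte[] a = "alpha".getBytes(StandardCharsets.UTF_8);
        byte[] b = "beta".getBytes(StandardCharsets.UTF_8);

        v.setVal(0, a, 0, a.length); // copies 'a' into v's internal buffer
        v.setRef(1, b, 0, b.length); // stores a pointer to 'b'; no copy is made,
                                     // so 'b' must stay unmodified while v is in use
      }
    }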

http://git-wip-us.apache.org/repos/asf/hive/blob/11227eba/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedListColumnReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedListColumnReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedListColumnReader.java
index de19615..8ea5d25 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedListColumnReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestVectorizedListColumnReader.java
@@ -72,8 +72,9 @@ public class TestVectorizedListColumnReader extends VectorizedColumnReaderTestBa
         }
       }
       for (int j = 0; j < listMaxSize; j++) {
-        group.append("list_int32_field_for_repeat_test", getIntValue(isDictionaryEncoding, j));
+        group.append("list_binary_field_for_repeat_test", getBinaryValue(isDictionaryEncoding, i));
       }
+
       writer.write(group);
     }
     writer.close();
@@ -157,6 +158,14 @@ public class TestVectorizedListColumnReader extends VectorizedColumnReaderTestBa
     removeFile();
   }
 
+  @Test
+  public void testUnrepeatedStringWithoutNullListRead() throws Exception {
+    removeFile();
+    writeListData(initWriterFromFile(), false, 1025);
+    testUnRepeateStringWithoutNullListRead();
+    removeFile();
+  }
+
   private void testListReadAllType(boolean isDictionaryEncoding, int elementNum) throws Exception {
     testListRead(isDictionaryEncoding, "int", elementNum);
     testListRead(isDictionaryEncoding, "long", elementNum);
@@ -250,6 +259,10 @@ public class TestVectorizedListColumnReader extends VectorizedColumnReaderTestBa
     try {
       while (reader.next(NullWritable.get(), previous)) {
         ListColumnVector vector = (ListColumnVector) previous.cols[0];
+
+        // isRepeating should be true exactly when the offsets array has length 1.
+        assertEquals((vector.offsets.length == 1), vector.isRepeating);
+
         for (int i = 0; i < vector.offsets.length; i++) {
           if (row == elementNum) {
             assertEquals(i, vector.offsets.length - 1);
@@ -305,4 +318,23 @@ public class TestVectorizedListColumnReader extends VectorizedColumnReaderTestBa
       reader.close();
     }
   }
+
+  private void testUnRepeateStringWithoutNullListRead() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(IOConstants.COLUMNS, "list_binary_field_for_repeat_test");
+    conf.set(IOConstants.COLUMNS_TYPES, "array<string>");
+    conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
+    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
+    VectorizedParquetRecordReader reader = createTestParquetReader(
+        "message hive_schema {repeated binary list_binary_field_for_repeat_test;}", conf);
+    VectorizedRowBatch previous = reader.createValue();
+    try {
+      while (reader.next(NullWritable.get(), previous)) {
+        ListColumnVector vector = (ListColumnVector) previous.cols[0];
+        assertEquals((vector.offsets.length == 1), vector.isRepeating);
+      }
+    } finally {
+      reader.close();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/11227eba/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
index 33c5c82..db7777d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/VectorizedColumnReaderTestBase.java
@@ -124,6 +124,7 @@ public class VectorizedColumnReaderTestBase {
       + "repeated fixed_len_byte_array(3) list_byte_array_field;"
       + "repeated binary list_binary_field;"
       + "repeated binary list_decimal_field (DECIMAL(5,2));"
+      + "repeated binary list_binary_field_for_repeat_test;"
       + "repeated int32 list_int32_field_for_repeat_test;"
       + "repeated group map_int32 (MAP_KEY_VALUE) {\n"
       + "  required int32 key;\n"


[39/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 808ee09..1e4c4fd 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1412,6 +1412,111 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
+  def create_ischema(self, schema):
+    """
+    Parameters:
+     - schema
+    """
+    pass
+
+  def alter_ischema(self, schemaName, newSchema):
+    """
+    Parameters:
+     - schemaName
+     - newSchema
+    """
+    pass
+
+  def get_ischema(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    pass
+
+  def drop_ischema(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    pass
+
+  def add_schema_version(self, schemaVersion):
+    """
+    Parameters:
+     - schemaVersion
+    """
+    pass
+
+  def get_schema_version(self, schemaName, version):
+    """
+    Parameters:
+     - schemaName
+     - version
+    """
+    pass
+
+  def get_schema_latest_version(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    pass
+
+  def get_schema_all_versions(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    pass
+
+  def drop_schema_version(self, schemaName, version):
+    """
+    Parameters:
+     - schemaName
+     - version
+    """
+    pass
+
+  def get_schemas_by_cols(self, rqst):
+    """
+    Parameters:
+     - rqst
+    """
+    pass
+
+  def map_schema_version_to_serde(self, schemaName, version, serdeName):
+    """
+    Parameters:
+     - schemaName
+     - version
+     - serdeName
+    """
+    pass
+
+  def set_schema_version_state(self, schemaName, version, state):
+    """
+    Parameters:
+     - schemaName
+     - version
+     - state
+    """
+    pass
+
+  def add_serde(self, serde):
+    """
+    Parameters:
+     - serde
+    """
+    pass
+
+  def get_serde(self, serdeName):
+    """
+    Parameters:
+     - serdeName
+    """
+    pass
+
 
 class Client(fb303.FacebookService.Client, Iface):
   """
@@ -7871,6 +7976,500 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o4
     raise TApplicationException(TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result")
 
+  def create_ischema(self, schema):
+    """
+    Parameters:
+     - schema
+    """
+    self.send_create_ischema(schema)
+    self.recv_create_ischema()
+
+  def send_create_ischema(self, schema):
+    self._oprot.writeMessageBegin('create_ischema', TMessageType.CALL, self._seqid)
+    args = create_ischema_args()
+    args.schema = schema
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_create_ischema(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = create_ischema_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    if result.o3 is not None:
+      raise result.o3
+    return
+
+  def alter_ischema(self, schemaName, newSchema):
+    """
+    Parameters:
+     - schemaName
+     - newSchema
+    """
+    self.send_alter_ischema(schemaName, newSchema)
+    self.recv_alter_ischema()
+
+  def send_alter_ischema(self, schemaName, newSchema):
+    self._oprot.writeMessageBegin('alter_ischema', TMessageType.CALL, self._seqid)
+    args = alter_ischema_args()
+    args.schemaName = schemaName
+    args.newSchema = newSchema
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_alter_ischema(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = alter_ischema_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def get_ischema(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    self.send_get_ischema(schemaName)
+    return self.recv_get_ischema()
+
+  def send_get_ischema(self, schemaName):
+    self._oprot.writeMessageBegin('get_ischema', TMessageType.CALL, self._seqid)
+    args = get_ischema_args()
+    args.schemaName = schemaName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_ischema(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_ischema_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_ischema failed: unknown result")
+
+  def drop_ischema(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    self.send_drop_ischema(schemaName)
+    self.recv_drop_ischema()
+
+  def send_drop_ischema(self, schemaName):
+    self._oprot.writeMessageBegin('drop_ischema', TMessageType.CALL, self._seqid)
+    args = drop_ischema_args()
+    args.schemaName = schemaName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_drop_ischema(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = drop_ischema_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    if result.o3 is not None:
+      raise result.o3
+    return
+
+  def add_schema_version(self, schemaVersion):
+    """
+    Parameters:
+     - schemaVersion
+    """
+    self.send_add_schema_version(schemaVersion)
+    self.recv_add_schema_version()
+
+  def send_add_schema_version(self, schemaVersion):
+    self._oprot.writeMessageBegin('add_schema_version', TMessageType.CALL, self._seqid)
+    args = add_schema_version_args()
+    args.schemaVersion = schemaVersion
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_add_schema_version(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = add_schema_version_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    if result.o3 is not None:
+      raise result.o3
+    return
+
+  def get_schema_version(self, schemaName, version):
+    """
+    Parameters:
+     - schemaName
+     - version
+    """
+    self.send_get_schema_version(schemaName, version)
+    return self.recv_get_schema_version()
+
+  def send_get_schema_version(self, schemaName, version):
+    self._oprot.writeMessageBegin('get_schema_version', TMessageType.CALL, self._seqid)
+    args = get_schema_version_args()
+    args.schemaName = schemaName
+    args.version = version
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_schema_version(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_schema_version_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_version failed: unknown result")
+
+  def get_schema_latest_version(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    self.send_get_schema_latest_version(schemaName)
+    return self.recv_get_schema_latest_version()
+
+  def send_get_schema_latest_version(self, schemaName):
+    self._oprot.writeMessageBegin('get_schema_latest_version', TMessageType.CALL, self._seqid)
+    args = get_schema_latest_version_args()
+    args.schemaName = schemaName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_schema_latest_version(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_schema_latest_version_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_latest_version failed: unknown result")
+
+  def get_schema_all_versions(self, schemaName):
+    """
+    Parameters:
+     - schemaName
+    """
+    self.send_get_schema_all_versions(schemaName)
+    return self.recv_get_schema_all_versions()
+
+  def send_get_schema_all_versions(self, schemaName):
+    self._oprot.writeMessageBegin('get_schema_all_versions', TMessageType.CALL, self._seqid)
+    args = get_schema_all_versions_args()
+    args.schemaName = schemaName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_schema_all_versions(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_schema_all_versions_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_all_versions failed: unknown result")
+
+  def drop_schema_version(self, schemaName, version):
+    """
+    Parameters:
+     - schemaName
+     - version
+    """
+    self.send_drop_schema_version(schemaName, version)
+    self.recv_drop_schema_version()
+
+  def send_drop_schema_version(self, schemaName, version):
+    self._oprot.writeMessageBegin('drop_schema_version', TMessageType.CALL, self._seqid)
+    args = drop_schema_version_args()
+    args.schemaName = schemaName
+    args.version = version
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_drop_schema_version(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = drop_schema_version_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def get_schemas_by_cols(self, rqst):
+    """
+    Parameters:
+     - rqst
+    """
+    self.send_get_schemas_by_cols(rqst)
+    return self.recv_get_schemas_by_cols()
+
+  def send_get_schemas_by_cols(self, rqst):
+    self._oprot.writeMessageBegin('get_schemas_by_cols', TMessageType.CALL, self._seqid)
+    args = get_schemas_by_cols_args()
+    args.rqst = rqst
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_schemas_by_cols(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_schemas_by_cols_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schemas_by_cols failed: unknown result")
+
+  def map_schema_version_to_serde(self, schemaName, version, serdeName):
+    """
+    Parameters:
+     - schemaName
+     - version
+     - serdeName
+    """
+    self.send_map_schema_version_to_serde(schemaName, version, serdeName)
+    self.recv_map_schema_version_to_serde()
+
+  def send_map_schema_version_to_serde(self, schemaName, version, serdeName):
+    self._oprot.writeMessageBegin('map_schema_version_to_serde', TMessageType.CALL, self._seqid)
+    args = map_schema_version_to_serde_args()
+    args.schemaName = schemaName
+    args.version = version
+    args.serdeName = serdeName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_map_schema_version_to_serde(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = map_schema_version_to_serde_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def set_schema_version_state(self, schemaName, version, state):
+    """
+    Parameters:
+     - schemaName
+     - version
+     - state
+    """
+    self.send_set_schema_version_state(schemaName, version, state)
+    self.recv_set_schema_version_state()
+
+  def send_set_schema_version_state(self, schemaName, version, state):
+    self._oprot.writeMessageBegin('set_schema_version_state', TMessageType.CALL, self._seqid)
+    args = set_schema_version_state_args()
+    args.schemaName = schemaName
+    args.version = version
+    args.state = state
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_set_schema_version_state(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = set_schema_version_state_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    if result.o3 is not None:
+      raise result.o3
+    return
+
+  def add_serde(self, serde):
+    """
+    Parameters:
+     - serde
+    """
+    self.send_add_serde(serde)
+    self.recv_add_serde()
+
+  def send_add_serde(self, serde):
+    self._oprot.writeMessageBegin('add_serde', TMessageType.CALL, self._seqid)
+    args = add_serde_args()
+    args.serde = serde
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_add_serde(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = add_serde_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def get_serde(self, serdeName):
+    """
+    Parameters:
+     - serdeName
+    """
+    self.send_get_serde(serdeName)
+    return self.recv_get_serde()
+
+  def send_get_serde(self, serdeName):
+    self._oprot.writeMessageBegin('get_serde', TMessageType.CALL, self._seqid)
+    args = get_serde_args()
+    args.serdeName = serdeName
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_serde(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_serde_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_serde failed: unknown result")
+
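The new Client methods above all follow the generated send/recv convention: each call serializes an _args struct, flushes the transport, then reads a _result struct back and re-raises any declared exception. A minimal sketch of driving them directly over Thrift (module path, struct fields, and host/port are assumptions for illustration; real deployments normally go through HiveMetaStoreClient rather than raw Thrift):

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore    # generated package, assumed on sys.path
    from hive_metastore.ttypes import ISchema

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        client.create_ischema(ISchema(name='events'))        # field names illustrative; see ttypes.py
        latest = client.get_schema_latest_version('events')  # raises NoSuchObjectException if absent
    finally:
        transport.close()
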
 
 class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
   def __init__(self, handler):
@@ -8054,6 +8653,20 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["create_or_update_wm_mapping"] = Processor.process_create_or_update_wm_mapping
     self._processMap["drop_wm_mapping"] = Processor.process_drop_wm_mapping
     self._processMap["create_or_drop_wm_trigger_to_pool_mapping"] = Processor.process_create_or_drop_wm_trigger_to_pool_mapping
+    self._processMap["create_ischema"] = Processor.process_create_ischema
+    self._processMap["alter_ischema"] = Processor.process_alter_ischema
+    self._processMap["get_ischema"] = Processor.process_get_ischema
+    self._processMap["drop_ischema"] = Processor.process_drop_ischema
+    self._processMap["add_schema_version"] = Processor.process_add_schema_version
+    self._processMap["get_schema_version"] = Processor.process_get_schema_version
+    self._processMap["get_schema_latest_version"] = Processor.process_get_schema_latest_version
+    self._processMap["get_schema_all_versions"] = Processor.process_get_schema_all_versions
+    self._processMap["drop_schema_version"] = Processor.process_drop_schema_version
+    self._processMap["get_schemas_by_cols"] = Processor.process_get_schemas_by_cols
+    self._processMap["map_schema_version_to_serde"] = Processor.process_map_schema_version_to_serde
+    self._processMap["set_schema_version_state"] = Processor.process_set_schema_version_state
+    self._processMap["add_serde"] = Processor.process_add_serde
+    self._processMap["get_serde"] = Processor.process_get_serde
 
   def process(self, iprot, oprot):
     (name, type, seqid) = iprot.readMessageBegin()
@@ -12485,6 +13098,365 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_create_ischema(self, seqid, iprot, oprot):
+    args = create_ischema_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = create_ischema_result()
+    try:
+      self._handler.create_ischema(args.schema)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except AlreadyExistsException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except NoSuchObjectException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except MetaException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("create_ischema", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_alter_ischema(self, seqid, iprot, oprot):
+    args = alter_ischema_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = alter_ischema_result()
+    try:
+      self._handler.alter_ischema(args.schemaName, args.newSchema)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("alter_ischema", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_ischema(self, seqid, iprot, oprot):
+    args = get_ischema_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_ischema_result()
+    try:
+      result.success = self._handler.get_ischema(args.schemaName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_ischema", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_drop_ischema(self, seqid, iprot, oprot):
+    args = drop_ischema_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = drop_ischema_result()
+    try:
+      self._handler.drop_ischema(args.schemaName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except InvalidOperationException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except MetaException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("drop_ischema", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_add_schema_version(self, seqid, iprot, oprot):
+    args = add_schema_version_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = add_schema_version_result()
+    try:
+      self._handler.add_schema_version(args.schemaVersion)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except AlreadyExistsException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except NoSuchObjectException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except MetaException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("add_schema_version", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_schema_version(self, seqid, iprot, oprot):
+    args = get_schema_version_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_schema_version_result()
+    try:
+      result.success = self._handler.get_schema_version(args.schemaName, args.version)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_schema_version", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_schema_latest_version(self, seqid, iprot, oprot):
+    args = get_schema_latest_version_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_schema_latest_version_result()
+    try:
+      result.success = self._handler.get_schema_latest_version(args.schemaName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_schema_latest_version", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_schema_all_versions(self, seqid, iprot, oprot):
+    args = get_schema_all_versions_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_schema_all_versions_result()
+    try:
+      result.success = self._handler.get_schema_all_versions(args.schemaName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_schema_all_versions", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_drop_schema_version(self, seqid, iprot, oprot):
+    args = drop_schema_version_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = drop_schema_version_result()
+    try:
+      self._handler.drop_schema_version(args.schemaName, args.version)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("drop_schema_version", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_schemas_by_cols(self, seqid, iprot, oprot):
+    args = get_schemas_by_cols_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_schemas_by_cols_result()
+    try:
+      result.success = self._handler.get_schemas_by_cols(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except MetaException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_schemas_by_cols", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_map_schema_version_to_serde(self, seqid, iprot, oprot):
+    args = map_schema_version_to_serde_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = map_schema_version_to_serde_result()
+    try:
+      self._handler.map_schema_version_to_serde(args.schemaName, args.version, args.serdeName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("map_schema_version_to_serde", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_set_schema_version_state(self, seqid, iprot, oprot):
+    args = set_schema_version_state_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = set_schema_version_state_result()
+    try:
+      self._handler.set_schema_version_state(args.schemaName, args.version, args.state)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except InvalidOperationException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except MetaException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("set_schema_version_state", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_add_serde(self, seqid, iprot, oprot):
+    args = add_serde_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = add_serde_result()
+    try:
+      self._handler.add_serde(args.serde)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except AlreadyExistsException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("add_serde", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_serde(self, seqid, iprot, oprot):
+    args = get_serde_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_serde_result()
+    try:
+      result.success = self._handler.get_serde(args.serdeName)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchObjectException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except MetaException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_serde", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
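On the server side, each process_* method above deserializes the _args struct, invokes the handler, and maps the declared metastore exceptions back into REPLY results, while anything unexpected becomes a TApplicationException. A toy sketch of wiring a handler through the Processor (handler and port are hypothetical; a real handler must implement the full Iface):

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from thrift.server import TServer
    from hive_metastore import ThriftHiveMetastore

    class ToySchemaHandler(ThriftHiveMetastore.Iface):
        def __init__(self):
            self.schemas = {}                   # in-memory stand-in for the metastore DB
        def create_ischema(self, schema):
            self.schemas[schema.name] = schema  # a real handler raises AlreadyExistsException on dupes

    server = TServer.TSimpleServer(
        ThriftHiveMetastore.Processor(ToySchemaHandler()),
        TSocket.TServerSocket(port=9090),
        TTransport.TBufferedTransportFactory(),
        TBinaryProtocol.TBinaryProtocolFactory())
    # server.serve()  # blocks; requests dispatch through the _processMap entries added above
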
 
 # HELPER FUNCTIONS AND STRUCTURES
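The hunks below change no behavior: Thrift codegen numbers its local temporaries (_etypeNNN, _sizeNNN, _elemNNN, iterNNN) sequentially across the whole file, so the fourteen new schema/serde methods shift every later counter by 14. Stripped of those temporaries, each renumbered block is the standard list/map (de)serialization pattern; a plain-form sketch of the string-list case (the struct case swaps in elem = Type(); elem.read(iprot)):

    # reading a list of strings
    (etype, size) = iprot.readListBegin()
    values = [iprot.readString() for _ in xrange(size)]
    iprot.readListEnd()

    # writing it back
    oprot.writeListBegin(TType.STRING, len(values))
    for v in values:
        oprot.writeString(v)
    oprot.writeListEnd()
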
 
@@ -13371,10 +14343,10 @@ class get_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype702, _size699) = iprot.readListBegin()
-          for _i703 in xrange(_size699):
-            _elem704 = iprot.readString()
-            self.success.append(_elem704)
+          (_etype716, _size713) = iprot.readListBegin()
+          for _i717 in xrange(_size713):
+            _elem718 = iprot.readString()
+            self.success.append(_elem718)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13397,8 +14369,8 @@ class get_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter705 in self.success:
-        oprot.writeString(iter705)
+      for iter719 in self.success:
+        oprot.writeString(iter719)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -13503,10 +14475,10 @@ class get_all_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype709, _size706) = iprot.readListBegin()
-          for _i710 in xrange(_size706):
-            _elem711 = iprot.readString()
-            self.success.append(_elem711)
+          (_etype723, _size720) = iprot.readListBegin()
+          for _i724 in xrange(_size720):
+            _elem725 = iprot.readString()
+            self.success.append(_elem725)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -13529,8 +14501,8 @@ class get_all_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter712 in self.success:
-        oprot.writeString(iter712)
+      for iter726 in self.success:
+        oprot.writeString(iter726)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -14300,12 +15272,12 @@ class get_type_all_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype714, _vtype715, _size713 ) = iprot.readMapBegin()
-          for _i717 in xrange(_size713):
-            _key718 = iprot.readString()
-            _val719 = Type()
-            _val719.read(iprot)
-            self.success[_key718] = _val719
+          (_ktype728, _vtype729, _size727 ) = iprot.readMapBegin()
+          for _i731 in xrange(_size727):
+            _key732 = iprot.readString()
+            _val733 = Type()
+            _val733.read(iprot)
+            self.success[_key732] = _val733
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -14328,9 +15300,9 @@ class get_type_all_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-      for kiter720,viter721 in self.success.items():
-        oprot.writeString(kiter720)
-        viter721.write(oprot)
+      for kiter734,viter735 in self.success.items():
+        oprot.writeString(kiter734)
+        viter735.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -14473,11 +15445,11 @@ class get_fields_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype725, _size722) = iprot.readListBegin()
-          for _i726 in xrange(_size722):
-            _elem727 = FieldSchema()
-            _elem727.read(iprot)
-            self.success.append(_elem727)
+          (_etype739, _size736) = iprot.readListBegin()
+          for _i740 in xrange(_size736):
+            _elem741 = FieldSchema()
+            _elem741.read(iprot)
+            self.success.append(_elem741)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14512,8 +15484,8 @@ class get_fields_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter728 in self.success:
-        iter728.write(oprot)
+      for iter742 in self.success:
+        iter742.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -14680,11 +15652,11 @@ class get_fields_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype732, _size729) = iprot.readListBegin()
-          for _i733 in xrange(_size729):
-            _elem734 = FieldSchema()
-            _elem734.read(iprot)
-            self.success.append(_elem734)
+          (_etype746, _size743) = iprot.readListBegin()
+          for _i747 in xrange(_size743):
+            _elem748 = FieldSchema()
+            _elem748.read(iprot)
+            self.success.append(_elem748)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14719,8 +15691,8 @@ class get_fields_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter735 in self.success:
-        iter735.write(oprot)
+      for iter749 in self.success:
+        iter749.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -14873,11 +15845,11 @@ class get_schema_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype739, _size736) = iprot.readListBegin()
-          for _i740 in xrange(_size736):
-            _elem741 = FieldSchema()
-            _elem741.read(iprot)
-            self.success.append(_elem741)
+          (_etype753, _size750) = iprot.readListBegin()
+          for _i754 in xrange(_size750):
+            _elem755 = FieldSchema()
+            _elem755.read(iprot)
+            self.success.append(_elem755)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -14912,8 +15884,8 @@ class get_schema_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter742 in self.success:
-        iter742.write(oprot)
+      for iter756 in self.success:
+        iter756.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15080,11 +16052,11 @@ class get_schema_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype746, _size743) = iprot.readListBegin()
-          for _i747 in xrange(_size743):
-            _elem748 = FieldSchema()
-            _elem748.read(iprot)
-            self.success.append(_elem748)
+          (_etype760, _size757) = iprot.readListBegin()
+          for _i761 in xrange(_size757):
+            _elem762 = FieldSchema()
+            _elem762.read(iprot)
+            self.success.append(_elem762)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15119,8 +16091,8 @@ class get_schema_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter749 in self.success:
-        iter749.write(oprot)
+      for iter763 in self.success:
+        iter763.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -15567,44 +16539,44 @@ class create_table_with_constraints_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.primaryKeys = []
-          (_etype753, _size750) = iprot.readListBegin()
-          for _i754 in xrange(_size750):
-            _elem755 = SQLPrimaryKey()
-            _elem755.read(iprot)
-            self.primaryKeys.append(_elem755)
+          (_etype767, _size764) = iprot.readListBegin()
+          for _i768 in xrange(_size764):
+            _elem769 = SQLPrimaryKey()
+            _elem769.read(iprot)
+            self.primaryKeys.append(_elem769)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.foreignKeys = []
-          (_etype759, _size756) = iprot.readListBegin()
-          for _i760 in xrange(_size756):
-            _elem761 = SQLForeignKey()
-            _elem761.read(iprot)
-            self.foreignKeys.append(_elem761)
+          (_etype773, _size770) = iprot.readListBegin()
+          for _i774 in xrange(_size770):
+            _elem775 = SQLForeignKey()
+            _elem775.read(iprot)
+            self.foreignKeys.append(_elem775)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.uniqueConstraints = []
-          (_etype765, _size762) = iprot.readListBegin()
-          for _i766 in xrange(_size762):
-            _elem767 = SQLUniqueConstraint()
-            _elem767.read(iprot)
-            self.uniqueConstraints.append(_elem767)
+          (_etype779, _size776) = iprot.readListBegin()
+          for _i780 in xrange(_size776):
+            _elem781 = SQLUniqueConstraint()
+            _elem781.read(iprot)
+            self.uniqueConstraints.append(_elem781)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.notNullConstraints = []
-          (_etype771, _size768) = iprot.readListBegin()
-          for _i772 in xrange(_size768):
-            _elem773 = SQLNotNullConstraint()
-            _elem773.read(iprot)
-            self.notNullConstraints.append(_elem773)
+          (_etype785, _size782) = iprot.readListBegin()
+          for _i786 in xrange(_size782):
+            _elem787 = SQLNotNullConstraint()
+            _elem787.read(iprot)
+            self.notNullConstraints.append(_elem787)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -15625,29 +16597,29 @@ class create_table_with_constraints_args:
     if self.primaryKeys is not None:
       oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-      for iter774 in self.primaryKeys:
-        iter774.write(oprot)
+      for iter788 in self.primaryKeys:
+        iter788.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.foreignKeys is not None:
       oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-      for iter775 in self.foreignKeys:
-        iter775.write(oprot)
+      for iter789 in self.foreignKeys:
+        iter789.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.uniqueConstraints is not None:
       oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
-      for iter776 in self.uniqueConstraints:
-        iter776.write(oprot)
+      for iter790 in self.uniqueConstraints:
+        iter790.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.notNullConstraints is not None:
       oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
       oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
-      for iter777 in self.notNullConstraints:
-        iter777.write(oprot)
+      for iter791 in self.notNullConstraints:
+        iter791.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -16913,10 +17885,10 @@ class truncate_table_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype781, _size778) = iprot.readListBegin()
-          for _i782 in xrange(_size778):
-            _elem783 = iprot.readString()
-            self.partNames.append(_elem783)
+          (_etype795, _size792) = iprot.readListBegin()
+          for _i796 in xrange(_size792):
+            _elem797 = iprot.readString()
+            self.partNames.append(_elem797)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16941,8 +17913,8 @@ class truncate_table_args:
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter784 in self.partNames:
-        oprot.writeString(iter784)
+      for iter798 in self.partNames:
+        oprot.writeString(iter798)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17142,10 +18114,10 @@ class get_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype788, _size785) = iprot.readListBegin()
-          for _i789 in xrange(_size785):
-            _elem790 = iprot.readString()
-            self.success.append(_elem790)
+          (_etype802, _size799) = iprot.readListBegin()
+          for _i803 in xrange(_size799):
+            _elem804 = iprot.readString()
+            self.success.append(_elem804)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17168,8 +18140,8 @@ class get_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter791 in self.success:
-        oprot.writeString(iter791)
+      for iter805 in self.success:
+        oprot.writeString(iter805)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17319,10 +18291,10 @@ class get_tables_by_type_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype795, _size792) = iprot.readListBegin()
-          for _i796 in xrange(_size792):
-            _elem797 = iprot.readString()
-            self.success.append(_elem797)
+          (_etype809, _size806) = iprot.readListBegin()
+          for _i810 in xrange(_size806):
+            _elem811 = iprot.readString()
+            self.success.append(_elem811)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17345,8 +18317,8 @@ class get_tables_by_type_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter798 in self.success:
-        oprot.writeString(iter798)
+      for iter812 in self.success:
+        oprot.writeString(iter812)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17419,10 +18391,10 @@ class get_table_meta_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype802, _size799) = iprot.readListBegin()
-          for _i803 in xrange(_size799):
-            _elem804 = iprot.readString()
-            self.tbl_types.append(_elem804)
+          (_etype816, _size813) = iprot.readListBegin()
+          for _i817 in xrange(_size813):
+            _elem818 = iprot.readString()
+            self.tbl_types.append(_elem818)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17447,8 +18419,8 @@ class get_table_meta_args:
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter805 in self.tbl_types:
-        oprot.writeString(iter805)
+      for iter819 in self.tbl_types:
+        oprot.writeString(iter819)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17504,11 +18476,11 @@ class get_table_meta_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype809, _size806) = iprot.readListBegin()
-          for _i810 in xrange(_size806):
-            _elem811 = TableMeta()
-            _elem811.read(iprot)
-            self.success.append(_elem811)
+          (_etype823, _size820) = iprot.readListBegin()
+          for _i824 in xrange(_size820):
+            _elem825 = TableMeta()
+            _elem825.read(iprot)
+            self.success.append(_elem825)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17531,8 +18503,8 @@ class get_table_meta_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter812 in self.success:
-        iter812.write(oprot)
+      for iter826 in self.success:
+        iter826.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17656,10 +18628,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype816, _size813) = iprot.readListBegin()
-          for _i817 in xrange(_size813):
-            _elem818 = iprot.readString()
-            self.success.append(_elem818)
+          (_etype830, _size827) = iprot.readListBegin()
+          for _i831 in xrange(_size827):
+            _elem832 = iprot.readString()
+            self.success.append(_elem832)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17682,8 +18654,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter819 in self.success:
-        oprot.writeString(iter819)
+      for iter833 in self.success:
+        oprot.writeString(iter833)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17919,10 +18891,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype823, _size820) = iprot.readListBegin()
-          for _i824 in xrange(_size820):
-            _elem825 = iprot.readString()
-            self.tbl_names.append(_elem825)
+          (_etype837, _size834) = iprot.readListBegin()
+          for _i838 in xrange(_size834):
+            _elem839 = iprot.readString()
+            self.tbl_names.append(_elem839)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17943,8 +18915,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter826 in self.tbl_names:
-        oprot.writeString(iter826)
+      for iter840 in self.tbl_names:
+        oprot.writeString(iter840)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -17996,11 +18968,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype830, _size827) = iprot.readListBegin()
-          for _i831 in xrange(_size827):
-            _elem832 = Table()
-            _elem832.read(iprot)
-            self.success.append(_elem832)
+          (_etype844, _size841) = iprot.readListBegin()
+          for _i845 in xrange(_size841):
+            _elem846 = Table()
+            _elem846.read(iprot)
+            self.success.append(_elem846)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18017,8 +18989,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter833 in self.success:
-        iter833.write(oprot)
+      for iter847 in self.success:
+        iter847.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -18501,10 +19473,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype837, _size834) = iprot.readListBegin()
-          for _i838 in xrange(_size834):
-            _elem839 = iprot.readString()
-            self.success.append(_elem839)
+          (_etype851, _size848) = iprot.readListBegin()
+          for _i852 in xrange(_size848):
+            _elem853 = iprot.readString()
+            self.success.append(_elem853)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18539,8 +19511,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter840 in self.success:
-        oprot.writeString(iter840)
+      for iter854 in self.success:
+        oprot.writeString(iter854)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -19510,11 +20482,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype844, _size841) = iprot.readListBegin()
-          for _i845 in xrange(_size841):
-            _elem846 = Partition()
-            _elem846.read(iprot)
-            self.new_parts.append(_elem846)
+          (_etype858, _size855) = iprot.readListBegin()
+          for _i859 in xrange(_size855):
+            _elem860 = Partition()
+            _elem860.read(iprot)
+            self.new_parts.append(_elem860)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19531,8 +20503,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter847 in self.new_parts:
-        iter847.write(oprot)
+      for iter861 in self.new_parts:
+        iter861.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19690,11 +20662,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype851, _size848) = iprot.readListBegin()
-          for _i852 in xrange(_size848):
-            _elem853 = PartitionSpec()
-            _elem853.read(iprot)
-            self.new_parts.append(_elem853)
+          (_etype865, _size862) = iprot.readListBegin()
+          for _i866 in xrange(_size862):
+            _elem867 = PartitionSpec()
+            _elem867.read(iprot)
+            self.new_parts.append(_elem867)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19711,8 +20683,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter854 in self.new_parts:
-        iter854.write(oprot)
+      for iter868 in self.new_parts:
+        iter868.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19886,10 +20858,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype858, _size855) = iprot.readListBegin()
-          for _i859 in xrange(_size855):
-            _elem860 = iprot.readString()
-            self.part_vals.append(_elem860)
+          (_etype872, _size869) = iprot.readListBegin()
+          for _i873 in xrange(_size869):
+            _elem874 = iprot.readString()
+            self.part_vals.append(_elem874)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19914,8 +20886,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter861 in self.part_vals:
-        oprot.writeString(iter861)
+      for iter875 in self.part_vals:
+        oprot.writeString(iter875)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20268,10 +21240,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype865, _size862) = iprot.readListBegin()
-          for _i866 in xrange(_size862):
-            _elem867 = iprot.readString()
-            self.part_vals.append(_elem867)
+          (_etype879, _size876) = iprot.readListBegin()
+          for _i880 in xrange(_size876):
+            _elem881 = iprot.readString()
+            self.part_vals.append(_elem881)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20302,8 +21274,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter868 in self.part_vals:
-        oprot.writeString(iter868)
+      for iter882 in self.part_vals:
+        oprot.writeString(iter882)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -20898,10 +21870,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype872, _size869) = iprot.readListBegin()
-          for _i873 in xrange(_size869):
-            _elem874 = iprot.readString()
-            self.part_vals.append(_elem874)
+          (_etype886, _size883) = iprot.readListBegin()
+          for _i887 in xrange(_size883):
+            _elem888 = iprot.readString()
+            self.part_vals.append(_elem888)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20931,8 +21903,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter875 in self.part_vals:
-        oprot.writeString(iter875)
+      for iter889 in self.part_vals:
+        oprot.writeString(iter889)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -21105,10 +22077,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype879, _size876) = iprot.readListBegin()
-          for _i880 in xrange(_size876):
-            _elem881 = iprot.readString()
-            self.part_vals.append(_elem881)
+          (_etype893, _size890) = iprot.readListBegin()
+          for _i894 in xrange(_size890):
+            _elem895 = iprot.readString()
+            self.part_vals.append(_elem895)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21144,8 +22116,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter882 in self.part_vals:
-        oprot.writeString(iter882)
+      for iter896 in self.part_vals:
+        oprot.writeString(iter896)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -21882,10 +22854,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype886, _size883) = iprot.readListBegin()
-          for _i887 in xrange(_size883):
-            _elem888 = iprot.readString()
-            self.part_vals.append(_elem888)
+          (_etype900, _size897) = iprot.readListBegin()
+          for _i901 in xrange(_size897):
+            _elem902 = iprot.readString()
+            self.part_vals.append(_elem902)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21910,8 +22882,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter889 in self.part_vals:
-        oprot.writeString(iter889)
+      for iter903 in self.part_vals:
+        oprot.writeString(iter903)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22070,11 +23042,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype891, _vtype892, _size890 ) = iprot.readMapBegin()
-          for _i894 in xrange(_size890):
-            _key895 = iprot.readString()
-            _val896 = iprot.readString()
-            self.partitionSpecs[_key895] = _val896
+          (_ktype905, _vtype906, _size904 ) = iprot.readMapBegin()
+          for _i908 in xrange(_size904):
+            _key909 = iprot.readString()
+            _val910 = iprot.readString()
+            self.partitionSpecs[_key909] = _val910
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -22111,9 +23083,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter897,viter898 in self.partitionSpecs.items():
-        oprot.writeString(kiter897)
-        oprot.writeString(viter898)
+      for kiter911,viter912 in self.partitionSpecs.items():
+        oprot.writeString(kiter911)
+        oprot.writeString(viter912)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -22318,11 +23290,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype900, _vtype901, _size899 ) = iprot.readMapBegin()
-          for _i903 in xrange(_size899):
-            _key904 = iprot.readString()
-            _val905 = iprot.readString()
-            self.partitionSpecs[_key904] = _val905
+          (_ktype914, _vtype915, _size913 ) = iprot.readMapBegin()
+          for _i917 in xrange(_size913):
+            _key918 = iprot.readString()
+            _val919 = iprot.readString()
+            self.partitionSpecs[_key918] = _val919
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -22359,9 +23331,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter906,viter907 in self.partitionSpecs.items():
-        oprot.writeString(kiter906)
-        oprot.writeString(viter907)
+      for kiter920,viter921 in self.partitionSpecs.items():
+        oprot.writeString(kiter920)
+        oprot.writeString(viter921)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -22444,11 +23416,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype911, _size908) = iprot.readListBegin()
-          for _i912 in xrange(_size908):
-            _elem913 = Partition()
-            _elem913.read(iprot)
-            self.success.append(_elem913)
+          (_etype925, _size922) = iprot.readListBegin()
+          for _i926 in xrange(_size922):
+            _elem927 = Partition()
+            _elem927.read(iprot)
+            self.success.append(_elem927)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22489,8 +23461,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter914 in self.success:
-        iter914.write(oprot)
+      for iter928 in self.success:
+        iter928.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -22584,10 +23556,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype918, _size915) = iprot.readListBegin()
-          for _i919 in xrange(_size915):
-            _elem920 = iprot.readString()
-            self.part_vals.append(_elem920)
+          (_etype932, _size929) = iprot.readListBegin()
+          for _i933 in xrange(_size929):
+            _elem934 = iprot.readString()
+            self.part_vals.append(_elem934)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22599,10 +23571,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype924, _size921) = iprot.readListBegin()
-          for _i925 in xrange(_size921):
-            _elem926 = iprot.readString()
-            self.group_names.append(_elem926)
+          (_etype938, _size935) = iprot.readListBegin()
+          for _i939 in xrange(_size935):
+            _elem940 = iprot.readString()
+            self.group_names.append(_elem940)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22627,8 +23599,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter927 in self.part_vals:
-        oprot.writeString(iter927)
+      for iter941 in self.part_vals:
+        oprot.writeString(iter941)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -22638,8 +23610,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter928 in self.group_names:
-        oprot.writeString(iter928)
+      for iter942 in self.group_names:
+        oprot.writeString(iter942)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23068,11 +24040,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype932, _size929) = iprot.readListBegin()
-          for _i933 in xrange(_size929):
-            _elem934 = Partition()
-            _elem934.read(iprot)
-            self.success.append(_elem934)
+          (_etype946, _size943) = iprot.readListBegin()
+          for _i947 in xrange(_size943):
+            _elem948 = Partition()
+            _elem948.read(iprot)
+            self.success.append(_elem948)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23101,8 +24073,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter935 in self.success:
-        iter935.write(oprot)
+      for iter949 in self.success:
+        iter949.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23196,10 +24168,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype939, _size936) = iprot.readListBegin()
-          for _i940 in xrange(_size936):
-            _elem941 = iprot.readString()
-            self.group_names.append(_elem941)
+          (_etype953, _size950) = iprot.readListBegin()
+          for _i954 in xrange(_size950):
+            _elem955 = iprot.readString()
+            self.group_names.append(_elem955)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23232,8 +24204,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter942 in self.group_names:
-        oprot.writeString(iter942)
+      for iter956 in self.group_names:
+        oprot.writeString(iter956)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23294,11 +24266,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype946, _size943) = iprot.readListBegin()
-          for _i947 in xrange(_size943):
-            _elem948 = Partition()
-            _elem948.read(iprot)
-            self.success.append(_elem948)
+          (_etype960, _size957) = iprot.readListBegin()
+          for _i961 in xrange(_size957):
+            _elem962 = Partition()
+            _elem962.read(iprot)
+            self.success.append(_elem962)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23327,8 +24299,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter949 in self.success:
-        iter949.write(oprot)
+      for iter963 in self.success:
+        iter963.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23486,11 +24458,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype953, _size950) = iprot.readListBegin()
-          for _i954 in xrange(_size950):
-            _elem955 = PartitionSpec()
-            _elem955.read(iprot)
-            self.success.append(_elem955)
+          (_etype967, _size964) = iprot.readListBegin()
+          for _i968 in xrange(_size964):
+            _elem969 = PartitionSpec()
+            _elem969.read(iprot)
+            self.success.append(_elem969)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23519,8 +24491,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter956 in self.success:
-        iter956.write(oprot)
+      for iter970 in self.success:
+        iter970.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23678,10 +24650,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype960, _size957) = iprot.readListBegin()
-          for _i961 in xrange(_size957):
-            _elem962 = iprot.readString()
-            self.success.append(_elem962)
+          (_etype974, _size971) = iprot.readListBegin()
+          for _i975 in xrange(_size971):
+            _elem976 = iprot.readString()
+            self.success.append(_elem976)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23710,8 +24682,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter963 in self.success:
-        oprot.writeString(iter963)
+      for iter977 in self.success:
+        oprot.writeString(iter977)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23951,10 +24923,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype967, _size964) = iprot.readListBegin()
-          for _i968 in xrange(_size964):
-            _elem969 = iprot.readString()
-            self.part_vals.append(_elem969)
+          (_etype981, _size978) = iprot.readListBegin()
+          for _i982 in xrange(_size978):
+            _elem983 = iprot.readString()
+            self.part_vals.append(_elem983)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23984,8 +24956,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter970 in self.part_vals:
-        oprot.writeString(iter970)
+      for iter984 in self.part_vals:
+        oprot.writeString(iter984)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -24049,11 +25021,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype974, _size971) = iprot.readListBegin()
-          for _i975 in xrange(_size971):
-            _elem976 = Partition()
-            _elem976.read(iprot)
-            self.success.append(_elem976)
+          (_etype988, _size985) = iprot.readListBegin()
+          for _i989 in xrange(_size985):
+            _elem990 = Partition()
+            _elem990.read(iprot)
+            self.success.append(_elem990)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24082,8 +25054,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter977 in self.success:
-        iter977.write(oprot)
+      for iter991 in self.success:
+        iter991.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24170,10 +25142,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype981, _size978) = iprot.readListBegin()
-          for _i982 in xrange(_size978):
-            _elem983 = iprot.readString()
-            self.part_vals.append(_elem983)
+          (_etype995, _size992) = iprot.readListBegin()
+          for _i996 in xrange(_size992):
+            _elem997 = iprot.readString()
+            self.part_vals.append(_elem997)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24190,10 +25162,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype987, _size984) = iprot.readListBegin()
-          for _i988 in xrange(_size984):
-            _elem989 = iprot.readString()
-            self.group_names.append(_elem989)
+          (_etype1001, _size998) = iprot.readListBegin()
+          for _i1002 in xrange(_size998):
+            _elem1003 = iprot.readString()
+            self.group_names.append(_elem1003)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24218,8 +25190,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter990 in self.part_vals:
-        oprot.writeString(iter990)
+      for iter1004 in self.part_vals:
+        oprot.writeString(iter1004)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -24233,8 +25205,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter991 in self.group_names:
-        oprot.writeString(iter991)
+      for iter1005 in self.group_names:
+        oprot.writeString(iter1005)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -24296,11 +25268,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype995, _size992) = iprot.readListBegin()
-          for _i996 in xrange(_size992):
-            _elem997 = Partition()
-            _elem997.read(iprot)
-            self.success.append(_elem997)
+          (_etype1009, _size1006) = iprot.readListBegin()
+          for _i1010 in xrange(_size1006):
+            _elem1011 = Partition()
+            _elem1011.read(iprot)
+            self.success.append(_elem1011)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24329,8 +25301,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter998 in self.success:
-        iter998.write(oprot)
+      for iter1012 in self.success:
+        iter1012.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24411,10 +25383,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1002, _size999) = iprot.readListBegin()
-          for _i1003 in xrange(_size999):
-            _elem1004 = iprot.readString()
-            self.part_vals.append(_elem1004)
+          (_etype1016, _size1013) = iprot.readListBegin()
+          for _i1017 in xrange(_size1013):
+            _elem1018 = iprot.readString()
+            self.part_vals.append(_elem1018)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24444,8 +25416,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1005 in self.part_vals:
-        oprot.writeString(iter1005)
+      for iter1019 in self.part_vals:
+        oprot.writeString(iter1019)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -24509,10 +25481,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1009, _size1006) = iprot.readListBegin()
-          for _i1010 in xrange(_size1006):
-            _elem1011 = iprot.readString()
-            self.success.append(_elem1011)
+          (_etype1023, _size1020) = iprot.readListBegin()
+          for _i1024 in xrange(_size1020):
+            _elem1025 = iprot.readString()
+            self.success.append(_elem1025)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24541,8 +25513,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1012 in self.success:
-        oprot.writeString(iter1012)
+      for iter1026 in self.success:
+        oprot.writeString(iter1026)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24713,11 +25685,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1016, _size1013) = iprot.readListBegin()
-          for _i1017 in xrange(_size1013):
-            _elem1018 = Partition()
-            _elem1018.read(iprot)
-            self.success.append(_elem1018)
+          (_etype1030, _size1027) = iprot.readListBegin()
+          for _i1031 in xrange(_size1027):
+            _elem1032 = Partition()
+            _elem1032.read(iprot)
+            self.success.append(_elem1032)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24746,8 +25718,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1019 in self.success:
-        iter1019.write(oprot)
+      for iter1033 in self.success:
+        iter1033.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -24918,11 +25890,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1023, _size1020) = iprot.readListBegin()
-          for _i1024 in xrange(_size1020):
-            _elem1025 = PartitionSpec()
-            _elem1025.read(iprot)
-            self.success.append(_elem1025)
+          (_etype1037, _size1034) = iprot.readListBegin()
+          for _i1038 in xrange(_size1034):
+            _elem1039 = PartitionSpec()
+            _elem1039.read(iprot)
+            self.success.append(_elem1039)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24951,8 +25923,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1026 in self.success:
-        iter1026.write(oprot)
+      for iter1040 in self.success:
+        iter1040.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25372,10 +26344,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype1030, _size1027) = iprot.readListBegin()
-          for _i1031 in xrange(_size1027):
-            _elem1032 = iprot.readString()
-            self.names.append(_elem1032)
+          (_etype1044, _size1041) = iprot.readListBegin()
+          for _i1045 in xrange(_size1041):
+            _elem1046 = iprot.readString()
+            self.names.append(_elem1046)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25400,8 +26372,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter1033 in self.names:
-        oprot.writeString(iter1033)
+      for iter1047 in self.names:
+        oprot.writeString(iter1047)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -25460,11 +26432,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1037, _size1034) = iprot.readListBegin()
-          for _i1038 in xrange(_size1034):
-            _elem1039 = Partition()
-            _elem1039.read(iprot)
-            self.success.append(_elem1039)
+          (_etype1051, _size1048) = iprot.readListBegin()
+          for _i1052 in xrange(_size1048):
+            _elem1053 = Partition()
+            _elem1053.read(iprot)
+            self.success.append(_elem1053)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25493,8 +26465,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1040 in self.success:
-        iter1040.write(oprot)
+      for iter1054 in self.success:
+        iter1054.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -25744,11 +26716,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1044, _size1041) = iprot.readListBegin()
-          for _i1045 in xrange(_size1041):
-            _elem1046 = Partition()
-            _elem1046.read(iprot)
-            self.new_parts.append(_elem1046)
+          (_etype1058, _size1055) = iprot.readListBegin()
+          for _i1059 in xrange(_size1055):
+            _elem1060 = Partition()
+            _elem1060.read(iprot)
+            self.new_parts.append(_elem1060)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25773,8 +26745,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1047 in self.new_parts:
-        iter1047.write(oprot)
+      for iter1061 in self.new_parts:
+        iter1061.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -25927,11 +26899,11 @@ class alter_partitions_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1051, _size1048) = iprot.readListBegin()
-          for _i1052 in xrange(_size1048):
-            _elem1053 = Partition()
-            _elem1053.read(iprot)
-            self.new_parts.append(_elem1053)
+          (_etype1065, _size1062) = iprot.readListBegin()
+          for _i1066 in xrange(_size1062):
+            _elem1067 = Partition()
+            _elem1067.read(iprot)
+            self.new_parts.append(_elem1067)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25962,8 +26934,8 @@ class alter_partitions_with_environment_context_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1054 in self.new_parts:
-        iter1054.write(oprot)
+      for iter1068 in self.new_parts:
+        iter1068.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -26307,10 +27279,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1058, _size1055) = iprot.readListBegin()
-          for _i1059 in xrange(_size1055):
-            _elem1060 = iprot.readString()
-            self.part_vals.append(_elem1060)
+          (_etype1072, _size1069) = iprot.readListBegin()
+          for _i1073 in xrange(_size1069):
+            _elem1074 = iprot.readString()
+            self.part_vals.append(_elem1074)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26341,8 +27313,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1061 in self.part_vals:
-        oprot.writeString(iter1061)
+      for iter1075 in self.part_vals:
+        oprot.writeString(iter1075)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -26484,10 +27456,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1065, _size1062) = iprot.readListBegin()
-          for _i1066 in xrange(_size1062):
-            _elem1067 = iprot.readString()
-            self.part_vals.append(_elem1067)
+          (_etype1079, _size1076) = iprot.readListBegin()
+          for _i1080 in xrange(_size1076):
+            _elem1081 = iprot.readString()
+            self.part_vals.append(_elem1081)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26509,8 +27481,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1068 in self.part_vals:
-        oprot.writeString(iter1068)
+      for iter1082 in self.part_vals:
+        oprot.writeString(iter1082)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -26868,10 +27840,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1072, _size1069) = iprot.readListBegin()
-          for _i1073 in xrange(_size1069):
-            _elem1074 = iprot.readString()
-            self.success.append(_elem1074)
+          (_etype1086, _size1083) = iprot.readListBegin()
+          for _i1087 in xrange(_size1083):
+            _elem1088 = iprot.readString()
+            self.success.append(_elem1088)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26894,8 +27866,8 @@ class partition_name_to_vals_result:
     if self.

<TRUNCATED>
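
All of the hunks above are mechanical: the Thrift compiler numbers its temporary
variables (_sizeN, _etypeN, _elemN, iterN, kiterN, viterN) with a running counter, so
methods added earlier in the service definition shift every suffix in the regenerated
Python client without changing any behavior. For reference, the equivalent pattern in
the Thrift-generated Java (the gen-javabean side of this same patch series) looks as
follows. This is a hedged sketch, not code from the patch: the class name is invented,
and the field name and id ("part_vals", 3) are taken from the hunks above purely for
illustration.

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

public class PartValsCodecSketch {
  // Illustrative field descriptor mirroring the part_vals hunks above (field id 3).
  private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC =
      new org.apache.thrift.protocol.TField("part_vals", TType.LIST, (short) 3);

  // Mirror of the Python read side: readListBegin / per-element readString / readListEnd.
  static List<String> readPartVals(TProtocol iprot) throws TException {
    TList _list = iprot.readListBegin();          // carries element type and size
    List<String> partVals = new ArrayList<String>(_list.size);
    for (int _i = 0; _i < _list.size; ++_i) {
      partVals.add(iprot.readString());           // one element per iteration
    }
    iprot.readListEnd();
    return partVals;
  }

  // Mirror of the Python write side: writeListBegin / per-element writeString / writeListEnd.
  static void writePartVals(TProtocol oprot, List<String> partVals) throws TException {
    oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
    oprot.writeListBegin(new TList(TType.STRING, partVals.size()));
    for (String iter : partVals) {
      oprot.writeString(iter);
    }
    oprot.writeListEnd();
    oprot.writeFieldEnd();
  }
}

Only the numeric suffixes on the compiler's temporaries differ between regenerations,
which is all the renumbered hunks above record.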

[26/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java
new file mode 100644
index 0000000..a8f7d2a
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java
@@ -0,0 +1,467 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.tools;
+
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.net.URI;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Random;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
+import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestSchemaToolForMetastore {
+  private static final Logger LOG = LoggerFactory.getLogger(TestSchemaToolForMetastore.class);
+
+  private MetastoreSchemaTool schemaTool;
+  private Connection conn;
+  private Configuration conf;
+  private String testMetastoreDB;
+  private PrintStream errStream;
+  private PrintStream outStream;
+
+  @Before
+  public void setUp() throws HiveMetaException, IOException {
+    testMetastoreDB = System.getProperty("java.io.tmpdir") +
+        File.separator + "test_metastore-" + new Random().nextInt();
+    System.setProperty(ConfVars.CONNECTURLKEY.toString(),
+        "jdbc:derby:" + testMetastoreDB + ";create=true");
+    conf = MetastoreConf.newMetastoreConf();
+    schemaTool = new MetastoreSchemaTool(
+        System.getProperty("test.tmp.dir", "target/tmp"), conf, "derby");
+    schemaTool.setUserName(MetastoreConf.getVar(schemaTool.getConf(), ConfVars.CONNECTION_USER_NAME));
+    schemaTool.setPassWord(MetastoreConf.getPassword(schemaTool.getConf(), ConfVars.PWD));
+    System.setProperty("beeLine.system.exit", "true");
+    errStream = System.err;
+    outStream = System.out;
+    conn = schemaTool.getConnectionToMetastore(false);
+  }
+
+  @After
+  public void tearDown() throws IOException, SQLException {
+    File metaStoreDir = new File(testMetastoreDB);
+    if (metaStoreDir.exists()) {
+      FileUtils.forceDeleteOnExit(metaStoreDir);
+    }
+    System.setOut(outStream);
+    System.setErr(errStream);
+    if (conn != null) {
+      conn.close();
+    }
+  }
+
+  // Test the sequence validation functionality
+  @Test
+  public void testValidateSequences() throws Exception {
+    schemaTool.doInit();
+
+    // Test empty database
+    boolean isValid = schemaTool.validateSequences(conn);
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    String[] scripts = new String[] {
+        "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
+        "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSequences(conn);
+    Assert.assertTrue(isValid);
+
+    // Test invalid case
+    scripts = new String[] {
+        "delete from SEQUENCE_TABLE;",
+        "delete from DBS;",
+        "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
+        "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSequences(conn);
+    Assert.assertFalse(isValid);
+  }
+
+  // Test to validate that all tables exist in the HMS metastore.
+  @Test
+  public void testValidateSchemaTables() throws Exception {
+    schemaTool.doInit("2.3.0");
+
+    boolean isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+
+    // upgrade from the 2.3.0 schema and re-validate
+    schemaTool.doUpgrade("2.3.0");
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+
+    // Simulate a missing table scenario by renaming a couple of tables
+    String[] scripts = new String[] {
+        "RENAME TABLE SEQUENCE_TABLE to SEQUENCE_TABLE_RENAMED;",
+        "RENAME TABLE NUCLEUS_TABLES to NUCLEUS_TABLES_RENAMED;"
+    };
+
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertFalse(isValid);
+
+    // Restore the renamed tables
+    scripts = new String[] {
+        "RENAME TABLE SEQUENCE_TABLE_RENAMED to SEQUENCE_TABLE;",
+        "RENAME TABLE NUCLEUS_TABLES_RENAMED to NUCLEUS_TABLES;"
+    };
+
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+  }
+
+  // Test the validation of incorrect NULL values in the tables
+  @Test
+  public void testValidateNullValues() throws Exception {
+    schemaTool.doInit();
+
+    // Test empty database
+    boolean isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    createTestHiveTableSchemas();
+    isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertTrue(isValid);
+
+    // Test invalid case
+    String[] scripts = new String[] {
+        "update TBLS set SD_ID=null"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateColumnNullValues(conn);
+    Assert.assertFalse(isValid);
+  }
+
+  // Test dryrun of schema initialization
+  @Test
+  public void testSchemaInitDryRun() throws Exception {
+    schemaTool.setDryRun(true);
+    schemaTool.doInit("3.0.0");
+    schemaTool.setDryRun(false);
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // Verification should fail since this was only a dry run
+      return;
+    }
+    Assert.fail("Dry run shouldn't create actual metastore");
+  }
+
+  // Test dryrun of schema upgrade
+  @Test
+  public void testSchemaUpgradeDryRun() throws Exception {
+    schemaTool.doInit("2.3.0");
+
+    schemaTool.setDryRun(true);
+    schemaTool.doUpgrade("2.3.0");
+    schemaTool.setDryRun(false);
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // Verification should fail since this was only a dry run
+      return;
+    }
+    Assert.fail("Dry run shouldn't upgrade metastore schema");
+  }
+
+  /**
+   * Test schema initialization
+   */
+  @Test
+  public void testSchemaInit() throws Exception {
+    IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf,
+        System.getProperty("test.tmp.dir", "target/tmp"), "derby");
+    schemaTool.doInit(metastoreSchemaInfo.getHiveSchemaVersion());
+    schemaTool.verifySchemaVersion();
+  }
+
+  /**
+   * Test validation for schema versions
+   */
+  @Test
+  public void testValidateSchemaVersions() throws Exception {
+    schemaTool.doInit();
+    boolean isValid = schemaTool.validateSchemaVersions();
+    Assert.assertTrue(isValid);
+
+    // Test an invalid case with multiple versions
+    String[] scripts = new String[] {
+        "insert into VERSION values(100, '2.2.0', 'Hive release version 2.2.0')"
+    };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertFalse(isValid);
+
+    scripts = new String[] {
+        "delete from VERSION where VER_ID = 100"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertTrue(isValid);
+
+    // Test an invalid case without version
+    scripts = new String[] {
+        "delete from VERSION"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateSchemaVersions();
+    Assert.assertFalse(isValid);
+  }
+
+  /**
+   * Test schema upgrade
+   */
+  @Test
+  public void testSchemaUpgrade() throws Exception {
+    boolean foundException = false;
+    // Initialize 2.3.0 schema
+    schemaTool.doInit("2.3.0");
+    // verify that driver fails due to older version schema
+    try {
+      schemaTool.verifySchemaVersion();
+    } catch (HiveMetaException e) {
+      // Expected to fail due to old schema
+      foundException = true;
+    }
+    if (!foundException) {
+      throw new Exception(
+          "Hive operations shouldn't pass with older version schema");
+    }
+
+    // Generate dummy pre-upgrade script with errors
+    String invalidPreUpgradeScript = writeDummyPreUpgradeScript(
+        0, "upgrade-2.3.0-to-3.0.0.derby.sql", "foo bar;");
+    // Generate dummy pre-upgrade scripts with valid SQL
+    String validPreUpgradeScript0 = writeDummyPreUpgradeScript(
+        1, "upgrade-2.3.0-to-3.0.0.derby.sql",
+        "CREATE TABLE schema_test0 (id integer);");
+    String validPreUpgradeScript1 = writeDummyPreUpgradeScript(
+        2, "upgrade-2.3.0-to-3.0.0.derby.sql",
+        "CREATE TABLE schema_test1 (id integer);");
+
+    // Capture system out and err
+    schemaTool.setVerbose(true);
+    OutputStream stderr = new ByteArrayOutputStream();
+    PrintStream errPrintStream = new PrintStream(stderr);
+    System.setErr(errPrintStream);
+    OutputStream stdout = new ByteArrayOutputStream();
+    PrintStream outPrintStream = new PrintStream(stdout);
+    System.setOut(outPrintStream);
+
+    // Upgrade schema from 2.3.0 to latest
+    schemaTool.doUpgrade("2.3.0");
+
+    LOG.info("stdout is " + stdout.toString());
+    LOG.info("stderr is " + stderr.toString());
+
+    // Verify that the schemaTool ran pre-upgrade scripts and ignored errors
+    Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript));
+    Assert.assertTrue(stderr.toString().contains("foo"));
+    Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript0));
+    Assert.assertFalse(stderr.toString().contains(validPreUpgradeScript1));
+    Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript0));
+    Assert.assertTrue(stdout.toString().contains(validPreUpgradeScript1));
+
+    // Verify that driver works fine with latest schema
+    schemaTool.verifySchemaVersion();
+  }
+
+  /**
+   * Test validate uri of locations
+   */
+  @Test
+  public void testValidateLocations() throws Exception {
+    schemaTool.doInit();
+    URI defaultRoot = new URI("hdfs://myhost.com:8020");
+    URI defaultRoot2 = new URI("s3://myhost2.com:8888");
+    // Check empty DB
+    boolean isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertTrue(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot,defaultRoot2});
+    Assert.assertTrue(isValid);
+
+    // Test valid case
+    String[] scripts = new String[] {
+         "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+         "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role');",
+         "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+         "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+         "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+         "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+         "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+         "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,3,'myView','VIRTUAL_VIEW','select a.col1,a.col2 from foo','select * from foo','n');",
+         "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,7,0 ,'hive',0,4000,'mytal4012','MANAGED_TABLE',NULL,NULL,'n');",
+         "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);",
+         "insert into SKEWED_STRING_LIST values(1);",
+         "insert into SKEWED_STRING_LIST values(2);",
+         "insert into SKEWED_COL_VALUE_LOC_MAP values(1,1,'hdfs://myhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');",
+         "insert into SKEWED_COL_VALUE_LOC_MAP values(2,2,'s3://myhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');"
+       };
+    File scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertTrue(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot, defaultRoot2});
+    Assert.assertTrue(isValid);
+    scripts = new String[] {
+        "delete from SKEWED_COL_VALUE_LOC_MAP;",
+        "delete from SKEWED_STRING_LIST;",
+        "delete from PARTITIONS;",
+        "delete from TBLS;",
+        "delete from SDS;",
+        "delete from DBS;",
+        "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+        "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role');",
+        "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role');",
+        "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role');",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4001,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4003,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4004,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (4002,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4011 ,1435255431,4,0 ,'hive',0,4001,'mytal4011','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4012 ,1435255431,4,0 ,'hive',0,4002,'','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4013 ,1435255431,4,0 ,'hive',0,4003,'mytal4013','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (4014 ,1435255431,2,0 ,'hive',0,4003,'','MANAGED_TABLE',NULL,NULL,'n');",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4001, 1441402388,0, 'd1=1/d2=4001',4001,4011);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4002, 1441402388,0, 'd1=1/d2=4002',4002,4012);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4003, 1441402388,0, 'd1=1/d2=4003',4003,4013);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(4004, 1441402388,0, 'd1=1/d2=4004',4004,4014);",
+        "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(5000, 1441402388,0, 'd1=1/d2=5000',5000,2);",
+        "insert into SKEWED_STRING_LIST values(1);",
+        "insert into SKEWED_STRING_LIST values(2);",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(1,1,'hdfs://yourhost.com:8020/user/hive/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');",
+        "insert into SKEWED_COL_VALUE_LOC_MAP values(2,2,'file:///user/admin/warehouse/mytal/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/');"
+    };
+    scriptFile = generateTestScript(scripts);
+    schemaTool.runSqlLine(scriptFile.getPath());
+    isValid = schemaTool.validateLocations(conn, null);
+    Assert.assertFalse(isValid);
+    isValid = schemaTool.validateLocations(conn, new URI[] {defaultRoot, defaultRoot2});
+    Assert.assertFalse(isValid);
+  }
+
+  @Test
+  public void testHiveMetastoreDbPropertiesTable() throws HiveMetaException, IOException {
+    schemaTool.doInit("3.0.0");
+    validateMetastoreDbPropertiesTable();
+  }
+
+  @Test
+  public void testMetastoreDbPropertiesAfterUpgrade() throws HiveMetaException, IOException {
+    schemaTool.doInit("2.3.0");
+    schemaTool.doUpgrade();
+    validateMetastoreDbPropertiesTable();
+  }
+
+  private File generateTestScript(String [] stmts) throws IOException {
+    File testScriptFile = File.createTempFile("schematest", ".sql");
+    testScriptFile.deleteOnExit();
+    FileWriter fstream = new FileWriter(testScriptFile.getPath());
+    BufferedWriter out = new BufferedWriter(fstream);
+    for (String line: stmts) {
+      out.write(line);
+      out.newLine();
+    }
+    out.close();
+    return testScriptFile;
+  }
+
+  private void validateMetastoreDbPropertiesTable() throws HiveMetaException, IOException {
+    boolean isValid = schemaTool.validateSchemaTables(conn);
+    Assert.assertTrue(isValid);
+    // Adding the same property key twice should throw a unique key constraint violation exception
+    String[] scripts = new String[] {
+        "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-1', 'dummy uuid 1')",
+        "insert into METASTORE_DB_PROPERTIES values ('guid', 'test-uuid-2', 'dummy uuid 2')", };
+    File scriptFile = generateTestScript(scripts);
+    Exception ex = null;
+    try {
+      schemaTool.runSqlLine(scriptFile.getPath());
+    } catch (Exception iox) {
+      ex = iox;
+    }
+    Assert.assertTrue(ex != null && ex instanceof IOException);
+  }
+
+  /**
+   * Write out a dummy pre-upgrade script with given SQL statement.
+   */
+  private String writeDummyPreUpgradeScript(int index, String upgradeScriptName,
+      String sql) throws Exception {
+    String preUpgradeScript = "pre-" + index + "-" + upgradeScriptName;
+    String dummyPreScriptPath = System.getProperty("test.tmp.dir", "target/tmp") +
+        File.separatorChar + "scripts" + File.separatorChar + "metastore" +
+        File.separatorChar + "upgrade" + File.separatorChar + "derby" +
+        File.separatorChar + preUpgradeScript;
+    FileWriter fstream = new FileWriter(dummyPreScriptPath);
+    BufferedWriter out = new BufferedWriter(fstream);
+    out.write(sql + System.getProperty("line.separator"));
+    out.close();
+    return preUpgradeScript;
+  }
+
+  // Insert the records in DB to simulate a hive table
+  private void createTestHiveTableSchemas() throws IOException {
+     String[] scripts = new String[] {
+          "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');",
+          "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+          "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
+          "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
+          "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3 ,1435255431,2,0 ,'hive',0,2,'aTable','MANAGED_TABLE',NULL,NULL,'n');",
+          "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2);"
+        };
+     File scriptFile = generateTestScript(scripts);
+     schemaTool.runSqlLine(scriptFile.getPath());
+  }
+}
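
Taken together, the test above doubles as a reference for driving the standalone
metastore schema tool programmatically. The following is a hedged sketch restricted to
the MetastoreSchemaTool calls the test itself makes; the driver class name, Derby URL,
and script directory are placeholders, and MetastoreSchemaTool is assumed to live in
the same org.apache.hadoop.hive.metastore.tools package as the test.

import java.sql.Connection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
import org.apache.hadoop.hive.metastore.tools.MetastoreSchemaTool;

public class SchemaToolDriverSketch {
  public static void main(String[] args) throws Exception {
    // Point the metastore at a throwaway embedded Derby database (placeholder path).
    System.setProperty(ConfVars.CONNECTURLKEY.toString(),
        "jdbc:derby:/tmp/demo_metastore;create=true");
    Configuration conf = MetastoreConf.newMetastoreConf();
    // Same constructor the test uses: script directory, conf, database type.
    MetastoreSchemaTool tool = new MetastoreSchemaTool("target/tmp", conf, "derby");
    tool.setUserName(MetastoreConf.getVar(tool.getConf(), ConfVars.CONNECTION_USER_NAME));
    tool.setPassWord(MetastoreConf.getPassword(tool.getConf(), ConfVars.PWD));

    tool.doInit("2.3.0");     // lay down an old-version schema
    tool.doUpgrade("2.3.0");  // run the 2.3.0 to latest upgrade scripts
    tool.verifySchemaVersion();

    try (Connection conn = tool.getConnectionToMetastore(false)) {
      // The same validators the test exercises, each returning a boolean verdict.
      System.out.println("sequences ok: " + tool.validateSequences(conn));
      System.out.println("tables ok:    " + tool.validateSchemaTables(conn));
      System.out.println("null cols ok: " + tool.validateColumnNullValues(conn));
      System.out.println("locations ok: " + tool.validateLocations(conn, null));
    }
    System.out.println("versions ok:  " + tool.validateSchemaVersions());
  }
}

Each validate* method returns a boolean verdict instead of throwing, which is why the
test asserts on the returned values rather than catching exceptions.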


[44/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
new file mode 100644
index 0000000..92d8b52
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
@@ -0,0 +1,1162 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ISchema implements org.apache.thrift.TBase<ISchema, ISchema._Fields>, java.io.Serializable, Cloneable, Comparable<ISchema> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ISchema");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)6);
+  private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)8);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ISchemaStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ISchemaTupleSchemeFactory());
+  }
+
+  private SchemaType schemaType; // required
+  private String name; // required
+  private String dbName; // required
+  private SchemaCompatibility compatibility; // required
+  private SchemaValidation validationLevel; // required
+  private boolean canEvolve; // required
+  private String schemaGroup; // optional
+  private String description; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see SchemaType
+     */
+    SCHEMA_TYPE((short)1, "schemaType"),
+    NAME((short)2, "name"),
+    DB_NAME((short)3, "dbName"),
+    /**
+     * 
+     * @see SchemaCompatibility
+     */
+    COMPATIBILITY((short)4, "compatibility"),
+    /**
+     * 
+     * @see SchemaValidation
+     */
+    VALIDATION_LEVEL((short)5, "validationLevel"),
+    CAN_EVOLVE((short)6, "canEvolve"),
+    SCHEMA_GROUP((short)7, "schemaGroup"),
+    DESCRIPTION((short)8, "description");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_TYPE
+          return SCHEMA_TYPE;
+        case 2: // NAME
+          return NAME;
+        case 3: // DB_NAME
+          return DB_NAME;
+        case 4: // COMPATIBILITY
+          return COMPATIBILITY;
+        case 5: // VALIDATION_LEVEL
+          return VALIDATION_LEVEL;
+        case 6: // CAN_EVOLVE
+          return CAN_EVOLVE;
+        case 7: // SCHEMA_GROUP
+          return SCHEMA_GROUP;
+        case 8: // DESCRIPTION
+          return DESCRIPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CANEVOLVE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.SCHEMA_GROUP,_Fields.DESCRIPTION};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_TYPE, new org.apache.thrift.meta_data.FieldMetaData("schemaType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaType.class)));
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COMPATIBILITY, new org.apache.thrift.meta_data.FieldMetaData("compatibility", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaCompatibility.class)));
+    tmpMap.put(_Fields.VALIDATION_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("validationLevel", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaValidation.class)));
+    tmpMap.put(_Fields.CAN_EVOLVE, new org.apache.thrift.meta_data.FieldMetaData("canEvolve", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.SCHEMA_GROUP, new org.apache.thrift.meta_data.FieldMetaData("schemaGroup", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ISchema.class, metaDataMap);
+  }
+
+  public ISchema() {
+  }
+
+  public ISchema(
+    SchemaType schemaType,
+    String name,
+    String dbName,
+    SchemaCompatibility compatibility,
+    SchemaValidation validationLevel,
+    boolean canEvolve)
+  {
+    this();
+    this.schemaType = schemaType;
+    this.name = name;
+    this.dbName = dbName;
+    this.compatibility = compatibility;
+    this.validationLevel = validationLevel;
+    this.canEvolve = canEvolve;
+    setCanEvolveIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ISchema(ISchema other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetSchemaType()) {
+      this.schemaType = other.schemaType;
+    }
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetCompatibility()) {
+      this.compatibility = other.compatibility;
+    }
+    if (other.isSetValidationLevel()) {
+      this.validationLevel = other.validationLevel;
+    }
+    this.canEvolve = other.canEvolve;
+    if (other.isSetSchemaGroup()) {
+      this.schemaGroup = other.schemaGroup;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+  }
+
+  public ISchema deepCopy() {
+    return new ISchema(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaType = null;
+    this.name = null;
+    this.dbName = null;
+    this.compatibility = null;
+    this.validationLevel = null;
+    setCanEvolveIsSet(false);
+    this.canEvolve = false;
+    this.schemaGroup = null;
+    this.description = null;
+  }
+
+  /**
+   * 
+   * @see SchemaType
+   */
+  public SchemaType getSchemaType() {
+    return this.schemaType;
+  }
+
+  /**
+   * 
+   * @see SchemaType
+   */
+  public void setSchemaType(SchemaType schemaType) {
+    this.schemaType = schemaType;
+  }
+
+  public void unsetSchemaType() {
+    this.schemaType = null;
+  }
+
+  /** Returns true if field schemaType is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaType() {
+    return this.schemaType != null;
+  }
+
+  public void setSchemaTypeIsSet(boolean value) {
+    if (!value) {
+      this.schemaType = null;
+    }
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SchemaCompatibility
+   */
+  public SchemaCompatibility getCompatibility() {
+    return this.compatibility;
+  }
+
+  /**
+   * 
+   * @see SchemaCompatibility
+   */
+  public void setCompatibility(SchemaCompatibility compatibility) {
+    this.compatibility = compatibility;
+  }
+
+  public void unsetCompatibility() {
+    this.compatibility = null;
+  }
+
+  /** Returns true if field compatibility is set (has been assigned a value) and false otherwise */
+  public boolean isSetCompatibility() {
+    return this.compatibility != null;
+  }
+
+  public void setCompatibilityIsSet(boolean value) {
+    if (!value) {
+      this.compatibility = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SchemaValidation
+   */
+  public SchemaValidation getValidationLevel() {
+    return this.validationLevel;
+  }
+
+  /**
+   * 
+   * @see SchemaValidation
+   */
+  public void setValidationLevel(SchemaValidation validationLevel) {
+    this.validationLevel = validationLevel;
+  }
+
+  public void unsetValidationLevel() {
+    this.validationLevel = null;
+  }
+
+  /** Returns true if field validationLevel is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidationLevel() {
+    return this.validationLevel != null;
+  }
+
+  public void setValidationLevelIsSet(boolean value) {
+    if (!value) {
+      this.validationLevel = null;
+    }
+  }
+
+  public boolean isCanEvolve() {
+    return this.canEvolve;
+  }
+
+  public void setCanEvolve(boolean canEvolve) {
+    this.canEvolve = canEvolve;
+    setCanEvolveIsSet(true);
+  }
+
+  public void unsetCanEvolve() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CANEVOLVE_ISSET_ID);
+  }
+
+  /** Returns true if field canEvolve is set (has been assigned a value) and false otherwise */
+  public boolean isSetCanEvolve() {
+    return EncodingUtils.testBit(__isset_bitfield, __CANEVOLVE_ISSET_ID);
+  }
+
+  public void setCanEvolveIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CANEVOLVE_ISSET_ID, value);
+  }
+
+  public String getSchemaGroup() {
+    return this.schemaGroup;
+  }
+
+  public void setSchemaGroup(String schemaGroup) {
+    this.schemaGroup = schemaGroup;
+  }
+
+  public void unsetSchemaGroup() {
+    this.schemaGroup = null;
+  }
+
+  /** Returns true if field schemaGroup is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaGroup() {
+    return this.schemaGroup != null;
+  }
+
+  public void setSchemaGroupIsSet(boolean value) {
+    if (!value) {
+      this.schemaGroup = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_TYPE:
+      if (value == null) {
+        unsetSchemaType();
+      } else {
+        setSchemaType((SchemaType)value);
+      }
+      break;
+
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case COMPATIBILITY:
+      if (value == null) {
+        unsetCompatibility();
+      } else {
+        setCompatibility((SchemaCompatibility)value);
+      }
+      break;
+
+    case VALIDATION_LEVEL:
+      if (value == null) {
+        unsetValidationLevel();
+      } else {
+        setValidationLevel((SchemaValidation)value);
+      }
+      break;
+
+    case CAN_EVOLVE:
+      if (value == null) {
+        unsetCanEvolve();
+      } else {
+        setCanEvolve((Boolean)value);
+      }
+      break;
+
+    case SCHEMA_GROUP:
+      if (value == null) {
+        unsetSchemaGroup();
+      } else {
+        setSchemaGroup((String)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_TYPE:
+      return getSchemaType();
+
+    case NAME:
+      return getName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case COMPATIBILITY:
+      return getCompatibility();
+
+    case VALIDATION_LEVEL:
+      return getValidationLevel();
+
+    case CAN_EVOLVE:
+      return isCanEvolve();
+
+    case SCHEMA_GROUP:
+      return getSchemaGroup();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_TYPE:
+      return isSetSchemaType();
+    case NAME:
+      return isSetName();
+    case DB_NAME:
+      return isSetDbName();
+    case COMPATIBILITY:
+      return isSetCompatibility();
+    case VALIDATION_LEVEL:
+      return isSetValidationLevel();
+    case CAN_EVOLVE:
+      return isSetCanEvolve();
+    case SCHEMA_GROUP:
+      return isSetSchemaGroup();
+    case DESCRIPTION:
+      return isSetDescription();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ISchema)
+      return this.equals((ISchema)that);
+    return false;
+  }
+
+  public boolean equals(ISchema that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaType = true && this.isSetSchemaType();
+    boolean that_present_schemaType = true && that.isSetSchemaType();
+    if (this_present_schemaType || that_present_schemaType) {
+      if (!(this_present_schemaType && that_present_schemaType))
+        return false;
+      if (!this.schemaType.equals(that.schemaType))
+        return false;
+    }
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_compatibility = true && this.isSetCompatibility();
+    boolean that_present_compatibility = true && that.isSetCompatibility();
+    if (this_present_compatibility || that_present_compatibility) {
+      if (!(this_present_compatibility && that_present_compatibility))
+        return false;
+      if (!this.compatibility.equals(that.compatibility))
+        return false;
+    }
+
+    boolean this_present_validationLevel = true && this.isSetValidationLevel();
+    boolean that_present_validationLevel = true && that.isSetValidationLevel();
+    if (this_present_validationLevel || that_present_validationLevel) {
+      if (!(this_present_validationLevel && that_present_validationLevel))
+        return false;
+      if (!this.validationLevel.equals(that.validationLevel))
+        return false;
+    }
+
+    boolean this_present_canEvolve = true;
+    boolean that_present_canEvolve = true;
+    if (this_present_canEvolve || that_present_canEvolve) {
+      if (!(this_present_canEvolve && that_present_canEvolve))
+        return false;
+      if (this.canEvolve != that.canEvolve)
+        return false;
+    }
+
+    boolean this_present_schemaGroup = true && this.isSetSchemaGroup();
+    boolean that_present_schemaGroup = true && that.isSetSchemaGroup();
+    if (this_present_schemaGroup || that_present_schemaGroup) {
+      if (!(this_present_schemaGroup && that_present_schemaGroup))
+        return false;
+      if (!this.schemaGroup.equals(that.schemaGroup))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaType = true && (isSetSchemaType());
+    list.add(present_schemaType);
+    if (present_schemaType)
+      list.add(schemaType.getValue());
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_compatibility = true && (isSetCompatibility());
+    list.add(present_compatibility);
+    if (present_compatibility)
+      list.add(compatibility.getValue());
+
+    boolean present_validationLevel = true && (isSetValidationLevel());
+    list.add(present_validationLevel);
+    if (present_validationLevel)
+      list.add(validationLevel.getValue());
+
+    boolean present_canEvolve = true;
+    list.add(present_canEvolve);
+    if (present_canEvolve)
+      list.add(canEvolve);
+
+    boolean present_schemaGroup = true && (isSetSchemaGroup());
+    list.add(present_schemaGroup);
+    if (present_schemaGroup)
+      list.add(schemaGroup);
+
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ISchema other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaType()).compareTo(other.isSetSchemaType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaType, other.schemaType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCompatibility()).compareTo(other.isSetCompatibility());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCompatibility()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compatibility, other.compatibility);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidationLevel()).compareTo(other.isSetValidationLevel());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidationLevel()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validationLevel, other.validationLevel);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCanEvolve()).compareTo(other.isSetCanEvolve());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCanEvolve()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.canEvolve, other.canEvolve);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSchemaGroup()).compareTo(other.isSetSchemaGroup());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaGroup()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaGroup, other.schemaGroup);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ISchema(");
+    boolean first = true;
+
+    sb.append("schemaType:");
+    if (this.schemaType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("compatibility:");
+    if (this.compatibility == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.compatibility);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validationLevel:");
+    if (this.validationLevel == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validationLevel);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("canEvolve:");
+    sb.append(this.canEvolve);
+    first = false;
+    if (isSetSchemaGroup()) {
+      if (!first) sb.append(", ");
+      sb.append("schemaGroup:");
+      if (this.schemaGroup == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.schemaGroup);
+      }
+      first = false;
+    }
+    if (isSetDescription()) {
+      if (!first) sb.append(", ");
+      sb.append("description:");
+      if (this.description == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.description);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ISchemaStandardSchemeFactory implements SchemeFactory {
+    public ISchemaStandardScheme getScheme() {
+      return new ISchemaStandardScheme();
+    }
+  }
+
+  private static class ISchemaStandardScheme extends StandardScheme<ISchema> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32());
+              struct.setSchemaTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COMPATIBILITY
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32());
+              struct.setCompatibilityIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // VALIDATION_LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32());
+              struct.setValidationLevelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAN_EVOLVE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.canEvolve = iprot.readBool();
+              struct.setCanEvolveIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // SCHEMA_GROUP
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaGroup = iprot.readString();
+              struct.setSchemaGroupIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ISchema struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaType != null) {
+        oprot.writeFieldBegin(SCHEMA_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.schemaType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.compatibility != null) {
+        oprot.writeFieldBegin(COMPATIBILITY_FIELD_DESC);
+        oprot.writeI32(struct.compatibility.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.validationLevel != null) {
+        oprot.writeFieldBegin(VALIDATION_LEVEL_FIELD_DESC);
+        oprot.writeI32(struct.validationLevel.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CAN_EVOLVE_FIELD_DESC);
+      oprot.writeBool(struct.canEvolve);
+      oprot.writeFieldEnd();
+      if (struct.schemaGroup != null) {
+        if (struct.isSetSchemaGroup()) {
+          oprot.writeFieldBegin(SCHEMA_GROUP_FIELD_DESC);
+          oprot.writeString(struct.schemaGroup);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.description != null) {
+        if (struct.isSetDescription()) {
+          oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+          oprot.writeString(struct.description);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ISchemaTupleSchemeFactory implements SchemeFactory {
+    public ISchemaTupleScheme getScheme() {
+      return new ISchemaTupleScheme();
+    }
+  }
+
+  private static class ISchemaTupleScheme extends TupleScheme<ISchema> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetCompatibility()) {
+        optionals.set(3);
+      }
+      if (struct.isSetValidationLevel()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCanEvolve()) {
+        optionals.set(5);
+      }
+      if (struct.isSetSchemaGroup()) {
+        optionals.set(6);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(7);
+      }
+      oprot.writeBitSet(optionals, 8);
+      if (struct.isSetSchemaType()) {
+        oprot.writeI32(struct.schemaType.getValue());
+      }
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetCompatibility()) {
+        oprot.writeI32(struct.compatibility.getValue());
+      }
+      if (struct.isSetValidationLevel()) {
+        oprot.writeI32(struct.validationLevel.getValue());
+      }
+      if (struct.isSetCanEvolve()) {
+        oprot.writeBool(struct.canEvolve);
+      }
+      if (struct.isSetSchemaGroup()) {
+        oprot.writeString(struct.schemaGroup);
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(8);
+      if (incoming.get(0)) {
+        struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32());
+        struct.setSchemaTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32());
+        struct.setCompatibilityIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32());
+        struct.setValidationLevelIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.canEvolve = iprot.readBool();
+        struct.setCanEvolveIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.schemaGroup = iprot.readString();
+        struct.setSchemaGroupIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+    }
+  }
+
+}
+
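A note for readers skimming the generated code above: ISchema behaves like any other Thrift TBase bean. The following minimal sketch (the field values, buffer size, and wrapper class name are illustrative, not part of this commit) builds an ISchema with its six required fields, sets one optional field, and round-trips it through TCompactProtocol, the same protocol the generated writeObject/readObject hooks use:

import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
import org.apache.hadoop.hive.metastore.api.SchemaType;
import org.apache.hadoop.hive.metastore.api.SchemaValidation;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class ISchemaRoundTrip {
  public static void main(String[] args) throws Exception {
    // All six required fields go through the convenience constructor.
    ISchema schema = new ISchema(SchemaType.AVRO, "web_events", "default",
        SchemaCompatibility.BACKWARD, SchemaValidation.ALL, true);
    schema.setSchemaGroup("kafka");   // optional field; description left unset

    // Serialize into an in-memory transport with the compact protocol.
    TMemoryBuffer buffer = new TMemoryBuffer(1024);
    schema.write(new TCompactProtocol(buffer));

    // Deserialize into a fresh instance from the same buffer.
    ISchema copy = new ISchema();
    copy.read(new TCompactProtocol(buffer));

    // equals() compares set-ness as well as values field by field, so the
    // round-tripped copy is equal and its unset optionals stay unset.
    System.out.println(schema.equals(copy));      // true
    System.out.println(copy.isSetDescription());  // false
  }
}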

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaCompatibility.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaCompatibility.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaCompatibility.java
new file mode 100644
index 0000000..4c0bb5a
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaCompatibility.java
@@ -0,0 +1,51 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum SchemaCompatibility implements org.apache.thrift.TEnum {
+  NONE(1),
+  BACKWARD(2),
+  FORWARD(3),
+  BOTH(4);
+
+  private final int value;
+
+  private SchemaCompatibility(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static SchemaCompatibility findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return NONE;
+      case 2:
+        return BACKWARD;
+      case 3:
+        return FORWARD;
+      case 4:
+        return BOTH;
+      default:
+        return null;
+    }
+  }
+}
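SchemaCompatibility, like the SchemaType and SchemaValidation enums in the files that follow, implements org.apache.thrift.TEnum: getValue() exposes the integer written on the wire, and findByValue() is the inverse mapping, returning null rather than throwing for ids it does not recognize. A minimal sketch (the lookup values are illustrative):

import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;

public class CompatibilityLookup {
  public static void main(String[] args) {
    // getValue() yields the integer written on the wire for this constant.
    System.out.println(SchemaCompatibility.BACKWARD.getValue());  // 2

    // findByValue() maps a wire integer back to the enum constant;
    // unknown ids return null instead of throwing.
    System.out.println(SchemaCompatibility.findByValue(4));       // BOTH
    System.out.println(SchemaCompatibility.findByValue(99));      // null
  }
}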

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaType.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaType.java
new file mode 100644
index 0000000..f4adeb9
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaType.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum SchemaType implements org.apache.thrift.TEnum {
+  HIVE(1),
+  AVRO(2);
+
+  private final int value;
+
+  private SchemaType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static SchemaType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return HIVE;
+      case 2:
+        return AVRO;
+      default:
+        return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaValidation.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaValidation.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaValidation.java
new file mode 100644
index 0000000..1983f56
--- /dev/null
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaValidation.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum SchemaValidation implements org.apache.thrift.TEnum {
+  LATEST(1),
+  ALL(2);
+
+  private final int value;
+
+  private SchemaValidation(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static SchemaValidation findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return LATEST;
+      case 2:
+        return ALL;
+      default:
+        return null;
+    }
+  }
+}


[32/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/derby/hive-schema-2.3.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-2.3.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-2.3.0.derby.sql
new file mode 100644
index 0000000..332b93e
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-2.3.0.derby.sql
@@ -0,0 +1,456 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- ----------------------------------------------
+-- DDL Statements for functions
+-- ----------------------------------------------
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- ----------------------------------------------
+-- DDL Statements for tables
+-- ----------------------------------------------
+
+CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+
+CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256));
+
+CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+
+CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "BIT_VECTOR" BLOB);
+
+CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+
+CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+
+CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+
+CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+
+CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+
+CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16));
+
+CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER NOT NULL, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+
+ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+
+CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+
+-- ----------------------------------------------
+-- DDL Statements for indexes
+-- ----------------------------------------------
+
+CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+
+CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+
+CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
+
+CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+
+CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+
+CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+
+CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+
+-- ----------------------------------------------
+-- DDL Statements for keys
+-- ----------------------------------------------
+
+-- primary/unique
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+
+ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+
+ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+
+ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+
+-- foreign
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- ----------------------------------------------
+-- DDL Statements for checks
+-- ----------------------------------------------
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
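
NEXT_TXN_ID above is a single-row sequence: the transaction manager reads the current value and advances it within one database transaction to reserve a block of ids. A minimal sketch of that pattern, with illustrative values (not part of the init script):

    SELECT NTXN_NEXT FROM NEXT_TXN_ID;           -- suppose this returns 1
    UPDATE NEXT_TXN_ID SET NTXN_NEXT = 1 + 5;    -- ids 1 through 5 are now reserved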
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varchar(2048) for bit data,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
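
Each COMPACTION_QUEUE row is a pending compaction request that worker threads claim and update in place. A hypothetical request row, purely for illustration (CQ_STATE and CQ_TYPE hold single-character codes):

    INSERT INTO COMPACTION_QUEUE (CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_STATE, CQ_TYPE)
    VALUES (1, 'default', 'demo_table', 'i', 'a');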
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varchar(2048) for bit data,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+-- The first 4 columns make up a logical PK, but since WS_PARTITION is nullable we can't declare such a PK
+-- This is a good candidate for an index-organized table
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
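
Since that logical key cannot be declared, lookups against WRITE_SET filter on all four leading columns explicitly. A sketch of that access shape, with hypothetical names (not the actual conflict-detection query):

    SELECT WS_TXNID, WS_COMMIT_ID
    FROM WRITE_SET
    WHERE WS_DATABASE = 'default' AND WS_TABLE = 'demo_table' AND WS_PARTITION IS NULL;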
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
+
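Tooling that manages the metastore treats this VERSION row as the source of truth for schema compatibility, which is why it is written last. An illustrative way to confirm what a given database reports (not part of the script):

    SELECT SCHEMA_VERSION, VERSION_COMMENT FROM "APP"."VERSION" WHERE VER_ID = 1;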

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
new file mode 100644
index 0000000..1763246
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql
@@ -0,0 +1,508 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- ----------------------------------------------
+-- DDL Statements for functions
+-- ----------------------------------------------
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- ----------------------------------------------
+-- DDL Statements for tables
+-- ----------------------------------------------
+
+CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+
+CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256));
+
+CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+
+CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "BIT_VECTOR" BLOB);
+
+CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+
+CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+
+CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+
+CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+
+CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "BIT_VECTOR" BLOB, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+
+CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16));
+
+CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+
+CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
+
+CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL);
+
+CREATE TABLE "APP"."WM_POOL" (POOL_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID INTEGER, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER);
+
+CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
+
+CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID INTEGER NOT NULL, TRIGGER_ID INTEGER NOT NULL);
+
+CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID INTEGER NOT NULL, RP_ID INTEGER NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID INTEGER NOT NULL, ORDERING INTEGER);
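
The WM_* tables are new in this schema and back workload-management resource plans. A hypothetical plan holding a single pool might be stored as (all values illustrative):

    INSERT INTO "APP"."WM_RESOURCEPLAN" (RP_ID, NAME, QUERY_PARALLELISM, STATUS)
    VALUES (1, 'demo_plan', 4, 'DISABLED');
    INSERT INTO "APP"."WM_POOL" (POOL_ID, RP_ID, PATH, ALLOC_FRACTION, QUERY_PARALLELISM)
    VALUES (1, 1, 'default', 1.0, 4);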
+
+-- ----------------------------------------------
+-- DML Statements
+-- ----------------------------------------------
+
+INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
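
The WHERE NOT EXISTS guard makes this seed idempotent: re-running it against an already-initialized database inserts nothing. The same pattern fits any single-row bootstrap; for example, with a hypothetical sequence name:

    INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL")
    SELECT * FROM (VALUES ('org.example.DemoSequence', 1)) tmp_table
    WHERE NOT EXISTS (SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE"
                      WHERE "SEQUENCE_NAME" = 'org.example.DemoSequence');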
+
+-- ----------------------------------------------
+-- DDL Statements for indexes
+-- ----------------------------------------------
+
+CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+
+CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+
+CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
+
+CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+
+CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+
+CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+
+CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+
+CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+
+CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+
+-- ----------------------------------------------
+-- DDL Statements for keys
+-- ----------------------------------------------
+
+-- primary/unique
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+
+ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+
+ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+
+ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+
+ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+
+ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+
+-- foreign
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
+
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
+
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
+
+ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
+
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- ----------------------------------------------
+-- DDL Statements for checks
+-- ----------------------------------------------
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varchar(2048) for bit data,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varchar(2048) for bit data,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+-- The first 4 columns make up a logical PK, but since WS_PARTITION is nullable we can't declare such a PK
+-- This is a good candidate for an index-organized table
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
new file mode 100644
index 0000000..4472b97
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -0,0 +1,46 @@
+-- Upgrade MetaStore schema from 2.3.0 to 3.0.0
+--RUN '041-HIVE-16556.derby.sql';
+CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
+
+ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
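
METASTORE_DB_PROPERTIES gives administrators a keyed place to attach metadata to the metastore database itself. A hypothetical row, purely for illustration:

    INSERT INTO "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY", "PROPERTY_VALUE", "DESCRIPTION")
    VALUES ('example.deployment.id', 'demo-cluster-1', 'illustrative row only');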
+--RUN '042-HIVE-16575.derby.sql';
+-- Remove the NOT NULL constraint from the CHILD_INTEGER_IDX column
+ALTER TABLE "APP"."KEY_CONSTRAINTS" ALTER COLUMN "CHILD_INTEGER_IDX" NULL;
+
+CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
+--RUN '043-HIVE-16922.derby.sql';
+UPDATE SERDE_PARAMS
+SET PARAM_KEY='collection.delim'
+WHERE PARAM_KEY='colelction.delim';
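
The statement above repairs SERDE_PARAMS rows that were written with the historical misspelling 'colelction.delim'. An illustrative post-upgrade sanity check (not part of the script):

    SELECT COUNT(*) FROM SERDE_PARAMS WHERE PARAM_KEY = 'colelction.delim';
    -- a correctly upgraded database returns 0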
+--RUN '044-HIVE-16997.derby.sql';
+ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "BIT_VECTOR" BLOB;
+--RUN '045-HIVE-16886.derby.sql';
+INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
+--RUN '046-HIVE-17566.derby.sql';
+CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL);
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
+ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
+
+CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, PARENT_POOL_ID BIGINT, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER);
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK2" FOREIGN KEY ("PARENT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024));
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
+ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
+ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
+ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_PK" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(10) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT NOT NULL, ORDERING INTEGER);
+CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/derby/upgrade.order.derby
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/derby/upgrade.order.derby b/standalone-metastore/src/main/sql/derby/upgrade.order.derby
new file mode 100644
index 0000000..15531df
--- /dev/null
+++ b/standalone-metastore/src/main/sql/derby/upgrade.order.derby
@@ -0,0 +1 @@
+2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mssql/create-user.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/create-user.mssql.sql b/standalone-metastore/src/main/sql/mssql/create-user.mssql.sql
new file mode 100644
index 0000000..cb39118
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mssql/create-user.mssql.sql
@@ -0,0 +1,5 @@
+CREATE DATABASE _REPLACE_WITH_DB_;
+use _REPLACE_WITH_DB_;
+CREATE LOGIN _REPLACE_WITH_USER_ WITH PASSWORD='_REPLACE_WITH_PASSWD_';
+CREATE USER _REPLACE_WITH_USER_ FOR LOGIN _REPLACE_WITH_USER_;
+ALTER ROLE db_owner ADD MEMBER _REPLACE_WITH_USER_;
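
The _REPLACE_WITH_* tokens are placeholders to substitute before running the script; with purely hypothetical values it would read:

    CREATE DATABASE hive_metastore;
    use hive_metastore;
    CREATE LOGIN hiveuser WITH PASSWORD='StrongPassword1!';
    CREATE USER hiveuser FOR LOGIN hiveuser;
    ALTER ROLE db_owner ADD MEMBER hiveuser;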


[28/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/postgres/hive-schema-2.3.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/hive-schema-2.3.0.postgres.sql b/standalone-metastore/src/main/sql/postgres/hive-schema-2.3.0.postgres.sql
new file mode 100644
index 0000000..0dca1a0
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/hive-schema-2.3.0.postgres.sql
@@ -0,0 +1,1593 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(767) NOT NULL,
+    "TYPE_NAME" text,
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(256)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" text DEFAULT NULL
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text,
+    "IS_REWRITE_ENABLED" boolean NOT NULL
+);
+
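+-- For illustration only (commented out so the init script is unchanged): a
+-- "TBLS" row resolves to its database through "DB_ID" and to its storage
+-- descriptor through "SD_ID", so a basic tables-per-database lookup would be:
+--
+--   SELECT d."NAME" AS db_name, t."TBL_NAME", t."TBL_TYPE"
+--   FROM "TBLS" t
+--   JOIN "DBS" d ON d."DB_ID" = t."DB_ID"
+--   ORDER BY d."NAME", t."TBL_NAME";
+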
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+CREATE TABLE "NOTIFICATION_LOG"
+(
+    "NL_ID" BIGINT NOT NULL,
+    "EVENT_ID" BIGINT NOT NULL,
+    "EVENT_TIME" INTEGER NOT NULL,
+    "EVENT_TYPE" VARCHAR(32) NOT NULL,
+    "DB_NAME" VARCHAR(128),
+    "TBL_NAME" VARCHAR(256),
+    "MESSAGE" text,
+    "MESSAGE_FORMAT" VARCHAR(16),
+    PRIMARY KEY ("NL_ID")
+);
+
+CREATE TABLE "NOTIFICATION_SEQUENCE"
+(
+    "NNI_ID" BIGINT NOT NULL,
+    "NEXT_EVENT_ID" BIGINT NOT NULL,
+    PRIMARY KEY ("NNI_ID")
+);
+
+CREATE TABLE "KEY_CONSTRAINTS"
+(
+  "CHILD_CD_ID" BIGINT,
+  "CHILD_INTEGER_IDX" BIGINT,
+  "CHILD_TBL_ID" BIGINT,
+  "PARENT_CD_ID" BIGINT NOT NULL,
+  "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+  "PARENT_TBL_ID" BIGINT NOT NULL,
+  "POSITION" BIGINT NOT NULL,
+  "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+  "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+  "UPDATE_RULE" SMALLINT,
+  "DELETE_RULE"	SMALLINT,
+  "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+  PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+);
+
+CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+
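+-- For illustration only (commented out): the index above serves constraint
+-- lookups keyed by the parent table, e.g. fetching every declared constraint
+-- on one table. The "CONSTRAINT_TYPE" codes are internal to the metastore and
+-- are not interpreted here; the TBL_ID value is hypothetical:
+--
+--   SELECT "CONSTRAINT_NAME", "CONSTRAINT_TYPE", "POSITION"
+--   FROM "KEY_CONSTRAINTS"
+--   WHERE "PARENT_TBL_ID" = 42
+--   ORDER BY "CONSTRAINT_NAME", "POSITION";
+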
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
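+-- For illustration only (commented out): this composite index matches the
+-- shape of a per-partition column statistics fetch in which all four leading
+-- columns are constrained. Table and column names here are hypothetical:
+--
+--   SELECT "PART_ID", "NUM_NULLS", "NUM_DISTINCTS"
+--   FROM "PART_COL_STATS"
+--   WHERE "DB_NAME" = 'default'
+--     AND "TABLE_NAME" = 'sales'
+--     AND "COLUMN_NAME" = 'amount'
+--     AND "PARTITION_NAME" IN ('ds=2017-12-01', 'ds=2017-12-02');
+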
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
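+-- For illustration only (commented out): every foreign key in this script is
+-- declared DEFERRABLE, so a client (such as the metastore's ORM layer) can
+-- postpone the checks until commit and insert parent and child rows in any
+-- order within one transaction:
+--
+--   BEGIN;
+--   SET CONSTRAINTS ALL DEFERRED;
+--   -- ... inserts into "SDS", "BUCKETING_COLS", etc., in any order ...
+--   COMMIT;  -- foreign key checks run here
+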
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+--
+-- PostgreSQL database dump complete
+--
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+--\i hive-txn-schema-2.3.0.postgres.sql;
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
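+-- For illustration only (commented out): NEXT_TXN_ID is a single-row counter,
+-- seeded with 1 above. A sketch of how a caller would typically allocate the
+-- next transaction id from it (not the exact metastore SQL):
+--
+--   BEGIN;
+--   SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;
+--   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;  -- or + n for a block of n ids
+--   COMMIT;
+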
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO bytea,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO bytea,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '2.3.0', 'Hive release version 2.3.0');


[19/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
new file mode 100644
index 0000000..180a666
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+// Validate the metastore client call validatePartitionNameCharacters to ensure it throws
+// an exception if partition values contain characters outside the whitelist (non-ASCII or commas)
+
+public class TestPartitionNameWhitelistValidation {
+
+  private static final String partitionValidationPattern = "[\\x20-\\x7E&&[^,]]*";
+  private static Configuration conf;
+  private static HiveMetaStoreClient msc;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    System.setProperty(ConfVars.PARTITION_NAME_WHITELIST_PATTERN.toString(), partitionValidationPattern);
+    conf = MetastoreConf.newMetastoreConf();
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    msc = new HiveMetaStoreClient(conf);
+  }
+
+  // Asks the metastore client to validate the given partition values against the
+  // configured whitelist pattern; returns whether validation succeeded
+  private boolean runValidation(List<String> partVals) {
+    try {
+      msc.validatePartitionNameCharacters(partVals);
+    } catch (Exception e) {
+      return false;
+    }
+
+    return true;
+  }
+
+  // Sample data
+  private List<String> getPartValsWithUnicode() {
+    List<String> partVals = new ArrayList<>();
+    partVals.add("klâwen");
+    partVals.add("tägelîch");
+
+    return partVals;
+  }
+
+  private List<String> getPartValsWithCommas() {
+    List<String> partVals = new ArrayList<>();
+    partVals.add("a,b");
+    partVals.add("c,d,e,f");
+
+    return partVals;
+  }
+
+  private List<String> getPartValsWithValidCharacters() {
+    List<String> partVals = new ArrayList<>();
+    partVals.add("part1");
+    partVals.add("part2");
+
+    return partVals;
+  }
+
+  @Test
+  public void testAddPartitionWithCommas() {
+    assertFalse("Add a partition with commas in name",
+        runValidation(getPartValsWithCommas()));
+  }
+
+  @Test
+  public void testAddPartitionWithUnicode() {
+    assertFalse("Add a partition with unicode characters in name",
+        runValidation(getPartValsWithUnicode()));
+  }
+
+  @Test
+  public void testAddPartitionWithValidPartVal() {
+    assertTrue("Add a partition with unicode characters in name",
+        runValidation(getPartValsWithValidCharacters()));
+  }
+
+  @Test
+  public void testAppendPartitionWithUnicode() {
+    assertFalse("Append a partition with unicode characters in name",
+        runValidation(getPartValsWithUnicode()));
+  }
+
+  @Test
+  public void testAppendPartitionWithCommas() {
+    assertFalse("Append a partition with unicode characters in name",
+        runValidation(getPartValsWithCommas()));
+  }
+
+  @Test
+  public void testAppendPartitionWithValidCharacters() {
+    assertTrue("Append a partition with no unicode characters in name",
+        runValidation(getPartValsWithValidCharacters()));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
new file mode 100644
index 0000000..9eec6f8
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+
+
+public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
+  private static boolean isServerStarted = false;
+  protected static int port;
+
+  public TestRemoteHiveMetaStore() {
+    super();
+    isThriftClient = true;
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    super.setUp();
+
+    if (isServerStarted) {
+      Assert.assertNotNull("Unable to connect to the MetaStore server", client);
+      MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+      return;
+    }
+
+    port = MetaStoreTestUtils.findFreePort();
+    System.out.println("Starting MetaStore Server on port " + port);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+    isServerStarted = true;
+
+    // This is default case with setugi off for both client and server
+    client = createClient();
+  }
+
+  @Override
+  protected HiveMetaStoreClient createClient() throws Exception {
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, false);
+    return new HiveMetaStoreClient(conf);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
new file mode 100644
index 0000000..598956a
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ *
+ * TestRemoteHiveMetaStoreIpAddress.
+ *
+ * Test which checks that the remote Hive metastore stores the proper IP address using
+ * IpAddressListener
+ */
+public class TestRemoteHiveMetaStoreIpAddress {
+  private static final Logger LOG = LoggerFactory.getLogger(TestRemoteHiveMetaStoreIpAddress.class);
+  private static Configuration conf;
+  private static HiveMetaStoreClient msc;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+    int port = MetaStoreTestUtils.findFreePort();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+
+
+    LOG.debug("Starting MetaStore Server on port " + port);
+    System.setProperty(ConfVars.EVENT_LISTENERS.toString(), IpAddressListener.class.getName());
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+    msc = new HiveMetaStoreClient(conf);
+  }
+
+  @Test
+  public void testIpAddress() throws Exception {
+    Database db = new Database();
+    db.setName("testIpAddressIp");
+    msc.createDatabase(db);
+    msc.dropDatabase(db.getName());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
new file mode 100644
index 0000000..92d2d0e
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+
+public class TestRemoteUGIHiveMetaStoreIpAddress extends TestRemoteHiveMetaStoreIpAddress {
+  public TestRemoteUGIHiveMetaStoreIpAddress() {
+    System.setProperty(MetastoreConf.ConfVars.EXECUTE_SET_UGI.toString(), "true");
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
new file mode 100644
index 0000000..bbcd4d7
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * TestRetryingHMSHandler. Test case for
+ * {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}
+ */
+public class TestRetryingHMSHandler {
+  private Configuration conf;
+  private HiveMetaStoreClient msc;
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.pre.event.listeners",
+        AlternateFailurePreListener.class.getName());
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, 2);
+    MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 0, TimeUnit.MILLISECONDS);
+    MetastoreConf.setBoolVar(conf, ConfVars.HMSHANDLERFORCERELOADCONF, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+    msc = new HiveMetaStoreClient(conf);
+  }
+
+  // Create a database and a table in that database.  Because the AlternateFailurePreListener is
+  // being used each attempt to create something should require two calls by the RetryingHMSHandler
+  @Test
+  public void testRetryingHMSHandler() throws Exception {
+    String dbName = "hive4159";
+    String tblName = "tmptbl";
+
+    Database db = new Database();
+    db.setName(dbName);
+    msc.createDatabase(db);
+
+    Assert.assertEquals(2, AlternateFailurePreListener.getCallCount());
+
+    Table tbl = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .addCol("c1", ColumnType.STRING_TYPE_NAME)
+        .build();
+
+    msc.createTable(tbl);
+
+    Assert.assertEquals(4, AlternateFailurePreListener.getCallCount());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
new file mode 100644
index 0000000..e34d089
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnBothClientServer.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+
+public class TestSetUGIOnBothClientServer extends TestRemoteHiveMetaStore {
+
+  public TestSetUGIOnBothClientServer() {
+    super();
+    isThriftClient = true;
+    // This will turn on setugi on both the client and server processes of the test (the client-side half is sketched after this file).
+    System.setProperty(MetastoreConf.ConfVars.EXECUTE_SET_UGI.toString(), "true");
+  }
+}
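The client half of that toggle is just a conf flag set before the client is built, which is what the two subclasses that follow do in createClient(). A minimal standalone sketch, where connectWithSetUgi is a hypothetical helper and the MetastoreConf calls mirror the ones in these tests:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class SetUgiClientSketch {
  // Hypothetical helper: build a client that requests set_ugi against a
  // metastore already listening on the given port.
  public static HiveMetaStoreClient connectWithSetUgi(int port) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, true);
    return new HiveMetaStoreClient(conf);
  }
}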

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
new file mode 100644
index 0000000..beff656
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyClient.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+
+public class TestSetUGIOnOnlyClient extends TestRemoteHiveMetaStore {
+
+  @Override
+  protected HiveMetaStoreClient createClient() throws Exception {
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, true);
+    return new HiveMetaStoreClient(conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
new file mode 100644
index 0000000..bec5a55
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestSetUGIOnOnlyServer.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+
+public class TestSetUGIOnOnlyServer extends TestSetUGIOnBothClientServer {
+
+  @Override
+  protected HiveMetaStoreClient createClient() throws Exception {
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setBoolVar(conf, ConfVars.EXECUTE_SET_UGI, false);
+    return new HiveMetaStoreClient(conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index d486f7c..d95fcfa 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -25,6 +25,7 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy;
@@ -59,8 +60,7 @@ public class TestCachedStore {
   public void setUp() throws Exception {
     Configuration conf = MetastoreConf.newMetastoreConf();
     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class.getName());
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
     objectStore = new ObjectStore();
     objectStore.setConf(conf);
     cachedStore = new CachedStore();


[30/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mysql/hive-schema-2.3.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-2.3.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-2.3.0.mysql.sql
new file mode 100644
index 0000000..45fe6ec
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-2.3.0.mysql.sql
@@ -0,0 +1,970 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `IS_REWRITE_ENABLED` bit(1) NOT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+(
+    `NL_ID` BIGINT(20) NOT NULL,
+    `EVENT_ID` BIGINT(20) NOT NULL,
+    `EVENT_TIME` INT(11) NOT NULL,
+    `EVENT_TYPE` varchar(32) NOT NULL,
+    `DB_NAME` varchar(128),
+    `TBL_NAME` varchar(256),
+    `MESSAGE` longtext,
+    `MESSAGE_FORMAT` varchar(16),
+    PRIMARY KEY (`NL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+(
+    `NNI_ID` BIGINT(20) NOT NULL,
+    `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+    PRIMARY KEY (`NNI_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+(
+  `CHILD_CD_ID` BIGINT,
+  `CHILD_INTEGER_IDX` INT(11),
+  `CHILD_TBL_ID` BIGINT,
+  `PARENT_CD_ID` BIGINT NOT NULL,
+  `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+  `PARENT_TBL_ID` BIGINT NOT NULL,
+  `POSITION` BIGINT NOT NULL,
+  `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+  `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
+  `UPDATE_RULE` SMALLINT(6),
+  `DELETE_RULE` SMALLINT(6),
+  `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+  PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+--SOURCE hive-txn-schema-2.3.0.mysql.sql;
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT int
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint NOT NULL,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128) NOT NULL,
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL,
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint NOT NULL,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT int,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varbinary(2048),
+  CQ_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varbinary(2048),
+  CC_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script (a quick verification sketch follows this file).
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
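Since the VERSION row is written last, it serves as the canary for a fully initialized schema, and checking it is a common follow-up after running the script. A minimal JDBC sketch; the URL and credentials are placeholders, not taken from this schema:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SchemaVersionCheck {
  public static void main(String[] args) throws Exception {
    // Placeholder URL/credentials; point these at the database the init
    // script was run against.
    try (Connection c = DriverManager.getConnection(
            "jdbc:mysql://localhost:3306/metastore", "hive", "hive");
         Statement s = c.createStatement();
         ResultSet rs = s.executeQuery(
             "SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION")) {
      while (rs.next()) {
        System.out.println(rs.getString(1) + " - " + rs.getString(2));
      }
    }
  }
}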

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
new file mode 100644
index 0000000..8176fff
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql
@@ -0,0 +1,1045 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
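-- Illustrative example, not part of the patch: a sketch of walking the
-- DBS -> TBLS -> PARTITIONS hierarchy this schema encodes against a
-- populated metastore (the database and table names are hypothetical).
SELECT P.PART_NAME, P.CREATE_TIME
FROM PARTITIONS P
JOIN TBLS T ON P.TBL_ID = T.TBL_ID
JOIN DBS D ON T.DB_ID = D.DB_ID
WHERE D.NAME = 'default' AND T.TBL_NAME = 'web_logs';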
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `IS_REWRITE_ENABLED` bit(1) NOT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
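-- Illustrative example, not part of the patch: resolving a table's physical
-- storage and serde through the SDS/SERDES references above (the table name
-- is hypothetical).
SELECT T.TBL_NAME, T.TBL_TYPE, S.LOCATION, S.INPUT_FORMAT, SER.SLIB
FROM TBLS T
JOIN SDS S ON T.SD_ID = S.SD_ID
JOIN SERDES SER ON S.SERDE_ID = SER.SERDE_ID
WHERE T.TBL_NAME = 'web_logs';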
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `BIT_VECTOR` blob,
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `BIT_VECTOR` blob,
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
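-- Illustrative example, not part of the patch: the per-column statistics
-- probe PCS_STATS_IDX is built to serve; the predicate matches the index's
-- leading columns (database, table, and column names are hypothetical).
SELECT PARTITION_NAME, NUM_NULLS, NUM_DISTINCTS, AVG_COL_LEN
FROM PART_COL_STATS
WHERE DB_NAME = 'default' AND TABLE_NAME = 'web_logs' AND COLUMN_NAME = 'ip';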
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+(
+    `NL_ID` BIGINT(20) NOT NULL,
+    `EVENT_ID` BIGINT(20) NOT NULL,
+    `EVENT_TIME` INT(11) NOT NULL,
+    `EVENT_TYPE` varchar(32) NOT NULL,
+    `DB_NAME` varchar(128),
+    `TBL_NAME` varchar(256),
+    `MESSAGE` longtext,
+    `MESSAGE_FORMAT` varchar(16),
+    PRIMARY KEY (`NL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+(
+    `NNI_ID` BIGINT(20) NOT NULL,
+    `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+    PRIMARY KEY (`NNI_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * FROM (SELECT 1 AS `NNI_ID`, 1 AS `NEXT_EVENT_ID`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0;
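-- The INSERT ... WHERE (SELECT COUNT(*) ...) = 0 above seeds exactly one
-- sequence row and is a no-op when the script is re-run. Illustrative sketch,
-- not the actual metastore code path: an event writer would claim the next
-- event id roughly like this, inside a transaction.
SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE WHERE NNI_ID = 1 FOR UPDATE;
UPDATE NOTIFICATION_SEQUENCE SET NEXT_EVENT_ID = NEXT_EVENT_ID + 1 WHERE NNI_ID = 1;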
+
+CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+(
+  `CHILD_CD_ID` BIGINT,
+  `CHILD_INTEGER_IDX` INT(11),
+  `CHILD_TBL_ID` BIGINT,
+  `PARENT_CD_ID` BIGINT NOT NULL,
+  `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+  `PARENT_TBL_ID` BIGINT NOT NULL,
+  `POSITION` BIGINT NOT NULL,
+  `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+  `CONSTRAINT_TYPE` SMALLINT(6) NOT NULL,
+  `UPDATE_RULE` SMALLINT(6),
+  `DELETE_RULE` SMALLINT(6),
+  `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+  PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+
+CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE;
+
+-- -----------------------------
+-- Metastore DB Properties table
+-- -----------------------------
+CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
+  `PROPERTY_KEY` varchar(255) NOT NULL,
+  `PROPERTY_VALUE` varchar(1000) NOT NULL,
+  `DESCRIPTION` varchar(1000),
+ PRIMARY KEY(`PROPERTY_KEY`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+
+-- ---------------------
+-- Resource plan tables.
+-- ---------------------
+CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
+    `RP_ID` bigint(20) NOT NULL,
+    `NAME` varchar(128) NOT NULL,
+    `QUERY_PARALLELISM` int(11),
+    `STATUS` varchar(20) NOT NULL,
+    PRIMARY KEY (`RP_ID`),
+    UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_POOL
+(
+    `POOL_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `PATH` varchar(767) NOT NULL,
+    `PARENT_POOL_ID` bigint(20),
+    `ALLOC_FRACTION` DOUBLE,
+    `QUERY_PARALLELISM` int(11),
+    PRIMARY KEY (`POOL_ID`),
+    UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
+    CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+    CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_TRIGGER
+(
+    `TRIGGER_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `NAME` varchar(128) NOT NULL,
+    `TRIGGER_EXPRESSION` varchar(1024),
+    `ACTION_EXPRESSION` varchar(1024),
+    PRIMARY KEY (`TRIGGER_ID`),
+    UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`),
+    CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER
+(
+    `POOL_ID` bigint(20) NOT NULL,
+    `TRIGGER_ID` bigint(20) NOT NULL,
+    PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`),
+    CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`),
+    CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_MAPPING
+(
+    `MAPPING_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `ENTITY_TYPE` varchar(10) NOT NULL,
+    `ENTITY_NAME` varchar(128) NOT NULL,
+    `POOL_ID` bigint(20) NOT NULL,
+    `ORDERING` int,
+    PRIMARY KEY (`MAPPING_ID`),
+    UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),
+    CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+    CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT int
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint NOT NULL,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128) NOT NULL,
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL,
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint NOT NULL,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(256),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
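-- Illustrative sketch, not the actual TxnHandler logic: these single-row
-- sequence tables support a select-then-advance allocation pattern, e.g.
-- opening one transaction ('o' denoting an open state; the literal id,
-- timestamps, user, and host below are hypothetical).
SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;
UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;
INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
VALUES (1, 'o', 1513641600000, 1513641600000, 'hive', 'worker-1');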
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT int,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varbinary(2048),
+  CQ_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varbinary(2048),
+  CC_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
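-- Illustrative example, not part of the patch: verifying the recorded schema
-- version after initialization (the kind of check schematool amounts to).
SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;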
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
new file mode 100644
index 0000000..63128fb
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql
@@ -0,0 +1,90 @@
+SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
+
+--SOURCE 041-HIVE-16556.mysql.sql;
+--
+-- Table structure for table METASTORE_DB_PROPERTIES
+--
+CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
+  `PROPERTY_KEY` varchar(255) NOT NULL,
+  `PROPERTY_VALUE` varchar(1000) NOT NULL,
+  `DESCRIPTION` varchar(1000),
+ PRIMARY KEY(`PROPERTY_KEY`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--SOURCE 042-HIVE-16575.mysql.sql;
+CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE;
+
+--SOURCE 043-HIVE-16922.mysql.sql;
+UPDATE SERDE_PARAMS
+SET PARAM_KEY='collection.delim'
+WHERE PARAM_KEY='colelction.delim';
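+
+-- Illustrative check, not part of the patch: after the rewrite above, no
+-- serde parameter should still carry the misspelled key.
+-- SELECT COUNT(*) FROM SERDE_PARAMS WHERE PARAM_KEY = 'colelction.delim';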
+
+--SOURCE 044-HIVE-16997.mysql.sql;
+ALTER TABLE PART_COL_STATS ADD COLUMN BIT_VECTOR BLOB;
+ALTER TABLE TAB_COL_STATS ADD COLUMN BIT_VECTOR BLOB;
+
+--SOURCE 045-HIVE-16886.mysql.sql;
+INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * FROM (SELECT 1 AS `NNI_ID`, 1 AS `NEXT_EVENT_ID`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0;
+
+--SOURCE 046-HIVE-17566.mysql.sql;
+CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
+    `RP_ID` bigint(20) NOT NULL,
+    `NAME` varchar(128) NOT NULL,
+    `QUERY_PARALLELISM` int(11),
+    `STATUS` varchar(20) NOT NULL,
+    PRIMARY KEY (`RP_ID`),
+    UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_POOL
+(
+    `POOL_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `PATH` varchar(767) NOT NULL,
+    `PARENT_POOL_ID` bigint(20),
+    `ALLOC_FRACTION` DOUBLE,
+    `QUERY_PARALLELISM` int(11),
+    PRIMARY KEY (`POOL_ID`),
+    UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
+    CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+    CONSTRAINT `WM_POOL_FK2` FOREIGN KEY (`PARENT_POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_TRIGGER
+(
+    `TRIGGER_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `NAME` varchar(128) NOT NULL,
+    `TRIGGER_EXPRESSION` varchar(1024),
+    `ACTION_EXPRESSION` varchar(1024),
+    PRIMARY KEY (`TRIGGER_ID`),
+    UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`),
+    CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER
+(
+    `POOL_ID` bigint(20) NOT NULL,
+    `TRIGGER_ID` bigint(20) NOT NULL,
+    PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`),
+    CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`),
+    CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS WM_MAPPING
+(
+    `MAPPING_ID` bigint(20) NOT NULL,
+    `RP_ID` bigint(20) NOT NULL,
+    `ENTITY_TYPE` varchar(10) NOT NULL,
+    `ENTITY_NAME` varchar(128) NOT NULL,
+    `POOL_ID` bigint(20) NOT NULL,
+    `ORDERING` int,
+    PRIMARY KEY (`MAPPING_ID`),
+    UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),
+    CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+    CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' WHERE VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' ';
+

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mysql/upgrade.order.mysql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/upgrade.order.mysql b/standalone-metastore/src/main/sql/mysql/upgrade.order.mysql
new file mode 100644
index 0000000..15531df
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/upgrade.order.mysql
@@ -0,0 +1 @@
+2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/oracle/create-user.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/create-user.oracle.sql b/standalone-metastore/src/main/sql/oracle/create-user.oracle.sql
new file mode 100644
index 0000000..41e8722
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/create-user.oracle.sql
@@ -0,0 +1,3 @@
+create user _REPLACE_WITH_USER_ identified by _REPLACE_WITH_PASSWD_;
+grant connect to _REPLACE_WITH_USER_;
+grant all privileges to _REPLACE_WITH_USER_;


[38/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 863031d..87df6d0 100644
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -211,6 +211,100 @@ class EventRequestType:
     "DELETE": 3,
   }
 
+class SerdeType:
+  HIVE = 1
+  SCHEMA_REGISTRY = 2
+
+  _VALUES_TO_NAMES = {
+    1: "HIVE",
+    2: "SCHEMA_REGISTRY",
+  }
+
+  _NAMES_TO_VALUES = {
+    "HIVE": 1,
+    "SCHEMA_REGISTRY": 2,
+  }
+
+class SchemaType:
+  HIVE = 1
+  AVRO = 2
+
+  _VALUES_TO_NAMES = {
+    1: "HIVE",
+    2: "AVRO",
+  }
+
+  _NAMES_TO_VALUES = {
+    "HIVE": 1,
+    "AVRO": 2,
+  }
+
+class SchemaCompatibility:
+  NONE = 1
+  BACKWARD = 2
+  FORWARD = 3
+  BOTH = 4
+
+  _VALUES_TO_NAMES = {
+    1: "NONE",
+    2: "BACKWARD",
+    3: "FORWARD",
+    4: "BOTH",
+  }
+
+  _NAMES_TO_VALUES = {
+    "NONE": 1,
+    "BACKWARD": 2,
+    "FORWARD": 3,
+    "BOTH": 4,
+  }
+
+class SchemaValidation:
+  LATEST = 1
+  ALL = 2
+
+  _VALUES_TO_NAMES = {
+    1: "LATEST",
+    2: "ALL",
+  }
+
+  _NAMES_TO_VALUES = {
+    "LATEST": 1,
+    "ALL": 2,
+  }
+
+class SchemaVersionState:
+  INITIATED = 1
+  START_REVIEW = 2
+  CHANGES_REQUIRED = 3
+  REVIEWED = 4
+  ENABLED = 5
+  DISABLED = 6
+  ARCHIVED = 7
+  DELETED = 8
+
+  _VALUES_TO_NAMES = {
+    1: "INITIATED",
+    2: "START_REVIEW",
+    3: "CHANGES_REQUIRED",
+    4: "REVIEWED",
+    5: "ENABLED",
+    6: "DISABLED",
+    7: "ARCHIVED",
+    8: "DELETED",
+  }
+
+  _NAMES_TO_VALUES = {
+    "INITIATED": 1,
+    "START_REVIEW": 2,
+    "CHANGES_REQUIRED": 3,
+    "REVIEWED": 4,
+    "ENABLED": 5,
+    "DISABLED": 6,
+    "ARCHIVED": 7,
+    "DELETED": 8,
+  }
+
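# Illustrative example, not part of the generated file: each enum class above
# is plain ints plus two lookup dicts, so round-tripping a value is a pair of
# dict lookups (assumes this module is importable as hive_metastore.ttypes).
from hive_metastore.ttypes import SchemaCompatibility

compat = SchemaCompatibility.BACKWARD                  # == 2
name = SchemaCompatibility._VALUES_TO_NAMES[compat]    # 'BACKWARD'
assert SchemaCompatibility._NAMES_TO_VALUES[name] == compat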
 class FunctionType:
   JAVA = 1
 
@@ -2883,6 +2977,10 @@ class SerDeInfo:
    - name
    - serializationLib
    - parameters
+   - description
+   - serializerClass
+   - deserializerClass
+   - serdeType
   """
 
   thrift_spec = (
@@ -2890,12 +2988,20 @@ class SerDeInfo:
     (1, TType.STRING, 'name', None, None, ), # 1
     (2, TType.STRING, 'serializationLib', None, None, ), # 2
     (3, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 3
+    (4, TType.STRING, 'description', None, None, ), # 4
+    (5, TType.STRING, 'serializerClass', None, None, ), # 5
+    (6, TType.STRING, 'deserializerClass', None, None, ), # 6
+    (7, TType.I32, 'serdeType', None, None, ), # 7
   )
 
-  def __init__(self, name=None, serializationLib=None, parameters=None,):
+  def __init__(self, name=None, serializationLib=None, parameters=None, description=None, serializerClass=None, deserializerClass=None, serdeType=None,):
     self.name = name
     self.serializationLib = serializationLib
     self.parameters = parameters
+    self.description = description
+    self.serializerClass = serializerClass
+    self.deserializerClass = deserializerClass
+    self.serdeType = serdeType
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -2927,6 +3033,26 @@ class SerDeInfo:
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.STRING:
+          self.description = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRING:
+          self.serializerClass = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRING:
+          self.deserializerClass = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.I32:
+          self.serdeType = iprot.readI32()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -2953,6 +3079,22 @@ class SerDeInfo:
         oprot.writeString(viter100)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
+    if self.description is not None:
+      oprot.writeFieldBegin('description', TType.STRING, 4)
+      oprot.writeString(self.description)
+      oprot.writeFieldEnd()
+    if self.serializerClass is not None:
+      oprot.writeFieldBegin('serializerClass', TType.STRING, 5)
+      oprot.writeString(self.serializerClass)
+      oprot.writeFieldEnd()
+    if self.deserializerClass is not None:
+      oprot.writeFieldBegin('deserializerClass', TType.STRING, 6)
+      oprot.writeString(self.deserializerClass)
+      oprot.writeFieldEnd()
+    if self.serdeType is not None:
+      oprot.writeFieldBegin('serdeType', TType.I32, 7)
+      oprot.writeI32(self.serdeType)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -2965,6 +3107,10 @@ class SerDeInfo:
     value = (value * 31) ^ hash(self.name)
     value = (value * 31) ^ hash(self.serializationLib)
     value = (value * 31) ^ hash(self.parameters)
+    value = (value * 31) ^ hash(self.description)
+    value = (value * 31) ^ hash(self.serializerClass)
+    value = (value * 31) ^ hash(self.deserializerClass)
+    value = (value * 31) ^ hash(self.serdeType)
     return value
 
   def __repr__(self):
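# Illustrative example, not part of the generated file: constructing a
# SerDeInfo that exercises the new optional fields added above. The
# serializer/deserializer class names are hypothetical; the serde lib is
# Hive's stock AvroSerDe.
from hive_metastore.ttypes import SerDeInfo, SerdeType

serde = SerDeInfo(name='avro_serde',
                  serializationLib='org.apache.hadoop.hive.serde2.avro.AvroSerDe',
                  parameters={},
                  description='serde registered for schema-registry use',
                  serializerClass='com.example.AvroSerializer',
                  deserializerClass='com.example.AvroDeserializer',
                  serdeType=SerdeType.SCHEMA_REGISTRY)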
@@ -17237,6 +17383,597 @@ class WMCreateOrDropTriggerToPoolMappingResponse:
   def __ne__(self, other):
     return not (self == other)
 
+class ISchema:
+  """
+  Attributes:
+   - schemaType
+   - name
+   - dbName
+   - compatibility
+   - validationLevel
+   - canEvolve
+   - schemaGroup
+   - description
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'schemaType', None, None, ), # 1
+    (2, TType.STRING, 'name', None, None, ), # 2
+    (3, TType.STRING, 'dbName', None, None, ), # 3
+    (4, TType.I32, 'compatibility', None, None, ), # 4
+    (5, TType.I32, 'validationLevel', None, None, ), # 5
+    (6, TType.BOOL, 'canEvolve', None, None, ), # 6
+    (7, TType.STRING, 'schemaGroup', None, None, ), # 7
+    (8, TType.STRING, 'description', None, None, ), # 8
+  )
+
+  def __init__(self, schemaType=None, name=None, dbName=None, compatibility=None, validationLevel=None, canEvolve=None, schemaGroup=None, description=None,):
+    self.schemaType = schemaType
+    self.name = name
+    self.dbName = dbName
+    self.compatibility = compatibility
+    self.validationLevel = validationLevel
+    self.canEvolve = canEvolve
+    self.schemaGroup = schemaGroup
+    self.description = description
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.schemaType = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.dbName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I32:
+          self.compatibility = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.I32:
+          self.validationLevel = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.BOOL:
+          self.canEvolve = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.STRING:
+          self.schemaGroup = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.STRING:
+          self.description = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('ISchema')
+    if self.schemaType is not None:
+      oprot.writeFieldBegin('schemaType', TType.I32, 1)
+      oprot.writeI32(self.schemaType)
+      oprot.writeFieldEnd()
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 2)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.dbName is not None:
+      oprot.writeFieldBegin('dbName', TType.STRING, 3)
+      oprot.writeString(self.dbName)
+      oprot.writeFieldEnd()
+    if self.compatibility is not None:
+      oprot.writeFieldBegin('compatibility', TType.I32, 4)
+      oprot.writeI32(self.compatibility)
+      oprot.writeFieldEnd()
+    if self.validationLevel is not None:
+      oprot.writeFieldBegin('validationLevel', TType.I32, 5)
+      oprot.writeI32(self.validationLevel)
+      oprot.writeFieldEnd()
+    if self.canEvolve is not None:
+      oprot.writeFieldBegin('canEvolve', TType.BOOL, 6)
+      oprot.writeBool(self.canEvolve)
+      oprot.writeFieldEnd()
+    if self.schemaGroup is not None:
+      oprot.writeFieldBegin('schemaGroup', TType.STRING, 7)
+      oprot.writeString(self.schemaGroup)
+      oprot.writeFieldEnd()
+    if self.description is not None:
+      oprot.writeFieldBegin('description', TType.STRING, 8)
+      oprot.writeString(self.description)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.schemaType)
+    value = (value * 31) ^ hash(self.name)
+    value = (value * 31) ^ hash(self.dbName)
+    value = (value * 31) ^ hash(self.compatibility)
+    value = (value * 31) ^ hash(self.validationLevel)
+    value = (value * 31) ^ hash(self.canEvolve)
+    value = (value * 31) ^ hash(self.schemaGroup)
+    value = (value * 31) ^ hash(self.description)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
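# Illustrative example, not part of the generated file: a minimal ISchema for
# an Avro schema that allows backward-compatible evolution (the db and schema
# names are hypothetical).
from hive_metastore.ttypes import ISchema, SchemaType, SchemaCompatibility, SchemaValidation

schema = ISchema(schemaType=SchemaType.AVRO,
                 name='web_logs_schema',
                 dbName='default',
                 compatibility=SchemaCompatibility.BACKWARD,
                 validationLevel=SchemaValidation.ALL,
                 canEvolve=True,
                 description='schema tracked for the web_logs feed')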
+class SchemaVersion:
+  """
+  Attributes:
+   - schemaName
+   - version
+   - createdAt
+   - cols
+   - state
+   - description
+   - schemaText
+   - fingerprint
+   - name
+   - serDe
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'schemaName', None, None, ), # 1
+    (2, TType.I32, 'version', None, None, ), # 2
+    (3, TType.I64, 'createdAt', None, None, ), # 3
+    (4, TType.LIST, 'cols', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 4
+    (5, TType.I32, 'state', None, None, ), # 5
+    (6, TType.STRING, 'description', None, None, ), # 6
+    (7, TType.STRING, 'schemaText', None, None, ), # 7
+    (8, TType.STRING, 'fingerprint', None, None, ), # 8
+    (9, TType.STRING, 'name', None, None, ), # 9
+    (10, TType.STRUCT, 'serDe', (SerDeInfo, SerDeInfo.thrift_spec), None, ), # 10
+  )
+
+  def __init__(self, schemaName=None, version=None, createdAt=None, cols=None, state=None, description=None, schemaText=None, fingerprint=None, name=None, serDe=None,):
+    self.schemaName = schemaName
+    self.version = version
+    self.createdAt = createdAt
+    self.cols = cols
+    self.state = state
+    self.description = description
+    self.schemaText = schemaText
+    self.fingerprint = fingerprint
+    self.name = name
+    self.serDe = serDe
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.schemaName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.version = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.I64:
+          self.createdAt = iprot.readI64()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.LIST:
+          self.cols = []
+          (_etype702, _size699) = iprot.readListBegin()
+          for _i703 in xrange(_size699):
+            _elem704 = FieldSchema()
+            _elem704.read(iprot)
+            self.cols.append(_elem704)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.I32:
+          self.state = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRING:
+          self.description = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.STRING:
+          self.schemaText = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.STRING:
+          self.fingerprint = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 9:
+        if ftype == TType.STRING:
+          self.name = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 10:
+        if ftype == TType.STRUCT:
+          self.serDe = SerDeInfo()
+          self.serDe.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('SchemaVersion')
+    if self.schemaName is not None:
+      oprot.writeFieldBegin('schemaName', TType.STRING, 1)
+      oprot.writeString(self.schemaName)
+      oprot.writeFieldEnd()
+    if self.version is not None:
+      oprot.writeFieldBegin('version', TType.I32, 2)
+      oprot.writeI32(self.version)
+      oprot.writeFieldEnd()
+    if self.createdAt is not None:
+      oprot.writeFieldBegin('createdAt', TType.I64, 3)
+      oprot.writeI64(self.createdAt)
+      oprot.writeFieldEnd()
+    if self.cols is not None:
+      oprot.writeFieldBegin('cols', TType.LIST, 4)
+      oprot.writeListBegin(TType.STRUCT, len(self.cols))
+      for iter705 in self.cols:
+        iter705.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.state is not None:
+      oprot.writeFieldBegin('state', TType.I32, 5)
+      oprot.writeI32(self.state)
+      oprot.writeFieldEnd()
+    if self.description is not None:
+      oprot.writeFieldBegin('description', TType.STRING, 6)
+      oprot.writeString(self.description)
+      oprot.writeFieldEnd()
+    if self.schemaText is not None:
+      oprot.writeFieldBegin('schemaText', TType.STRING, 7)
+      oprot.writeString(self.schemaText)
+      oprot.writeFieldEnd()
+    if self.fingerprint is not None:
+      oprot.writeFieldBegin('fingerprint', TType.STRING, 8)
+      oprot.writeString(self.fingerprint)
+      oprot.writeFieldEnd()
+    if self.name is not None:
+      oprot.writeFieldBegin('name', TType.STRING, 9)
+      oprot.writeString(self.name)
+      oprot.writeFieldEnd()
+    if self.serDe is not None:
+      oprot.writeFieldBegin('serDe', TType.STRUCT, 10)
+      self.serDe.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.schemaName)
+    value = (value * 31) ^ hash(self.version)
+    value = (value * 31) ^ hash(self.createdAt)
+    value = (value * 31) ^ hash(self.cols)
+    value = (value * 31) ^ hash(self.state)
+    value = (value * 31) ^ hash(self.description)
+    value = (value * 31) ^ hash(self.schemaText)
+    value = (value * 31) ^ hash(self.fingerprint)
+    value = (value * 31) ^ hash(self.name)
+    value = (value * 31) ^ hash(self.serDe)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class FindSchemasByColsRqst:
+  """
+  Attributes:
+   - colName
+   - colNamespace
+   - type
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'colName', None, None, ), # 1
+    (2, TType.STRING, 'colNamespace', None, None, ), # 2
+    (3, TType.STRING, 'type', None, None, ), # 3
+  )
+
+  def __init__(self, colName=None, colNamespace=None, type=None,):
+    self.colName = colName
+    self.colNamespace = colNamespace
+    self.type = type
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.colName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.colNamespace = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.STRING:
+          self.type = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('FindSchemasByColsRqst')
+    if self.colName is not None:
+      oprot.writeFieldBegin('colName', TType.STRING, 1)
+      oprot.writeString(self.colName)
+      oprot.writeFieldEnd()
+    if self.colNamespace is not None:
+      oprot.writeFieldBegin('colNamespace', TType.STRING, 2)
+      oprot.writeString(self.colNamespace)
+      oprot.writeFieldEnd()
+    if self.type is not None:
+      oprot.writeFieldBegin('type', TType.STRING, 3)
+      oprot.writeString(self.type)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.colName)
+    value = (value * 31) ^ hash(self.colNamespace)
+    value = (value * 31) ^ hash(self.type)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class FindSchemasByColsRespEntry:
+  """
+  Attributes:
+   - schemaName
+   - version
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'schemaName', None, None, ), # 1
+    (2, TType.I32, 'version', None, None, ), # 2
+  )
+
+  def __init__(self, schemaName=None, version=None,):
+    self.schemaName = schemaName
+    self.version = version
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.schemaName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.I32:
+          self.version = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('FindSchemasByColsRespEntry')
+    if self.schemaName is not None:
+      oprot.writeFieldBegin('schemaName', TType.STRING, 1)
+      oprot.writeString(self.schemaName)
+      oprot.writeFieldEnd()
+    if self.version is not None:
+      oprot.writeFieldBegin('version', TType.I32, 2)
+      oprot.writeI32(self.version)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.schemaName)
+    value = (value * 31) ^ hash(self.version)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class FindSchemasByColsResp:
+  """
+  Attributes:
+   - schemaVersions
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'schemaVersions', (TType.STRUCT,(FindSchemasByColsRespEntry, FindSchemasByColsRespEntry.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, schemaVersions=None,):
+    self.schemaVersions = schemaVersions
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.schemaVersions = []
+          (_etype709, _size706) = iprot.readListBegin()
+          for _i710 in xrange(_size706):
+            _elem711 = FindSchemasByColsRespEntry()
+            _elem711.read(iprot)
+            self.schemaVersions.append(_elem711)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('FindSchemasByColsResp')
+    if self.schemaVersions is not None:
+      oprot.writeFieldBegin('schemaVersions', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions))
+      for iter712 in self.schemaVersions:
+        iter712.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.schemaVersions)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class MetaException(TException):
   """
   Attributes:

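For reference, the generated Python structs above serialize and deserialize through TBinaryProtocol like any other Thrift type. A minimal round-trip sketch, assuming Python 2 (the generated code uses iteritems/xrange) and the usual gen-py package name hive_metastore.ttypes, neither of which is part of this patch:

    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore.ttypes import FindSchemasByColsRqst

    # Serialize a request into an in-memory buffer.
    buf = TTransport.TMemoryBuffer()
    rqst = FindSchemasByColsRqst(colName='id', colNamespace=None, type='int')
    rqst.write(TBinaryProtocol.TBinaryProtocol(buf))

    # Read it back from the same bytes and compare; __eq__ compares __dict__.
    rt = FindSchemasByColsRqst()
    rt.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    assert rt == rqst
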
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index ec967a6..88f7ac0 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -98,6 +98,49 @@ module EventRequestType
   VALID_VALUES = Set.new([INSERT, UPDATE, DELETE]).freeze
 end
 
+module SerdeType
+  HIVE = 1
+  SCHEMA_REGISTRY = 2
+  VALUE_MAP = {1 => "HIVE", 2 => "SCHEMA_REGISTRY"}
+  VALID_VALUES = Set.new([HIVE, SCHEMA_REGISTRY]).freeze
+end
+
+module SchemaType
+  HIVE = 1
+  AVRO = 2
+  VALUE_MAP = {1 => "HIVE", 2 => "AVRO"}
+  VALID_VALUES = Set.new([HIVE, AVRO]).freeze
+end
+
+module SchemaCompatibility
+  NONE = 1
+  BACKWARD = 2
+  FORWARD = 3
+  BOTH = 4
+  VALUE_MAP = {1 => "NONE", 2 => "BACKWARD", 3 => "FORWARD", 4 => "BOTH"}
+  VALID_VALUES = Set.new([NONE, BACKWARD, FORWARD, BOTH]).freeze
+end
+
+module SchemaValidation
+  LATEST = 1
+  ALL = 2
+  VALUE_MAP = {1 => "LATEST", 2 => "ALL"}
+  VALID_VALUES = Set.new([LATEST, ALL]).freeze
+end
+
+module SchemaVersionState
+  INITIATED = 1
+  START_REVIEW = 2
+  CHANGES_REQUIRED = 3
+  REVIEWED = 4
+  ENABLED = 5
+  DISABLED = 6
+  ARCHIVED = 7
+  DELETED = 8
+  VALUE_MAP = {1 => "INITIATED", 2 => "START_REVIEW", 3 => "CHANGES_REQUIRED", 4 => "REVIEWED", 5 => "ENABLED", 6 => "DISABLED", 7 => "ARCHIVED", 8 => "DELETED"}
+  VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze
+end
+
 module FunctionType
   JAVA = 1
   VALUE_MAP = {1 => "JAVA"}
@@ -694,16 +737,27 @@ class SerDeInfo
   NAME = 1
   SERIALIZATIONLIB = 2
   PARAMETERS = 3
+  DESCRIPTION = 4
+  SERIALIZERCLASS = 5
+  DESERIALIZERCLASS = 6
+  SERDETYPE = 7
 
   FIELDS = {
     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
     SERIALIZATIONLIB => {:type => ::Thrift::Types::STRING, :name => 'serializationLib'},
-    PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}
+    PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+    DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true},
+    SERIALIZERCLASS => {:type => ::Thrift::Types::STRING, :name => 'serializerClass', :optional => true},
+    DESERIALIZERCLASS => {:type => ::Thrift::Types::STRING, :name => 'deserializerClass', :optional => true},
+    SERDETYPE => {:type => ::Thrift::Types::I32, :name => 'serdeType', :optional => true, :enum_class => ::SerdeType}
   }
 
   def struct_fields; FIELDS; end
 
   def validate
+    unless @serdeType.nil? || ::SerdeType::VALID_VALUES.include?(@serdeType)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field serdeType!')
+    end
   end
 
   ::Thrift::Struct.generate_accessors self
@@ -3951,6 +4005,136 @@ class WMCreateOrDropTriggerToPoolMappingResponse
   ::Thrift::Struct.generate_accessors self
 end
 
+class ISchema
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  SCHEMATYPE = 1
+  NAME = 2
+  DBNAME = 3
+  COMPATIBILITY = 4
+  VALIDATIONLEVEL = 5
+  CANEVOLVE = 6
+  SCHEMAGROUP = 7
+  DESCRIPTION = 8
+
+  FIELDS = {
+    SCHEMATYPE => {:type => ::Thrift::Types::I32, :name => 'schemaType', :enum_class => ::SchemaType},
+    NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    COMPATIBILITY => {:type => ::Thrift::Types::I32, :name => 'compatibility', :enum_class => ::SchemaCompatibility},
+    VALIDATIONLEVEL => {:type => ::Thrift::Types::I32, :name => 'validationLevel', :enum_class => ::SchemaValidation},
+    CANEVOLVE => {:type => ::Thrift::Types::BOOL, :name => 'canEvolve'},
+    SCHEMAGROUP => {:type => ::Thrift::Types::STRING, :name => 'schemaGroup', :optional => true},
+    DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    unless @schemaType.nil? || ::SchemaType::VALID_VALUES.include?(@schemaType)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field schemaType!')
+    end
+    unless @compatibility.nil? || ::SchemaCompatibility::VALID_VALUES.include?(@compatibility)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field compatibility!')
+    end
+    unless @validationLevel.nil? || ::SchemaValidation::VALID_VALUES.include?(@validationLevel)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field validationLevel!')
+    end
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class SchemaVersion
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  SCHEMANAME = 1
+  VERSION = 2
+  CREATEDAT = 3
+  COLS = 4
+  STATE = 5
+  DESCRIPTION = 6
+  SCHEMATEXT = 7
+  FINGERPRINT = 8
+  NAME = 9
+  SERDE = 10
+
+  FIELDS = {
+    SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+    VERSION => {:type => ::Thrift::Types::I32, :name => 'version'},
+    CREATEDAT => {:type => ::Thrift::Types::I64, :name => 'createdAt'},
+    COLS => {:type => ::Thrift::Types::LIST, :name => 'cols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}},
+    STATE => {:type => ::Thrift::Types::I32, :name => 'state', :optional => true, :enum_class => ::SchemaVersionState},
+    DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true},
+    SCHEMATEXT => {:type => ::Thrift::Types::STRING, :name => 'schemaText', :optional => true},
+    FINGERPRINT => {:type => ::Thrift::Types::STRING, :name => 'fingerprint', :optional => true},
+    NAME => {:type => ::Thrift::Types::STRING, :name => 'name', :optional => true},
+    SERDE => {:type => ::Thrift::Types::STRUCT, :name => 'serDe', :class => ::SerDeInfo, :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    unless @state.nil? || ::SchemaVersionState::VALID_VALUES.include?(@state)
+      raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!')
+    end
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class FindSchemasByColsRqst
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  COLNAME = 1
+  COLNAMESPACE = 2
+  TYPE = 3
+
+  FIELDS = {
+    COLNAME => {:type => ::Thrift::Types::STRING, :name => 'colName', :optional => true},
+    COLNAMESPACE => {:type => ::Thrift::Types::STRING, :name => 'colNamespace', :optional => true},
+    TYPE => {:type => ::Thrift::Types::STRING, :name => 'type', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class FindSchemasByColsRespEntry
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  SCHEMANAME = 1
+  VERSION = 2
+
+  FIELDS = {
+    SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+    VERSION => {:type => ::Thrift::Types::I32, :name => 'version'}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class FindSchemasByColsResp
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  SCHEMAVERSIONS = 1
+
+  FIELDS = {
+    SCHEMAVERSIONS => {:type => ::Thrift::Types::LIST, :name => 'schemaVersions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FindSchemasByColsRespEntry}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class MetaException < ::Thrift::Exception
   include ::Thrift::Struct, ::Thrift::Struct_Union
   def initialize(message=nil)

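Each enum-typed field added above is guarded in its struct's validate hook: a nil value passes, and anything outside VALID_VALUES raises a ProtocolException. The same guard, sketched in Python with hypothetical stand-ins for the generated constants:

    # Hypothetical mirror of SchemaVersionState::VALID_VALUES (INITIATED=1 .. DELETED=8).
    VALID_SCHEMA_VERSION_STATES = frozenset(range(1, 9))

    def validate_state(state):
        # None passes because the field is optional in the IDL.
        if state is not None and state not in VALID_SCHEMA_VERSION_STATES:
            raise ValueError('Invalid value of field state!')

    validate_state(5)     # ENABLED: accepted
    validate_state(None)  # unset optional field: accepted
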
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 182cc37..322a8b1 100644
--- a/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -2997,6 +2997,239 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_or_drop_wm_trigger_to_pool_mapping failed: unknown result')
     end
 
+    def create_ischema(schema)
+      send_create_ischema(schema)
+      recv_create_ischema()
+    end
+
+    def send_create_ischema(schema)
+      send_message('create_ischema', Create_ischema_args, :schema => schema)
+    end
+
+    def recv_create_ischema()
+      result = receive_message(Create_ischema_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      return
+    end
+
+    def alter_ischema(schemaName, newSchema)
+      send_alter_ischema(schemaName, newSchema)
+      recv_alter_ischema()
+    end
+
+    def send_alter_ischema(schemaName, newSchema)
+      send_message('alter_ischema', Alter_ischema_args, :schemaName => schemaName, :newSchema => newSchema)
+    end
+
+    def recv_alter_ischema()
+      result = receive_message(Alter_ischema_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      return
+    end
+
+    def get_ischema(schemaName)
+      send_get_ischema(schemaName)
+      return recv_get_ischema()
+    end
+
+    def send_get_ischema(schemaName)
+      send_message('get_ischema', Get_ischema_args, :schemaName => schemaName)
+    end
+
+    def recv_get_ischema()
+      result = receive_message(Get_ischema_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_ischema failed: unknown result')
+    end
+
+    def drop_ischema(schemaName)
+      send_drop_ischema(schemaName)
+      recv_drop_ischema()
+    end
+
+    def send_drop_ischema(schemaName)
+      send_message('drop_ischema', Drop_ischema_args, :schemaName => schemaName)
+    end
+
+    def recv_drop_ischema()
+      result = receive_message(Drop_ischema_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      return
+    end
+
+    def add_schema_version(schemaVersion)
+      send_add_schema_version(schemaVersion)
+      recv_add_schema_version()
+    end
+
+    def send_add_schema_version(schemaVersion)
+      send_message('add_schema_version', Add_schema_version_args, :schemaVersion => schemaVersion)
+    end
+
+    def recv_add_schema_version()
+      result = receive_message(Add_schema_version_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      return
+    end
+
+    def get_schema_version(schemaName, version)
+      send_get_schema_version(schemaName, version)
+      return recv_get_schema_version()
+    end
+
+    def send_get_schema_version(schemaName, version)
+      send_message('get_schema_version', Get_schema_version_args, :schemaName => schemaName, :version => version)
+    end
+
+    def recv_get_schema_version()
+      result = receive_message(Get_schema_version_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema_version failed: unknown result')
+    end
+
+    def get_schema_latest_version(schemaName)
+      send_get_schema_latest_version(schemaName)
+      return recv_get_schema_latest_version()
+    end
+
+    def send_get_schema_latest_version(schemaName)
+      send_message('get_schema_latest_version', Get_schema_latest_version_args, :schemaName => schemaName)
+    end
+
+    def recv_get_schema_latest_version()
+      result = receive_message(Get_schema_latest_version_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema_latest_version failed: unknown result')
+    end
+
+    def get_schema_all_versions(schemaName)
+      send_get_schema_all_versions(schemaName)
+      return recv_get_schema_all_versions()
+    end
+
+    def send_get_schema_all_versions(schemaName)
+      send_message('get_schema_all_versions', Get_schema_all_versions_args, :schemaName => schemaName)
+    end
+
+    def recv_get_schema_all_versions()
+      result = receive_message(Get_schema_all_versions_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema_all_versions failed: unknown result')
+    end
+
+    def drop_schema_version(schemaName, version)
+      send_drop_schema_version(schemaName, version)
+      recv_drop_schema_version()
+    end
+
+    def send_drop_schema_version(schemaName, version)
+      send_message('drop_schema_version', Drop_schema_version_args, :schemaName => schemaName, :version => version)
+    end
+
+    def recv_drop_schema_version()
+      result = receive_message(Drop_schema_version_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      return
+    end
+
+    def get_schemas_by_cols(rqst)
+      send_get_schemas_by_cols(rqst)
+      return recv_get_schemas_by_cols()
+    end
+
+    def send_get_schemas_by_cols(rqst)
+      send_message('get_schemas_by_cols', Get_schemas_by_cols_args, :rqst => rqst)
+    end
+
+    def recv_get_schemas_by_cols()
+      result = receive_message(Get_schemas_by_cols_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schemas_by_cols failed: unknown result')
+    end
+
+    def map_schema_version_to_serde(schemaName, version, serdeName)
+      send_map_schema_version_to_serde(schemaName, version, serdeName)
+      recv_map_schema_version_to_serde()
+    end
+
+    def send_map_schema_version_to_serde(schemaName, version, serdeName)
+      send_message('map_schema_version_to_serde', Map_schema_version_to_serde_args, :schemaName => schemaName, :version => version, :serdeName => serdeName)
+    end
+
+    def recv_map_schema_version_to_serde()
+      result = receive_message(Map_schema_version_to_serde_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      return
+    end
+
+    def set_schema_version_state(schemaName, version, state)
+      send_set_schema_version_state(schemaName, version, state)
+      recv_set_schema_version_state()
+    end
+
+    def send_set_schema_version_state(schemaName, version, state)
+      send_message('set_schema_version_state', Set_schema_version_state_args, :schemaName => schemaName, :version => version, :state => state)
+    end
+
+    def recv_set_schema_version_state()
+      result = receive_message(Set_schema_version_state_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      return
+    end
+
+    def add_serde(serde)
+      send_add_serde(serde)
+      recv_add_serde()
+    end
+
+    def send_add_serde(serde)
+      send_message('add_serde', Add_serde_args, :serde => serde)
+    end
+
+    def recv_add_serde()
+      result = receive_message(Add_serde_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      return
+    end
+
+    def get_serde(serdeName)
+      send_get_serde(serdeName)
+      return recv_get_serde()
+    end
+
+    def send_get_serde(serdeName)
+      send_message('get_serde', Get_serde_args, :serdeName => serdeName)
+    end
+
+    def recv_get_serde()
+      result = receive_message(Get_serde_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_serde failed: unknown result')
+    end
+
   end
 
   class Processor < ::FacebookService::Processor 
@@ -5239,6 +5472,194 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'create_or_drop_wm_trigger_to_pool_mapping', seqid)
     end
 
+    def process_create_ischema(seqid, iprot, oprot)
+      args = read_args(iprot, Create_ischema_args)
+      result = Create_ischema_result.new()
+      begin
+        @handler.create_ischema(args.schema)
+      rescue ::AlreadyExistsException => o1
+        result.o1 = o1
+      rescue ::NoSuchObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      end
+      write_result(result, oprot, 'create_ischema', seqid)
+    end
+
+    def process_alter_ischema(seqid, iprot, oprot)
+      args = read_args(iprot, Alter_ischema_args)
+      result = Alter_ischema_result.new()
+      begin
+        @handler.alter_ischema(args.schemaName, args.newSchema)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'alter_ischema', seqid)
+    end
+
+    def process_get_ischema(seqid, iprot, oprot)
+      args = read_args(iprot, Get_ischema_args)
+      result = Get_ischema_result.new()
+      begin
+        result.success = @handler.get_ischema(args.schemaName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_ischema', seqid)
+    end
+
+    def process_drop_ischema(seqid, iprot, oprot)
+      args = read_args(iprot, Drop_ischema_args)
+      result = Drop_ischema_result.new()
+      begin
+        @handler.drop_ischema(args.schemaName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidOperationException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      end
+      write_result(result, oprot, 'drop_ischema', seqid)
+    end
+
+    def process_add_schema_version(seqid, iprot, oprot)
+      args = read_args(iprot, Add_schema_version_args)
+      result = Add_schema_version_result.new()
+      begin
+        @handler.add_schema_version(args.schemaVersion)
+      rescue ::AlreadyExistsException => o1
+        result.o1 = o1
+      rescue ::NoSuchObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      end
+      write_result(result, oprot, 'add_schema_version', seqid)
+    end
+
+    def process_get_schema_version(seqid, iprot, oprot)
+      args = read_args(iprot, Get_schema_version_args)
+      result = Get_schema_version_result.new()
+      begin
+        result.success = @handler.get_schema_version(args.schemaName, args.version)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_schema_version', seqid)
+    end
+
+    def process_get_schema_latest_version(seqid, iprot, oprot)
+      args = read_args(iprot, Get_schema_latest_version_args)
+      result = Get_schema_latest_version_result.new()
+      begin
+        result.success = @handler.get_schema_latest_version(args.schemaName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_schema_latest_version', seqid)
+    end
+
+    def process_get_schema_all_versions(seqid, iprot, oprot)
+      args = read_args(iprot, Get_schema_all_versions_args)
+      result = Get_schema_all_versions_result.new()
+      begin
+        result.success = @handler.get_schema_all_versions(args.schemaName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_schema_all_versions', seqid)
+    end
+
+    def process_drop_schema_version(seqid, iprot, oprot)
+      args = read_args(iprot, Drop_schema_version_args)
+      result = Drop_schema_version_result.new()
+      begin
+        @handler.drop_schema_version(args.schemaName, args.version)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'drop_schema_version', seqid)
+    end
+
+    def process_get_schemas_by_cols(seqid, iprot, oprot)
+      args = read_args(iprot, Get_schemas_by_cols_args)
+      result = Get_schemas_by_cols_result.new()
+      begin
+        result.success = @handler.get_schemas_by_cols(args.rqst)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'get_schemas_by_cols', seqid)
+    end
+
+    def process_map_schema_version_to_serde(seqid, iprot, oprot)
+      args = read_args(iprot, Map_schema_version_to_serde_args)
+      result = Map_schema_version_to_serde_result.new()
+      begin
+        @handler.map_schema_version_to_serde(args.schemaName, args.version, args.serdeName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'map_schema_version_to_serde', seqid)
+    end
+
+    def process_set_schema_version_state(seqid, iprot, oprot)
+      args = read_args(iprot, Set_schema_version_state_args)
+      result = Set_schema_version_state_result.new()
+      begin
+        @handler.set_schema_version_state(args.schemaName, args.version, args.state)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidOperationException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      end
+      write_result(result, oprot, 'set_schema_version_state', seqid)
+    end
+
+    def process_add_serde(seqid, iprot, oprot)
+      args = read_args(iprot, Add_serde_args)
+      result = Add_serde_result.new()
+      begin
+        @handler.add_serde(args.serde)
+      rescue ::AlreadyExistsException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'add_serde', seqid)
+    end
+
+    def process_get_serde(seqid, iprot, oprot)
+      args = read_args(iprot, Get_serde_args)
+      result = Get_serde_result.new()
+      begin
+        result.success = @handler.get_serde(args.serdeName)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_serde', seqid)
+    end
+
   end
 
   # HELPER FUNCTIONS AND STRUCTURES
@@ -11936,5 +12357,516 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Create_ischema_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMA = 1
+
+    FIELDS = {
+      SCHEMA => {:type => ::Thrift::Types::STRUCT, :name => 'schema', :class => ::ISchema}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Create_ischema_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+    O3 = 3
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Alter_ischema_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+    NEWSCHEMA = 2
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+      NEWSCHEMA => {:type => ::Thrift::Types::STRUCT, :name => 'newSchema', :class => ::ISchema}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Alter_ischema_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_ischema_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_ischema_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::ISchema},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Drop_ischema_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Drop_ischema_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+    O3 = 3
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_schema_version_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMAVERSION = 1
+
+    FIELDS = {
+      SCHEMAVERSION => {:type => ::Thrift::Types::STRUCT, :name => 'schemaVersion', :class => ::SchemaVersion}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_schema_version_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+    O3 = 3
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_version_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+    VERSION = 2
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+      VERSION => {:type => ::Thrift::Types::I32, :name => 'version'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_version_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SchemaVersion},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_latest_version_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_latest_version_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SchemaVersion},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_all_versions_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schema_all_versions_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SchemaVersion}},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Drop_schema_version_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+    VERSION = 2
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+      VERSION => {:type => ::Thrift::Types::I32, :name => 'version'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Drop_schema_version_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schemas_by_cols_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    RQST = 1
+
+    FIELDS = {
+      RQST => {:type => ::Thrift::Types::STRUCT, :name => 'rqst', :class => ::FindSchemasByColsRqst}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_schemas_by_cols_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::FindSchemasByColsResp},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Map_schema_version_to_serde_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+    VERSION = 2
+    SERDENAME = 3
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+      VERSION => {:type => ::Thrift::Types::I32, :name => 'version'},
+      SERDENAME => {:type => ::Thrift::Types::STRING, :name => 'serdeName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Map_schema_version_to_serde_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Set_schema_version_state_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SCHEMANAME = 1
+    VERSION = 2
+    STATE = 3
+
+    FIELDS = {
+      SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'},
+      VERSION => {:type => ::Thrift::Types::I32, :name => 'version'},
+      STATE => {:type => ::Thrift::Types::I32, :name => 'state', :enum_class => ::SchemaVersionState}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+      unless @state.nil? || ::SchemaVersionState::VALID_VALUES.include?(@state)
+        raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!')
+      end
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Set_schema_version_state_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+    O3 = 3
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_serde_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SERDE = 1
+
+    FIELDS = {
+      SERDE => {:type => ::Thrift::Types::STRUCT, :name => 'serde', :class => ::SerDeInfo}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_serde_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_serde_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SERDENAME = 1
+
+    FIELDS = {
+      SERDENAME => {:type => ::Thrift::Types::STRING, :name => 'serdeName'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_serde_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SerDeInfo},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
 end
 

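The client stubs above follow the send/recv pattern used throughout this file, so the new schema RPCs are callable over the standard metastore transport. A hedged end-to-end sketch from the matching Python client (host/port, the gen-py module path, and the schema name are illustrative assumptions, not part of this patch):

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        # Raises NoSuchObjectException if no schema by that name exists.
        schema = client.get_ischema('web_logs')
        versions = client.get_schema_all_versions(schema.name)
    finally:
        transport.close()
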
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 6e0da57..69d26c4 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -53,6 +53,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
 import java.util.regex.Pattern;
 
 import javax.jdo.JDOException;
@@ -79,38 +80,52 @@ import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
 import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
+import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.InsertEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
 import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
 import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent;
 import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadhSchemaVersionEvent;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
 import org.apache.hadoop.hive.metastore.metrics.Metrics;
@@ -7262,7 +7277,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     }
 
-
     @Override
     public WMCreateResourcePlanResponse create_resource_plan(WMCreateResourcePlanRequest request)
         throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
@@ -7405,7 +7419,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         WMGetTriggersForResourePlanRequest request)
         throws NoSuchObjectException, MetaException, TException {
       try {
-        List<WMTrigger> triggers = getMS().getTriggersForResourcePlan(request.getResourcePlanName());
+        List<WMTrigger> triggers =
+            getMS().getTriggersForResourcePlan(request.getResourcePlanName());
         WMGetTriggersForResourePlanResponse response = new WMGetTriggersForResourePlanResponse();
         response.setTriggers(triggers);
         return response;
@@ -7467,6 +7482,25 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
+    public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(
+        WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException,
+        NoSuchObjectException, InvalidObjectException, MetaException, TException {
+      try {
+        if (request.isDrop()) {
+          getMS().dropWMTriggerToPoolMapping(
+              request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
+        } else {
+          getMS().createWMTriggerToPoolMapping(
+              request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
+        }
+        return new WMCreateOrDropTriggerToPoolMappingResponse();
+      } catch (MetaException e) {
+        LOG.error("Exception while trying to create or drop pool mappings", e);
+        throw e;
+      }
+    }
+
+    @Override
     public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)
         throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
       try {
@@ -7478,22 +7512,438 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
     }
 
+    @Override
+    public void create_ischema(ISchema schema) throws TException {
+      startFunction("create_ischema", ": " + schema.getName());
+      boolean success = false;
+      Exception ex = null;
+      RawStore ms = getMS();
+      try {
+        firePreEvent(new PreCreateISchemaEvent(this, schema));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.createISchema(schema);
+
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.CREATE_ISCHEMA, new CreateISchemaEvent(true, this, schema));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_ISCHEMA,
+                new CreateISchemaEvent(success, this, schema), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|AlreadyExistsException e) {
+        LOG.error("Caught exception creating schema", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("create_ischema", success, ex);
+      }
+    }
+
     @Override
-    public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(
-        WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException,
-        NoSuchObjectException, InvalidObjectException, MetaException, TException {
+    public void alter_ischema(String schemaName, ISchema newSchema) throws TException {
+      startFunction("alter_ischema", ": " + schemaName);
+      boolean success = false;
+      Exception ex = null;
+      RawStore ms = getMS();
       try {
-        if (request.isDrop()) {
-          getMS().dropWMTriggerToPoolMapping(
-              request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
-        } else {
-          getMS().createWMTriggerToPoolMapping(
-              request.getResourcePlanName(), request.getTriggerName(), request.getPoolPath());
+        ISchema oldSchema = ms.getISchema(schemaName);
+        if (oldSchema == null) {
+          throw new NoSuchObjectException("Could not find schema " + schemaName);
         }
-        return new WMCreateOrDropTriggerToPoolMappingResponse();
+        firePreEvent(new PreAlterISchemaEvent(this, oldSchema, newSchema));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.alterISchema(schemaName, newSchema);
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.ALTER_ISCHEMA, new AlterISchemaEvent(true, this, oldSchema, newSchema));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_ISCHEMA,
+                new AlterISchemaEvent(success, this, oldSchema, newSchema), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|NoSuchObjectException e) {
+        LOG.error("Caught exception altering schema", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("alter_ischema", success, ex);
+      }
+    }
+
+    @Override
+    public ISchema get_ischema(String schemaName) throws TException {
+      startFunction("get_ischema", ": " + schemaName);
+      Exception ex = null;
+      ISchema schema = null;
+      try {
+        schema = getMS().getISchema(schemaName);
+        if (schema == null) {
+          throw new NoSuchObjectException("No schema named " + schemaName + " exists");
+        }
+        firePreEvent(new PreReadISchemaEvent(this, schema));
+        return schema;
       } catch (MetaException e) {
-        LOG.error("Exception while trying to create or drop pool mappings", e);
+        LOG.error("Caught exception getting schema", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_ischema", schema != null, ex);
+      }
+    }
+
+    @Override
+    public void drop_ischema(String schemaName) throws TException {
+      startFunction("drop_ischema", ": " + schemaName);
+      Exception ex = null;
+      boolean success = false;
+      RawStore ms = getMS();
+      try {
+        // look for any valid versions.  This will also throw NoSuchObjectException if the schema
+        // itself doesn't exist, which is what we want.
+        SchemaVersion latest = ms.getLatestSchemaVersion(schemaName);
+        if (latest != null) {
+          ex = new InvalidOperationException("Schema " + schemaName + " cannot be dropped, it has" +
+              " at least one valid version");
+          throw (InvalidOperationException) ex;
+        }
+        ISchema schema = ms.getISchema(schemaName);
+        firePreEvent(new PreDropISchemaEvent(this, schema));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.dropISchema(schemaName);
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.DROP_ISCHEMA, new DropISchemaEvent(true, this, schema));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.DROP_ISCHEMA,
+                new DropISchemaEvent(success, this, schema), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|NoSuchObjectException e) {
+        LOG.error("Caught exception dropping schema", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("drop_ischema", success, ex);
+      }
+    }
+
+    @Override
+    public void add_schema_version(SchemaVersion schemaVersion) throws TException {
+      startFunction("add_schema_version", ": " + schemaVersion.getSchemaName() + ", " +
+          schemaVersion.getVersion());
+      boolean success = false;
+      Exception ex = null;
+      RawStore ms = getMS();
+      try {
+        // Make sure the referenced schema exists
+        if (ms.getISchema(schemaVersion.getSchemaName()) == null) {
+          throw new NoSuchObjectException("No schema named " + schemaVersion.getSchemaName());
+        }
+        firePreEvent(new PreAddSchemaVersionEvent(this, schemaVersion));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.addSchemaVersion(schemaVersion);
+
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.ADD_SCHEMA_VERSION, new AddSchemaVersionEvent(true, this, schemaVersion));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_SCHEMA_VERSION,
+                new AddSchemaVersionEvent(success, this, schemaVersion), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|AlreadyExistsException e) {
+        LOG.error("Caught exception adding schema version", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("add_schema_version", success, ex);
+      }
+    }
+
+    @Override
+    public SchemaVersion get_schema_version(String schemaName, int version) throws TException {
+      startFunction("get_schema_version", ": " + schemaName);
+      Exception ex = null;
+      SchemaVersion schemaVersion = null;
+      try {
+        schemaVersion = getMS().getSchemaVersion(schemaName, version);
+        if (schemaVersion == null) {
+          throw new NoSuchObjectException("No schema " + schemaName + " with version " + version
+              + " exists");
+        }
+        firePreEvent(new PreReadhSchemaVersionEvent(this, Collections.singletonList(schemaVersion)));
+        return schemaVersion;
+      } catch (MetaException e) {
+        LOG.error("Caught exception getting schema version", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_schema_version", schemaVersion != null, ex);
+      }
+    }
+
+    @Override
+    public SchemaVersion get_schema_latest_version(String schemaName) throws TException {
+      startFunction("get_latest_schema_version", ": " + schemaName);
+      Exception ex = null;
+      SchemaVersion schemaVersion = null;
+      try {
+        schemaVersion = getMS().getLatestSchemaVersion(schemaName);
+        if (schemaVersion == null) {
+          throw new NoSuchObjectException("No versions of schema " + schemaName + " exist");
+        }
+        firePreEvent(new PreReadhSchemaVersionEvent(this, Collections.singletonList(schemaVersion)));
+        return schemaVersion;
+      } catch (MetaException e) {
+        LOG.error("Caught exception getting latest schema version", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_latest_schema_version", schemaVersion != null, ex);
+      }
+    }
+
+    @Override
+    public List<SchemaVersion> get_schema_all_versions(String schemaName) throws TException {
+      startFunction("get_all_schema_versions", ": " + schemaName);
+      Exception ex = null;
+      List<SchemaVersion> schemaVersions = null;
+      try {
+        schemaVersions = getMS().getAllSchemaVersion(schemaName);
+        if (schemaVersions == null) {
+          throw new NoSuchObjectException("No versions of schema " + schemaName + " exist");
+        }
+        firePreEvent(new PreReadhSchemaVersionEvent(this, schemaVersions));
+        return schemaVersions;
+      } catch (MetaException e) {
+        LOG.error("Caught exception getting all schema versions", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_all_schema_versions", schemaVersions != null, ex);
+      }
+    }
+
+    @Override
+    public void drop_schema_version(String schemaName, int version) throws TException {
+      startFunction("drop_schema_version", ": " + schemaName);
+      Exception ex = null;
+      boolean success = false;
+      RawStore ms = getMS();
+      try {
+        SchemaVersion schemaVersion = ms.getSchemaVersion(schemaName, version);
+        if (schemaVersion == null) {
+          throw new NoSuchObjectException("No schema " + schemaName + " of version " + version);
+        }
+        firePreEvent(new PreDropSchemaVersionEvent(this, schemaVersion));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.dropSchemaVersion(schemaName, version);
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.DROP_SCHEMA_VERSION, new DropSchemaVersionEvent(true, this, schemaVersion));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.DROP_SCHEMA_VERSION,
+                new DropSchemaVersionEvent(success, this, schemaVersion), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|NoSuchObjectException e) {
+        LOG.error("Caught exception dropping schema version", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("drop_schema_version", success, ex);
+      }
+    }
+
+    @Override
+    public FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst) throws TException {
+      startFunction("get_schemas_by_cols");
+      Exception ex = null;
+      List<SchemaVersion> schemaVersions = Collections.emptyList();
+      try {
+        schemaVersions = getMS().getSchemaVersionsByColumns(rqst.getColName(),
+            rqst.getColNamespace(), rqst.getType());
+        firePreEvent(new PreReadhSchemaVersionEvent(this, schemaVersions));
+        final List<FindSchemasByColsRespEntry> entries = new ArrayList<>(schemaVersions.size());
+        schemaVersions.forEach(schemaVersion -> entries.add(
+            new FindSchemasByColsRespEntry(schemaVersion.getSchemaName(), schemaVersion.getVersion())));
+        return new FindSchemasByColsResp(entries);
+      } catch (MetaException e) {
+        LOG.error("Caught exception doing schema version query", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_schemas_by_cols", !schemaVersions.isEmpty(), ex);
+      }
+    }
+
+    @Override
+    public void map_schema_version_to_serde(String schemaName, int version, String serdeName)
+        throws TException {
+      startFunction("map_schema_version_to_serde");
+      boolean success = false;
+      Exception ex = null;
+      RawStore ms = getMS();
+      try {
+        SchemaVersion oldSchemaVersion = ms.getSchemaVersion(schemaName, version);
+        if (oldSchemaVersion == null) {
+          throw new NoSuchObjectException("No schema " + schemaName + " of version " + version);
+        }
+        SerDeInfo serde = ms.getSerDeInfo(serdeName);
+        if (serde == null) {
+          throw new NoSuchObjectException("No SerDe named " + serdeName);
+        }
+        SchemaVersion newSchemaVersion = new SchemaVersion(oldSchemaVersion);
+        newSchemaVersion.setSerDe(serde);
+        firePreEvent(new PreAlterSchemaVersionEvent(this, oldSchemaVersion, newSchemaVersion));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.alterSchemaVersion(schemaName, version, newSchemaVersion);
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.ALTER_SCHEMA_VERSION, new AlterSchemaVersionEvent(true, this,
+                        oldSchemaVersion, newSchemaVersion));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_SCHEMA_VERSION,
+                new AlterSchemaVersionEvent(success, this, oldSchemaVersion, newSchemaVersion), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|NoSuchObjectException e) {
+        LOG.error("Caught exception mapping schema version to serde", e);
+        ex = e;
         throw e;
+      } finally {
+        endFunction("map_schema_version_to_serde", success, ex);
+      }
+    }
+
+    @Override
+    public void set_schema_version_state(String schemaName, int version,
+                                         SchemaVersionState state) throws TException {
+      startFunction("set_schema_version_state");
+      boolean success = false;
+      Exception ex = null;
+      RawStore ms = getMS();
+      try {
+        SchemaVersion oldSchemaVersion = ms.getSchemaVersion(schemaName, version);
+        if (oldSchemaVersion == null) {
+          throw new NoSuchObjectException("No schema " + schemaName + " of version " + version);
+        }
+        SchemaVersion newSchemaVersion = new SchemaVersion(oldSchemaVersion);
+        newSchemaVersion.setState(state);
+        firePreEvent(new PreAlterSchemaVersionEvent(this, oldSchemaVersion, newSchemaVersion));
+        Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+        ms.openTransaction();
+        try {
+          ms.alterSchemaVersion(schemaName, version, newSchemaVersion);
+          if (!transactionalListeners.isEmpty()) {
+            transactionalListenersResponses =
+                MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventType.ALTER_SCHEMA_VERSION, new AlterSchemaVersionEvent(true, this,
+                        oldSchemaVersion, newSchemaVersion));
+          }
+          success = ms.commitTransaction();
+        } finally {
+          if (!success) ms.rollbackTransaction();
+          if (!listeners.isEmpty()) {
+            MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_SCHEMA_VERSION,
+                new AlterSchemaVersionEvent(success, this, oldSchemaVersion, newSchemaVersion), null,
+                transactionalListenersResponses, ms);
+          }
+        }
+      } catch (MetaException|NoSuchObjectException e) {
+        LOG.error("Caught exception changing schema version state", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("set_schema_version_state", success, ex);
+      }
+    }
+
+    @Override
+    public void add_serde(SerDeInfo serde) throws TException {
+      startFunction("create_serde", ": " + serde.getName());
+      Exception ex = null;
+      boolean success = false;
+      RawStore ms = getMS();
+      try {
+        ms.openTransaction();
+        ms.addSerde(serde);
+        success = ms.commitTransaction();
+      } catch (MetaException|AlreadyExistsException e) {
+        LOG.error("Caught exception creating serde", e);
+        ex = e;
+        throw e;
+      } finally {
+        if (!success) ms.rollbackTransaction();
+        endFunction("create_serde", success, ex);
+      }
+    }
+
+    @Override
+    public SerDeInfo get_serde(String serdeName) throws TException {
+      startFunction("get_serde", ": " + serdeName);
+      Exception ex = null;
+      SerDeInfo serde = null;
+      try {
+        serde = getMS().getSerDeInfo(serdeName);
+        if (serde == null) {
+          throw new NoSuchObjectException("No serde named " + serdeName + " exists");
+        }
+        return serde;
+      } catch (MetaException e) {
+        LOG.error("Caught exception getting serde", e);
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_serde", serde != null, ex);
       }
     }
   }
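
The six write-path handlers above (alter_ischema, drop_ischema, add_schema_version,
drop_schema_version, map_schema_version_to_serde, set_schema_version_state) all repeat
one transaction-plus-listeners template. The sketch below distills that shape for
reference only; the StoreOp interface and the writeWithListeners name are illustrative
and not part of the patch:

    // Hypothetical helper capturing the shared shape of the handlers above.
    interface StoreOp { void run() throws Exception; }

    private void writeWithListeners(RawStore ms, EventType type,
        ListenerEvent txnEvent, ListenerEvent postEvent, StoreOp op) throws Exception {
      Map<String, String> txnResponses = Collections.emptyMap();
      boolean success = false;
      ms.openTransaction();
      try {
        op.run();                                    // the actual RawStore mutation
        if (!transactionalListeners.isEmpty()) {
          // fired inside the still-open transaction, always with status=true
          txnResponses = MetaStoreListenerNotifier.notifyEvent(
              transactionalListeners, type, txnEvent);
        }
        success = ms.commitTransaction();
      } finally {
        if (!success) ms.rollbackTransaction();
        if (!listeners.isEmpty()) {
          // the real handlers rebuild the event here with the actual 'success' flag
          MetaStoreListenerNotifier.notifyEvent(
              listeners, type, postEvent, null, txnResponses, ms);
        }
      }
    }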


[04/50] [abbrv] hive git commit: HIVE-18241: Query with LEFT SEMI JOIN producing wrong result (Vineet Garg, reviewed by Jesus Camacho Rodriguez)

Posted by ga...@apache.org.
HIVE-18241: Query with LEFT SEMI JOIN producing wrong result (Vineet Garg, reviewed by Jesus Camacho Rodriguez)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8ab523b9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8ab523b9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8ab523b9

Branch: refs/heads/standalone-metastore
Commit: 8ab523b9612e81aec255051bcb2b4137419cfa3b
Parents: b7be4ac
Author: Vineet Garg <vg...@apache.com>
Authored: Wed Dec 13 12:23:37 2017 -0800
Committer: Vineet Garg <vg...@apache.com>
Committed: Wed Dec 13 12:25:02 2017 -0800

----------------------------------------------------------------------
 .../rules/HiveRemoveGBYSemiJoinRule.java        |  66 ++++----
 ql/src/test/queries/clientpositive/semijoin.q   |   7 +
 .../results/clientpositive/llap/semijoin.q.out  | 149 +++++++++++++++++++
 .../results/clientpositive/spark/semijoin.q.out | 148 ++++++++++++++++++
 4 files changed, 341 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8ab523b9/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRemoveGBYSemiJoinRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRemoveGBYSemiJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRemoveGBYSemiJoinRule.java
index 4e6cce9..4992e70 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRemoveGBYSemiJoinRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRemoveGBYSemiJoinRule.java
@@ -22,58 +22,66 @@ import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Aggregate;
 import org.apache.calcite.rel.core.JoinInfo;
 import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.tools.RelBuilder;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * Planner rule that removes a {@code Aggregate} from a HiveSemiJoin
- * right input
+ * right input.
  */
 public class HiveRemoveGBYSemiJoinRule extends RelOptRule {
 
   protected static final Logger LOG = LoggerFactory.getLogger(HiveRemoveGBYSemiJoinRule.class);
   public static final HiveRemoveGBYSemiJoinRule INSTANCE =
-      new HiveRemoveGBYSemiJoinRule() ;
+      new HiveRemoveGBYSemiJoinRule();
 
   public HiveRemoveGBYSemiJoinRule() {
     super(
-            operand(HiveSemiJoin.class,
-                some(
-                    operand(RelNode.class, any()),
-                    operand(Aggregate.class, any()))),
+        operand(HiveSemiJoin.class,
+            some(
+                operand(RelNode.class, any()),
+                operand(Aggregate.class, any()))),
         HiveRelFactories.HIVE_BUILDER, "HiveRemoveGBYSemiJoinRule");
   }
 
-    @Override public void onMatch(RelOptRuleCall call) {
-      final HiveSemiJoin semijoin= call.rel(0);
+  @Override public void onMatch(RelOptRuleCall call) {
+    final HiveSemiJoin semijoin = call.rel(0);
+
+    if(semijoin.getJoinType() != JoinRelType.INNER) {
+      return;
+    }
+    final RelNode left = call.rel(1);
+    final Aggregate rightAggregate = call.rel(2);
 
-      if(semijoin.getJoinType() != JoinRelType.INNER) {
-        return;
-      }
-      final RelNode left = call.rel(1);
-      final Aggregate rightAggregate= call.rel(2);
+    // if grouping sets are involved do early return
+    if(rightAggregate.getGroupType() != Aggregate.Group.SIMPLE) {
+      return;
+    }
 
-      // if grouping sets are involved do early return
-      if(rightAggregate.indicator) {
-        return;
-      }
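+    // Aggregate.indicator flags grouping-set indicator columns; those also block the rewrite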
+    if(rightAggregate.indicator) {
+      return;
+    }
 
-      // if there is any aggregate function this group by is not un-necessary
-      if(!rightAggregate.getAggCallList().isEmpty()) {
-        return;
-      }
-      final JoinInfo joinInfo = semijoin.analyzeCondition();
+    // if there is any aggregate function, this group by is necessary and cannot be removed
+    if(!rightAggregate.getAggCallList().isEmpty()) {
+      return;
+    }
+    final JoinInfo joinInfo = semijoin.analyzeCondition();
 
-      boolean shouldTransform = joinInfo.rightSet().equals(
-          ImmutableBitSet.range(rightAggregate.getGroupCount()));
-      if(shouldTransform) {
-        RelNode newSemiJoin = call.builder().push(left).push(rightAggregate.getInput()).semiJoin(semijoin.getCondition()).build();
-        call.transformTo(newSemiJoin);
-      }
+    boolean shouldTransform = joinInfo.rightSet().equals(
+        ImmutableBitSet.range(rightAggregate.getGroupCount()));
+    if(shouldTransform) {
+      final RelBuilder relBuilder = call.builder();
+      RelNode newRightInput = relBuilder.project(relBuilder.push(rightAggregate.getInput()).
+          fields(rightAggregate.getGroupSet().asList())).build();
+      RelNode newSemiJoin = call.builder().push(left).push(newRightInput)
+          .semiJoin(semijoin.getCondition()).build();
+      call.transformTo(newSemiJoin);
     }
   }
+}
 // End HiveRemoveGBYSemiJoinRule
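
The substance of the fix: the old rule swapped in the aggregate's raw input, but the
semijoin's right-side key ordinals were computed against the aggregate's output (its
group columns, 0..groupCount-1). Unless those happened to be the leading columns of the
input, the keys bound to the wrong fields, producing the wrong results reported in
HIVE-18241. The rewrite now projects the input down to exactly the group columns first.
A condensed restatement of the corrected tail of onMatch (Calcite RelBuilder calls as
used in the patch):

    final RelBuilder relBuilder = call.builder();
    // Keep only the GROUP BY columns, in group-set order, so the semijoin's
    // right-side key ordinals (0..groupCount-1) still point at the same fields.
    RelNode newRightInput = relBuilder
        .push(rightAggregate.getInput())
        .project(relBuilder.fields(rightAggregate.getGroupSet().asList()))
        .build();
    RelNode newSemiJoin = call.builder()
        .push(left)
        .push(newRightInput)
        .semiJoin(semijoin.getCondition())
        .build();
    call.transformTo(newSemiJoin);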

http://git-wip-us.apache.org/repos/asf/hive/blob/8ab523b9/ql/src/test/queries/clientpositive/semijoin.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/semijoin.q b/ql/src/test/queries/clientpositive/semijoin.q
index c41d222..8f3a6ad 100644
--- a/ql/src/test/queries/clientpositive/semijoin.q
+++ b/ql/src/test/queries/clientpositive/semijoin.q
@@ -77,3 +77,10 @@ select a.key from t3 a left semi join t2 b on a.key = b.key left outer join t1 c
 
 explain select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
 select a.key from t3 a left semi join t2 b on a.value = b.value where a.key > 100;
+
+explain select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value;
+select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value;

http://git-wip-us.apache.org/repos/asf/hive/blob/8ab523b9/ql/src/test/results/clientpositive/llap/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/semijoin.q.out b/ql/src/test/results/clientpositive/llap/semijoin.q.out
index f788ae3..82cee33 100644
--- a/ql/src/test/results/clientpositive/llap/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/llap/semijoin.q.out
@@ -2745,3 +2745,152 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Input: default@t3
 #### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[30][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
+PREHOOK: query: explain select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (XPROD_EDGE), Reducer 5 (XPROD_EDGE)
+        Reducer 5 <- Map 4 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: outr
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1, _col2
+                residual filter predicates: {(_col1 > _col2)}
+                Statistics: Num rows: 41666 Data size: 11208154 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: _col0 (type: string), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 41666 Data size: 7416548 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    keys: _col0 (type: string), _col1 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 10609 Data size: 1888402 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 10609 Data size: 1888402 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 5 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 250 Data size: 22750 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join MERGEJOIN[30][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
+PREHOOK: query: select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/8ab523b9/ql/src/test/results/clientpositive/spark/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/semijoin.q.out b/ql/src/test/results/clientpositive/spark/semijoin.q.out
index 8f5354c..6932efa 100644
--- a/ql/src/test/results/clientpositive/spark/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/semijoin.q.out
@@ -2590,3 +2590,151 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Input: default@t3
 #### A masked pattern was here ####
+Warning: Shuffle Join JOIN[15][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product
+PREHOOK: query: explain select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 4 (PARTITION-LEVEL SORT, 2)
+        Reducer 4 <- Map 3 (PARTITION-LEVEL SORT, 1), Reducer 6 (PARTITION-LEVEL SORT, 1)
+        Reducer 6 <- Map 5 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: outr
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key is not null and value is not null) (type: boolean)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: a
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: key is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: value is not null (type: boolean)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: value (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                keys:
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 45832 Data size: 1019683 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 45832 Data size: 1019683 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 125000 Data size: 2781000 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col1 > _col2) (type: boolean)
+                  Statistics: Num rows: 41666 Data size: 926985 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 41666 Data size: 926985 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: string), _col1 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 41666 Data size: 926985 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                        Statistics: Num rows: 41666 Data size: 926985 Basic stats: COMPLETE Column stats: NONE
+        Reducer 6 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[15][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product
+PREHOOK: query: select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from src outr left semi join
+    (select a.key, b.value from src a join (select distinct value from src) b on a.value > b.value group by a.key, b.value) inr
+    on outr.key=inr.key and outr.value=inr.value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####


[08/50] [abbrv] hive git commit: HIVE-18153 : refactor reopen and file management in TezTask (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by ga...@apache.org.
HIVE-18153 : refactor reopen and file management in TezTask (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/89dbf4e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/89dbf4e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/89dbf4e9

Branch: refs/heads/standalone-metastore
Commit: 89dbf4e904592318da954eaf94548ec1b130e17c
Parents: ca96613
Author: sergey <se...@apache.org>
Authored: Thu Dec 14 15:53:44 2017 -0800
Committer: sergey <se...@apache.org>
Committed: Thu Dec 14 15:53:44 2017 -0800

----------------------------------------------------------------------
 ql/pom.xml                                      |   2 -
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |   1 +
 .../hadoop/hive/ql/exec/tez/DagUtils.java       | 102 ++++-----
 .../hadoop/hive/ql/exec/tez/TezSessionPool.java |  23 +-
 .../hive/ql/exec/tez/TezSessionPoolManager.java |  36 +---
 .../hive/ql/exec/tez/TezSessionPoolSession.java |  20 +-
 .../hive/ql/exec/tez/TezSessionState.java       | 208 +++++++++++--------
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java | 189 ++++++-----------
 .../hive/ql/exec/tez/WorkloadManager.java       | 118 +++++++----
 .../ql/exec/tez/monitoring/TezJobMonitor.java   |   2 +-
 .../ql/udf/generic/GenericUDTFGetSplits.java    |   5 +-
 .../hive/ql/exec/tez/SampleTezSessionState.java |   7 +-
 .../hive/ql/exec/tez/TestTezSessionPool.java    |  30 ++-
 .../hadoop/hive/ql/exec/tez/TestTezTask.java    |  73 ++-----
 .../hive/ql/exec/tez/TestWorkloadManager.java   |  10 +-
 15 files changed, 378 insertions(+), 448 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index f35a4c8..cbf71cd 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -225,8 +225,6 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-registry</artifactId>
       <version>${hadoop.version}</version>
-      <optional>true</optional>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index 88a75ed..3f470eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -400,6 +400,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
       SessionState ss = SessionState.get();
       // TODO: why is there a TezSession in MR ExecDriver?
       if (ss != null && HiveConf.getVar(job, ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
+        // TODO: this is the only place that uses keepTmpDir. Why?
         TezSessionPoolManager.closeIfNotDefault(ss.getTezSession(), true);
       }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 6c1afa6..e4a6f62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -17,14 +17,13 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.Collection;
 
+import java.util.concurrent.ConcurrentHashMap;
 import com.google.common.base.Function;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
-
 import javax.security.auth.login.LoginException;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -40,7 +39,6 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-
 import org.apache.commons.io.FilenameUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.tez.mapreduce.common.MRInputSplitDistributor;
@@ -541,16 +539,15 @@ public class DagUtils {
     }
   }
 
-  private Vertex createVertex(JobConf conf, MergeJoinWork mergeJoinWork, LocalResource appJarLr,
-      List<LocalResource> additionalLr, FileSystem fs, Path mrScratchDir, Context ctx,
-      VertexType vertexType)
-      throws Exception {
+  private Vertex createVertex(JobConf conf, MergeJoinWork mergeJoinWork, FileSystem fs,
+      Path mrScratchDir, Context ctx, VertexType vertexType,
+      Map<String, LocalResource> localResources) throws Exception {
     Utilities.setMergeWork(conf, mergeJoinWork, mrScratchDir, false);
     if (mergeJoinWork.getMainWork() instanceof MapWork) {
       List<BaseWork> mapWorkList = mergeJoinWork.getBaseWorkList();
       MapWork mapWork = (MapWork) (mergeJoinWork.getMainWork());
-      Vertex mergeVx =
-          createVertex(conf, mapWork, appJarLr, additionalLr, fs, mrScratchDir, ctx, vertexType);
+      Vertex mergeVx = createVertex(
+          conf, mapWork, fs, mrScratchDir, ctx, vertexType, localResources);
 
       conf.setClass("mapred.input.format.class", HiveInputFormat.class, InputFormat.class);
       // mapreduce.tez.input.initializer.serialize.event.payload should be set
@@ -580,10 +577,8 @@ public class DagUtils {
       mergeVx.setVertexManagerPlugin(desc);
       return mergeVx;
     } else {
-      Vertex mergeVx =
-          createVertex(conf, (ReduceWork) mergeJoinWork.getMainWork(), appJarLr, additionalLr, fs,
-              mrScratchDir, ctx);
-      return mergeVx;
+      return createVertex(conf,
+          (ReduceWork) mergeJoinWork.getMainWork(), fs, mrScratchDir, ctx, localResources);
     }
   }
 
@@ -591,11 +586,8 @@ public class DagUtils {
    * Helper function to create Vertex from MapWork.
    */
   private Vertex createVertex(JobConf conf, MapWork mapWork,
-      LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
-      Path mrScratchDir, Context ctx, VertexType vertexType)
-      throws Exception {
-
-    Path tezDir = getTezDir(mrScratchDir);
+      FileSystem fs, Path mrScratchDir, Context ctx, VertexType vertexType,
+      Map<String, LocalResource> localResources) throws Exception {
 
     // set up the operator plan
     Utilities.cacheMapWork(conf, mapWork, mrScratchDir);
@@ -726,13 +718,6 @@ public class DagUtils {
     // Add the actual source input
     String alias = mapWork.getAliasToWork().keySet().iterator().next();
     map.addDataSource(alias, dataSource);
-
-    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
-    localResources.put(getBaseName(appJarLr), appJarLr);
-    for (LocalResource lr: additionalLr) {
-      localResources.put(getBaseName(lr), lr);
-    }
-
     map.addTaskLocalFiles(localResources);
     return map;
   }
@@ -772,9 +757,9 @@ public class DagUtils {
   /*
    * Helper function to create Vertex for given ReduceWork.
    */
-  private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
-      LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
-      Path mrScratchDir, Context ctx) throws Exception {
+  private Vertex createVertex(JobConf conf, ReduceWork reduceWork, FileSystem fs,
+      Path mrScratchDir, Context ctx, Map<String, LocalResource> localResources)
+          throws Exception {
 
     // set up operator plan
     conf.set(Utilities.INPUT_NAME, reduceWork.getName());
@@ -796,17 +781,28 @@ public class DagUtils {
     reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
     reducer.setExecutionContext(vertexExecutionContext);
     reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
-
-    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
-    localResources.put(getBaseName(appJarLr), appJarLr);
-    for (LocalResource lr: additionalLr) {
-      localResources.put(getBaseName(lr), lr);
-    }
     reducer.addTaskLocalFiles(localResources);
-
     return reducer;
   }
 
+  public static Map<String, LocalResource> createTezLrMap(
+      LocalResource appJarLr, Collection<LocalResource> additionalLr) {
+    // Note: interestingly this would exclude LLAP app jars that the session adds for LLAP case.
+    //       Of course it doesn't matter because vertices run ON LLAP and have those jars, and
+    //       moreover we anyway don't localize jars for the vertices on LLAP; but in theory
+    //       this is still crappy code that assumes there's one and only one app jar.
+    Map<String, LocalResource> localResources = new HashMap<>();
+    if (appJarLr != null) {
+      localResources.put(getBaseName(appJarLr), appJarLr);
+    }
+    if (additionalLr != null) {
+      for (LocalResource lr: additionalLr) {
+        localResources.put(getBaseName(lr), lr);
+      }
+    }
+    return localResources;
+  }
+
   /*
    * Helper method to create a yarn local resource.
    */
@@ -1064,7 +1060,7 @@ public class DagUtils {
   /*
    * Helper function to retrieve the basename of a local resource
    */
-  public String getBaseName(LocalResource lr) {
+  public static String getBaseName(LocalResource lr) {
     return FilenameUtils.getName(lr.getResource().getFile());
   }
 
@@ -1254,30 +1250,26 @@ public class DagUtils {
    * @param work The instance of BaseWork representing the actual work to be performed
    * by this vertex.
    * @param scratchDir HDFS scratch dir for this execution unit.
-   * @param appJarLr Local resource for hive-exec.
-   * @param additionalLr
    * @param fileSystem FS corresponding to scratchDir and LocalResources
    * @param ctx This query's context
    * @return Vertex
    */
   @SuppressWarnings("deprecation")
   public Vertex createVertex(JobConf conf, BaseWork work,
-      Path scratchDir, LocalResource appJarLr,
-      List<LocalResource> additionalLr, FileSystem fileSystem, Context ctx, boolean hasChildren,
-      TezWork tezWork, VertexType vertexType) throws Exception {
+      Path scratchDir, FileSystem fileSystem, Context ctx, boolean hasChildren,
+      TezWork tezWork, VertexType vertexType, Map<String, LocalResource> localResources) throws Exception {
 
     Vertex v = null;
     // simply dispatch the call to the right method for the actual (sub-) type of
     // BaseWork.
     if (work instanceof MapWork) {
-      v = createVertex(conf, (MapWork) work, appJarLr, additionalLr, fileSystem, scratchDir, ctx,
-              vertexType);
+      v = createVertex(
+          conf, (MapWork) work, fileSystem, scratchDir, ctx, vertexType, localResources);
     } else if (work instanceof ReduceWork) {
-      v = createVertex(conf, (ReduceWork) work, appJarLr,
-          additionalLr, fileSystem, scratchDir, ctx);
+      v = createVertex(conf, (ReduceWork) work, fileSystem, scratchDir, ctx, localResources);
     } else if (work instanceof MergeJoinWork) {
-      v = createVertex(conf, (MergeJoinWork) work, appJarLr, additionalLr, fileSystem, scratchDir,
-              ctx, vertexType);
+      v = createVertex(
+          conf, (MergeJoinWork) work, fileSystem, scratchDir, ctx, vertexType, localResources);
       // set VertexManagerPlugin if whether it's a cross product destination vertex
       List<String> crossProductSources = new ArrayList<>();
       for (BaseWork parentWork : tezWork.getParents(work)) {
@@ -1522,4 +1514,18 @@ public class DagUtils {
     // -Xmx not specified
     return -1;
   }
+
+  // The utility of this method is not certain.
+  public static Map<String, LocalResource> getResourcesUpdatableForAm(
+      Collection<LocalResource> allNonAppResources) {
+    HashMap<String, LocalResource> allNonAppFileResources = new HashMap<>();
+    if (allNonAppResources == null) return allNonAppFileResources;
+    for (LocalResource lr : allNonAppResources) {
+      if (lr.getType() == LocalResourceType.FILE) {
+        // TEZ AM will only localize FILE (no script operators in the AM)
+        allNonAppFileResources.put(DagUtils.getBaseName(lr), lr);
+      }
+    }
+    return allNonAppFileResources;
+  }
 }
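
The two new static helpers centralize local-resource bookkeeping that each
createVertex() overload previously rebuilt by hand from appJarLr plus additionalLr.
A minimal sketch of how a caller such as TezTask might wire them up after this
refactor; the variables (appJarLr, additionalLr, work, ...) are illustrative:

    // Build the vertex LR map once and thread it through createVertex().
    Map<String, LocalResource> lrMap = DagUtils.createTezLrMap(appJarLr, additionalLr);
    Vertex v = dagUtils.createVertex(jobConf, work, scratchDir, fs, ctx,
        hasChildren, tezWork, vertexType, lrMap);

    // For the Tez AM only FILE-type resources are localized (no script
    // operators run in the AM), so filter before updating AM resources:
    Map<String, LocalResource> amResources =
        DagUtils.getResourcesUpdatableForAm(additionalLr);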

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java
index 3bcf657..6e2dfe1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java
@@ -217,7 +217,10 @@ class TezSessionPool<SessionType extends TezSessionPoolSession> {
         }
       }
       // If there are async requests, satisfy them first.
-      if (!asyncRequests.isEmpty() && session.tryUse(false)) {
+      if (!asyncRequests.isEmpty()) {
+        if (!session.tryUse(false)) {
+          return true; // Session has expired and will be returned to us later.
+        }
         future = asyncRequests.poll();
       }
       if (future == null) {
@@ -238,24 +241,12 @@ class TezSessionPool<SessionType extends TezSessionPoolSession> {
     return true;
   }
 
-  void replaceSession(SessionType oldSession, boolean keepTmpDir,
-      String[] additionalFilesArray) throws Exception {
-    // Retain the stuff from the old session.
+  void replaceSession(SessionType oldSession) throws Exception {
     // Re-setting the queue config is an old hack that we may remove in future.
     SessionType newSession = sessionObjFactory.create(oldSession);
-    Path scratchDir = oldSession.getTezScratchDir();
     String queueName = oldSession.getQueueName();
-    Set<String> additionalFiles = null;
-    if (additionalFilesArray != null) {
-      additionalFiles = new HashSet<>();
-      for (String file : additionalFilesArray) {
-        additionalFiles.add(file);
-      }
-    } else {
-      additionalFiles = oldSession.getAdditionalFilesNotFromConf();
-    }
     try {
-      oldSession.close(keepTmpDir);
+      oldSession.close(false);
     } finally {
       poolLock.lock();
       try {
@@ -280,7 +271,7 @@ class TezSessionPool<SessionType extends TezSessionPoolSession> {
         // probably just get rid of the thread local usage in TezSessionState.
         SessionState.setCurrentSessionState(parentSessionState);
       }
-      newSession.open(additionalFiles, scratchDir);
+      newSession.open();
       if (!putSessionBack(newSession, false)) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Closing an unneeded session " + newSession

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
index 8417ebb..3c1b8d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java
@@ -18,16 +18,14 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState.HiveResources;
+
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
@@ -43,7 +41,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.tez.dag.api.TezConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -444,44 +441,35 @@ public class TezSessionPoolManager extends TezSessionPoolSession.AbstractTrigger
 
   /** Reopens the session that was found to not be running. */
   @Override
-  public TezSessionState reopen(TezSessionState sessionState,
-      Configuration conf, String[] additionalFiles) throws Exception {
+  public TezSessionState reopen(TezSessionState sessionState) throws Exception {
     HiveConf sessionConf = sessionState.getConf();
     if (sessionState.getQueueName() != null
         && sessionConf.get(TezConfiguration.TEZ_QUEUE_NAME) == null) {
       sessionConf.set(TezConfiguration.TEZ_QUEUE_NAME, sessionState.getQueueName());
     }
-    reopenInternal(sessionState, additionalFiles);
+    reopenInternal(sessionState);
     return sessionState;
   }
 
   static void reopenInternal(
-      TezSessionState sessionState, String[] additionalFiles) throws Exception {
-    Set<String> oldAdditionalFiles = sessionState.getAdditionalFilesNotFromConf();
-    // TODO: implies the session files and the array are the same if not null; why? very brittle
-    if ((oldAdditionalFiles == null || oldAdditionalFiles.isEmpty())
-        && (additionalFiles != null)) {
-      oldAdditionalFiles = new HashSet<>();
-      for (String file : additionalFiles) {
-        oldAdditionalFiles.add(file);
-      }
-    }
+      TezSessionState sessionState) throws Exception {
+    HiveResources resources = sessionState.extractHiveResources();
     // TODO: close basically resets the object to a bunch of nulls.
     //       We should ideally not reuse the object because it's pointless and error-prone.
-    sessionState.close(true);
+    sessionState.close(false);
     // Note: scratchdir is reused implicitly because the sessionId is the same.
-    sessionState.open(oldAdditionalFiles, null);
+    sessionState.open(resources);
   }
 
 
-  public void closeNonDefaultSessions(boolean keepTmpDir) throws Exception {
+  public void closeNonDefaultSessions() throws Exception {
     List<TezSessionState> sessionsToClose = null;
     synchronized (openSessions) {
       sessionsToClose = new ArrayList<TezSessionState>(openSessions);
     }
     for (TezSessionState sessionState : sessionsToClose) {
       System.err.println("Shutting down tez session.");
-      closeIfNotDefault(sessionState, keepTmpDir);
+      closeIfNotDefault(sessionState, false);
     }
   }
 
@@ -492,9 +480,7 @@ public class TezSessionPoolManager extends TezSessionPoolSession.AbstractTrigger
     if (queueName == null) {
       LOG.warn("Pool session has a null queue: " + oldSession);
     }
-    TezSessionPoolSession newSession = createAndInitSession(
-      queueName, oldSession.isDefault(), oldSession.getConf());
-    defaultSessionPool.replaceSession(oldSession, false, null);
+    defaultSessionPool.replaceSession(oldSession);
   }
 
   /** Called by TezSessionPoolSession when opened. */

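The hunks above change the reopen contract: instead of threading a conf and a file array through, reopenInternal detaches the session's HiveResources, closes the session, and reopens it with the same resources so nothing is re-localized. A stand-alone sketch of that hand-off, with illustrative stand-in types rather than the real Hive classes:

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative stand-ins only; the real classes are TezSessionState and
    // TezSessionState.HiveResources in org.apache.hadoop.hive.ql.exec.tez.
    public class ReopenSketch {
      static final class Resources {
        final String dagResourcesDir; // stands in for the HDFS Path
        final Set<String> additionalFilesNotFromConf = new HashSet<>();
        Resources(String dir) { this.dagResourcesDir = dir; }
      }

      static final class Session {
        private Resources resources = new Resources("/tmp/hive/session-1-resources");

        Resources extractResources() {   // mirrors extractHiveResources()
          Resources r = resources;
          resources = null;              // detach so close() will not delete the dir
          return r;
        }

        void close(boolean keepDagFilesDir) {
          if (!keepDagFilesDir && resources != null) {
            System.out.println("deleting " + resources.dagResourcesDir);
          }
          resources = null;
        }

        void open(Resources r) {         // adopt already-localized resources
          this.resources = r;
        }
      }

      public static void main(String[] args) {
        Session session = new Session();
        Resources saved = session.extractResources(); // 1. detach the resources
        session.close(false);                         // 2. close; the detached dir survives
        session.open(saved);                          // 3. reopen without re-localizing
        System.out.println("reopened with " + saved.dagResourcesDir);
      }
    }

The unit of reuse across a close/open cycle is the resources object itself, not a caller-supplied file list, which is what lets the reopen path drop the conf and jar parameters.
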
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolSession.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolSession.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolSession.java
index b3ccd24..96ade50 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolSession.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolSession.java
@@ -19,11 +19,8 @@
 package org.apache.hadoop.hive.ql.exec.tez;
 
 import com.google.common.util.concurrent.SettableFuture;
-
 import org.apache.hadoop.hive.registry.impl.TezAmInstance;
-
 import org.apache.hadoop.conf.Configuration;
-
 import java.io.IOException;
 import java.net.URISyntaxException;
 import java.util.Collection;
@@ -31,9 +28,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import javax.security.auth.login.LoginException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -42,7 +37,6 @@ import org.apache.hadoop.hive.ql.wm.SessionTriggerProvider;
 import org.apache.hadoop.hive.ql.wm.TriggerActionHandler;
 import org.apache.hadoop.hive.registry.impl.TezAmInstance;
 import org.apache.tez.dag.api.TezException;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -66,8 +60,7 @@ class TezSessionPoolSession extends TezSessionState {
 
     void returnAfterUse(TezSessionPoolSession session) throws Exception;
 
-    TezSessionState reopen(TezSessionState session, Configuration conf,
-      String[] inputOutputJars) throws Exception;
+    TezSessionState reopen(TezSessionState session) throws Exception;
 
     void destroy(TezSessionState session) throws Exception;
   }
@@ -128,10 +121,10 @@ class TezSessionPoolSession extends TezSessionState {
   }
 
   @Override
-  protected void openInternal(Collection<String> additionalFiles,
-      boolean isAsync, LogHelper console, Path scratchDir)
+  protected void openInternal(String[] additionalFiles,
+      boolean isAsync, LogHelper console, HiveResources resources)
           throws IOException, LoginException, URISyntaxException, TezException {
-    super.openInternal(additionalFiles, isAsync, console, scratchDir);
+    super.openInternal(additionalFiles, isAsync, console, resources);
     parent.registerOpenSession(this);
     if (expirationTracker != null) {
       expirationTracker.addToExpirationQueue(this);
@@ -206,9 +199,8 @@ class TezSessionPoolSession extends TezSessionState {
   }
 
   @Override
-  public TezSessionState reopen(
-      Configuration conf, String[] inputOutputJars) throws Exception {
-    return parent.reopen(this, conf, inputOutputJars);
+  public TezSessionState reopen() throws Exception {
+    return parent.reopen(this);
   }
 
   @Override

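The openInternal override above follows a simple template-method pattern: run the base open first, then register the session with the parent pool (and, if present, the expiration tracker). A minimal sketch of that hook, with stand-in types rather than the real Hive classes:

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative only; in the patch, TezSessionPoolSession.openInternal calls
    // super.openInternal and then parent.registerOpenSession(this).
    public class OpenHookSketch {
      static class BaseSession {
        protected void openInternal(String[] additionalFiles) {
          int n = (additionalFiles == null) ? 0 : additionalFiles.length;
          System.out.println("base open, localizing " + n + " extra files");
        }
      }

      static class PoolSession extends BaseSession {
        private final List<PoolSession> openSessions;
        PoolSession(List<PoolSession> openSessions) { this.openSessions = openSessions; }

        @Override
        protected void openInternal(String[] additionalFiles) {
          super.openInternal(additionalFiles); // the base open must succeed first
          openSessions.add(this);              // only then register with the pool
        }
      }

      public static void main(String[] args) {
        List<PoolSession> pool = new ArrayList<>();
        new PoolSession(pool).openInternal(new String[] { "udf.jar" });
        System.out.println("registered sessions: " + pool.size());
      }
    }
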
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
index dd879fc..5e892c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
-import java.util.Collection;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -37,7 +38,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import javax.security.auth.login.LoginException;
-
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FilenameUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -90,7 +90,6 @@ import org.codehaus.jackson.map.annotate.JsonSerialize;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.tez.monitoring.TezJobMonitor;
-
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 
@@ -126,8 +125,18 @@ public class TezSessionState {
 
   private AtomicReference<String> ownerThread = new AtomicReference<>(null);
 
-  private final Set<String> additionalFilesNotFromConf = new HashSet<String>();
-  private final Set<LocalResource> localizedResources = new HashSet<LocalResource>();
+  public static final class HiveResources {
+    public HiveResources(Path dagResourcesDir) {
+      this.dagResourcesDir = dagResourcesDir;
+    }
+    /** A directory that will contain resources related to DAGs and specified in configs. */
+    public final Path dagResourcesDir;
+    public final Set<String> additionalFilesNotFromConf = new HashSet<>();
+    /** Localized resources of this session; both from conf and not from conf (above). */
+    public final Set<LocalResource> localizedResources = new HashSet<>();
+  }
+
+  private HiveResources resources;
   @JsonProperty("doAsEnabled")
   private boolean doAsEnabled;
   private boolean isLegacyLlapMode;
@@ -201,45 +210,32 @@ public class TezSessionState {
   }
 
   public void open() throws IOException, LoginException, URISyntaxException, TezException {
-    Set<String> noFiles = null;
-    open(noFiles, null);
+    String[] noFiles = null;
+    open(noFiles);
   }
 
   /**
    * Creates a tez session. A session is tied to either a cli/hs2 session. You can
    * submit multiple DAGs against a session (as long as they are executed serially).
-   * @throws IOException
-   * @throws URISyntaxException
-   * @throws LoginException
-   * @throws TezException
-   * @throws InterruptedException
    */
-  public void open(String[] additionalFiles)
+  public void open(String[] additionalFilesNotFromConf)
       throws IOException, LoginException, URISyntaxException, TezException {
-    openInternal(setFromArray(additionalFiles), false, null, null);
+    openInternal(additionalFilesNotFromConf, false, null, null);
   }
 
-  private static Set<String> setFromArray(String[] additionalFiles) {
-    if (additionalFiles == null) return null;
-    Set<String> files = new HashSet<>();
-    for (String originalFile : additionalFiles) {
-      files.add(originalFile);
-    }
-    return files;
+  public void open(HiveResources resources)
+      throws LoginException, IOException, URISyntaxException, TezException {
+    openInternal(null, false, null, resources);
   }
 
   public void beginOpen(String[] additionalFiles, LogHelper console)
       throws IOException, LoginException, URISyntaxException, TezException {
-    openInternal(setFromArray(additionalFiles), true, console, null);
+    openInternal(additionalFiles, true, console, null);
   }
 
-  public void open(Collection<String> additionalFiles, Path scratchDir)
-      throws LoginException, IOException, URISyntaxException, TezException {
-    openInternal(additionalFiles, false, null, scratchDir);
-  }
-
-  protected void openInternal(Collection<String> additionalFiles,
-      boolean isAsync, LogHelper console, Path scratchDir)
+  protected void openInternal(String[] additionalFilesNotFromConf,
+      boolean isAsync, LogHelper console, HiveResources resources)
           throws IOException, LoginException, URISyntaxException, TezException {
     // TODO Why is the queue name set again. It has already been setup via setQueueName. Do only one of the two.
     String confQueueName = conf.get(TezConfiguration.TEZ_QUEUE_NAME);
@@ -258,25 +254,25 @@ public class TezSessionState {
     user = ugi.getShortUserName();
     LOG.info("User of session id " + sessionId + " is " + user);
 
-    // create the tez tmp dir
-    tezScratchDir = scratchDir == null ? createTezDir(sessionId) : scratchDir;
-
-    additionalFilesNotFromConf.clear();
-    if (additionalFiles != null) {
-      additionalFilesNotFromConf.addAll(additionalFiles);
+    // Create the tez tmp dir and a directory for Hive resources.
+    tezScratchDir = createTezDir(sessionId, null);
+    if (resources != null) {
+      // If we are getting the resources externally, don't relocalize anything.
+      this.resources = resources;
+    } else {
+      this.resources = new HiveResources(createTezDir(sessionId, "resources"));
+      ensureLocalResources(conf, additionalFilesNotFromConf);
     }
 
-    refreshLocalResourcesFromConf(conf);
-
     // unless already installed on all the cluster nodes, we'll have to
     // localize hive-exec.jar as well.
     appJarLr = createJarLocalResource(utils.getExecJarPathLocal());
 
     // configuration for the application master
     final Map<String, LocalResource> commonLocalResources = new HashMap<String, LocalResource>();
-    commonLocalResources.put(utils.getBaseName(appJarLr), appJarLr);
-    for (LocalResource lr : localizedResources) {
-      commonLocalResources.put(utils.getBaseName(lr), lr);
+    commonLocalResources.put(DagUtils.getBaseName(appJarLr), appJarLr);
+    for (LocalResource lr : this.resources.localizedResources) {
+      commonLocalResources.put(DagUtils.getBaseName(lr), lr);
     }
 
     if (llapMode) {
@@ -284,7 +280,7 @@ public class TezSessionState {
       addJarLRByClass(LlapTaskSchedulerService.class, commonLocalResources);
       addJarLRByClass(LlapProtocolClientImpl.class, commonLocalResources);
       addJarLRByClass(LlapProtocolClientProxy.class, commonLocalResources);
-      addJarLRByClassName("org.apache.hadoop.registry.client.api.RegistryOperations", commonLocalResources);
+      addJarLRByClass(RegistryOperations.class, commonLocalResources);
     }
 
     // Create environment for AM.
@@ -556,36 +552,54 @@ public class TezSessionState {
     tezConf.set(TezConfiguration.TEZ_AM_MODIFY_ACLS, modifyStr);
   }
 
-  public void refreshLocalResourcesFromConf(HiveConf conf)
-      throws IOException, LoginException, URISyntaxException, TezException {
-
-    String dir = tezScratchDir.toString();
-
-    localizedResources.clear();
+  /** Called from openInternal and TezTask.ensureSessionHasResources to localize resources. */
+  public void ensureLocalResources(Configuration conf, String[] newFilesNotFromConf)
+          throws IOException, LoginException, URISyntaxException, TezException {
+    String dir = resources.dagResourcesDir.toString();
+    resources.localizedResources.clear();
 
-    // these are local resources set through add file, jar, etc
+    // Always localize files from conf; duplicates are handled at the FS level.
+    // TODO: we could do the same thing as below and only localize if missing.
+    //       That could be especially valuable given that this is almost always the same set.
     List<LocalResource> lrs = utils.localizeTempFilesFromConf(dir, conf);
     if (lrs != null) {
-      localizedResources.addAll(lrs);
+      resources.localizedResources.addAll(lrs);
     }
 
-    // these are local resources that are set through the mr "tmpjars" property; skip session files.
-    List<LocalResource> handlerLr = utils.localizeTempFiles(dir, conf,
-      additionalFilesNotFromConf.toArray(new String[additionalFilesNotFromConf.size()]),
-      DagUtils.getTempFilesFromConf(conf));
-
-    if (handlerLr != null) {
-      localizedResources.addAll(handlerLr);
+    // Localize the non-conf resources that are missing from the current list.
+    List<LocalResource> newResources = null;
+    if (newFilesNotFromConf != null && newFilesNotFromConf.length > 0) {
+      boolean hasResources = !resources.additionalFilesNotFromConf.isEmpty();
+      if (hasResources) {
+        for (String s : newFilesNotFromConf) {
+          hasResources = resources.additionalFilesNotFromConf.contains(s);
+          if (!hasResources) break;
+        }
+      }
+      if (!hasResources) {
+        String[] skipFilesFromConf = DagUtils.getTempFilesFromConf(conf);
+        newResources = utils.localizeTempFiles(dir, conf, newFilesNotFromConf, skipFilesFromConf);
+        if (newResources != null) {
+          resources.localizedResources.addAll(newResources);
+        }
+        for (String fullName : newFilesNotFromConf) {
+          resources.additionalFilesNotFromConf.add(fullName);
+        }
+      }
     }
-  }
 
-  public boolean hasResources(String[] localAmResources) {
-    if (localAmResources == null || localAmResources.length == 0) return true;
-    if (additionalFilesNotFromConf.isEmpty()) return false;
-    for (String s : localAmResources) {
-      if (!additionalFilesNotFromConf.contains(s)) return false;
+    // Finally add the files to AM. The old code seems to do this twice, first for all the new
+    // resources regardless of type; and then for all the session resources that are not of type
+    // file (see branch-1 calls to addAppMasterLocalFiles: from updateSession and with resourceMap
+    // from submit).
+    // TODO: Do we really need all this nonsense?
+    if (newResources != null && !newResources.isEmpty()) {
+      session.addAppMasterLocalFiles(DagUtils.createTezLrMap(null, newResources));
+    }
+    if (!resources.localizedResources.isEmpty()) {
+      session.addAppMasterLocalFiles(
+          DagUtils.getResourcesUpdatableForAm(resources.localizedResources));
     }
-    return true;
   }
 
   /**
@@ -593,11 +607,11 @@ public class TezSessionState {
    * further DAGs can be executed against it. Only called by session management classes; some
    * sessions should not simply be closed by users - e.g. pool sessions need to be restarted.
    *
-   * @param keepTmpDir
-   *          whether or not to remove the scratch dir at the same time.
+   * @param keepDagFilesDir
+   *          whether to keep the directory with DAG resources (e.g. so a reopen can reuse it).
    * @throws Exception
    */
-  void close(boolean keepTmpDir) throws Exception {
+  void close(boolean keepDagFilesDir) throws Exception {
     if (session != null) {
       LOG.info("Closing Tez Session");
       closeClient(session);
@@ -618,20 +632,16 @@ public class TezSessionState {
       }
     }
 
-    if (!keepTmpDir) {
-      cleanupScratchDir();
+    cleanupScratchDir();
+    if (!keepDagFilesDir) {
+      cleanupDagResources();
     }
     session = null;
     sessionFuture = null;
     console = null;
     tezScratchDir = null;
+    // Do not reset the DAG resources; if they weren't cleaned up, they are still needed.
     appJarLr = null;
-    additionalFilesNotFromConf.clear();
-    localizedResources.clear();
-  }
-
-  public Set<String> getAdditionalFilesNotFromConf() {
-    return additionalFilesNotFromConf;
   }
 
   private void closeClient(TezClient client) throws TezException,
@@ -643,12 +653,18 @@ public class TezSessionState {
     }
   }
 
-  protected final void cleanupScratchDir () throws IOException {
+  protected final void cleanupScratchDir() throws IOException {
     FileSystem fs = tezScratchDir.getFileSystem(conf);
     fs.delete(tezScratchDir, true);
     tezScratchDir = null;
   }
 
+  protected final void cleanupDagResources() throws IOException {
+    if (resources == null) return; // Already extracted for reuse; see reopenInternal.
+    FileSystem fs = resources.dagResourcesDir.getFileSystem(conf);
+    fs.delete(resources.dagResourcesDir, true);
+    resources = null;
+  }
+
   public String getSessionId() {
     return sessionId;
   }
@@ -675,10 +691,6 @@ public class TezSessionState {
     return session;
   }
 
-  public Path getTezScratchDir() {
-    return tezScratchDir;
-  }
-
   public LocalResource getAppJarLr() {
     return appJarLr;
   }
@@ -687,11 +699,11 @@ public class TezSessionState {
    * createTezDir creates a temporary directory in the scratchDir folder to
    * be used with Tez. Assumes scratchDir exists.
    */
-  private Path createTezDir(String sessionId) throws IOException {
+  private Path createTezDir(String sessionId, String suffix) throws IOException {
     // tez needs its own scratch dir (per session)
     // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool.
     Path tezDir = new Path(SessionState.get().getHdfsScratchDirURIString(), TEZ_DIR);
-    tezDir = new Path(tezDir, sessionId);
+    tezDir = new Path(tezDir, sessionId + ((suffix == null) ? "" : ("-" + suffix)));
     FileSystem fs = tezDir.getFileSystem(conf);
     FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION));
     fs.mkdirs(tezDir, fsPermission);
@@ -759,9 +771,8 @@ public class TezSessionState {
     final File jar =
         new File(Utilities.jarFinderGetJar(clazz));
     final String localJarPath = jar.toURI().toURL().toExternalForm();
-    final LocalResource jarLr =
-      createJarLocalResource(localJarPath);
-    lrMap.put(utils.getBaseName(jarLr), jarLr);
+    final LocalResource jarLr = createJarLocalResource(localJarPath);
+    lrMap.put(DagUtils.getBaseName(jarLr), jarLr);
   }
 
   private String getSha(final Path localFile) throws IOException, IllegalArgumentException {
@@ -808,7 +819,7 @@ public class TezSessionState {
   }
 
   public List<LocalResource> getLocalizedResources() {
-    return new ArrayList<>(localizedResources);
+    return new ArrayList<>(resources.localizedResources);
   }
 
   public String getUser() {
@@ -849,10 +860,9 @@ public class TezSessionState {
     TezSessionPoolManager.getInstance().returnSession(this);
   }
 
-  public TezSessionState reopen(
-      Configuration conf, String[] inputOutputJars) throws Exception {
+  public TezSessionState reopen() throws Exception {
     // By default, TezSessionPoolManager handles this for both pool and non-pool session.
-    return TezSessionPoolManager.getInstance().reopen(this, conf, inputOutputJars);
+    return TezSessionPoolManager.getInstance().reopen(this);
   }
 
   public void destroy() throws Exception {
@@ -875,4 +885,28 @@ public class TezSessionState {
   public KillQuery getKillQuery() {
     return killQuery;
   }
+
+  public HiveResources extractHiveResources() {
+    HiveResources result = resources;
+    resources = null;
+    return result;
+  }
+
+  public Path replaceHiveResources(HiveResources resources, boolean isAsync) {
+    Path dir = null;
+    if (this.resources != null) {
+      dir = this.resources.dagResourcesDir;
+      if (!isAsync) {
+        try {
+          dir.getFileSystem(conf).delete(dir, true);
+        } catch (Exception ex) {
+          LOG.error("Failed to delete the old resources directory "
+              + dir + "; ignoring: " + ex.getLocalizedMessage());
+        }
+        dir = null;
+      }
+    }
+    this.resources = resources;
+    return dir;
+  }
 }

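Of the new methods above, replaceHiveResources has the least obvious contract: when called with isAsync set, it must not delete the superseded directory inline, so it hands the old path back to the caller for deferred deletion. A runnable sketch of that contract under illustrative names (Strings stand in for HDFS Paths; this is not the real Hive class):

    public class SwapSketch {
      private String resourcesDir = "/tmp/hive/session-1-resources";

      String replaceResources(String newDir, boolean isAsync) {
        String old = resourcesDir;
        resourcesDir = newDir;
        if (old == null) return null;
        if (isAsync) {
          return old;                      // caller queues the old dir for deletion
        }
        System.out.println("deleting " + old + " inline");
        return null;
      }

      public static void main(String[] args) {
        SwapSketch s = new SwapSketch();
        String deferred = s.replaceResources("/tmp/hive/session-2-resources", true);
        System.out.println("to delete later: " + deferred);
      }
    }

This is the other half of the extract/replace pair: extractHiveResources moves resources out before a close, and replaceHiveResources moves pre-localized resources in, deferring cleanup when the swap happens off the owning thread.
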
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index 8795cfc..27799a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import org.apache.hive.common.util.Ref;
 import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -31,9 +31,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import javax.annotation.Nullable;
-
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -67,7 +65,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.LocalResource;
-import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.tez.client.CallerContext;
 import org.apache.tez.client.TezClient;
 import org.apache.tez.common.counters.CounterGroup;
@@ -87,7 +84,6 @@ import org.apache.tez.dag.api.client.DAGStatus;
 import org.apache.tez.dag.api.client.StatusGetOpts;
 import org.apache.tez.dag.api.client.VertexStatus;
 import org.json.JSONObject;
-
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -133,7 +129,7 @@ public class TezTask extends Task<TezWork> {
     int rc = 1;
     boolean cleanContext = false;
     Context ctx = null;
-    TezSessionState session = null;
+    Ref<TezSessionState> sessionRef = Ref.from(null);
 
     try {
       // Get or create Context object. If we create it we have to clean it later as well.
@@ -147,15 +143,15 @@ public class TezTask extends Task<TezWork> {
         WmContext wmContext = new WmContext(System.currentTimeMillis(), queryId);
         ctx.setWmContext(wmContext);
       }
-
       // Need to remove this static hack. But this is the way currently to get a session.
       SessionState ss = SessionState.get();
       // Note: given that we return pool sessions to the pool in the finally block below, and that
       //       we need to set the global to null to do that, this "reuse" may be pointless.
-      session = ss.getTezSession();
+      TezSessionState session = sessionRef.value = ss.getTezSession();
       if (session != null && !session.isOpen()) {
         LOG.warn("The session: " + session + " has not been opened");
       }
+
       // We only need a username for UGI to use for groups; getGroups will fetch the groups
       // based on Hadoop configuration, as documented at
       // https://hadoop.apache.org/docs/r2.8.0/hadoop-project-dist/hadoop-common/GroupsMapping.html
@@ -163,57 +159,51 @@ public class TezTask extends Task<TezWork> {
       MappingInput mi = (userName == null) ? new MappingInput("anonymous", null)
         : new MappingInput(ss.getUserName(),
             UserGroupInformation.createRemoteUser(ss.getUserName()).getGroups());
+
       WmContext wmContext = ctx.getWmContext();
-      session = WorkloadManagerFederation.getSession(session, conf, mi, getWork().getLlapMode(), wmContext);
+      // jobConf will hold all the configuration for hadoop, tez, and hive
+      JobConf jobConf = utils.createConfiguration(conf);
+      // Get all user jars from work (e.g. input format stuff).
+      String[] allNonConfFiles = work.configureJobConfAndExtractJars(jobConf);
+      // DAG scratch dir. We get a session from the pool so it may be different from the Tez one.
+      // TODO: we could perhaps reuse the same directory for HiveResources?
+      Path scratchDir = utils.createTezDir(ctx.getMRScratchDir(), conf);
+      CallerContext callerContext = CallerContext.create(
+          "HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", queryPlan.getQueryStr());
+
+      session = sessionRef.value = WorkloadManagerFederation.getSession(
+          sessionRef.value, conf, mi, getWork().getLlapMode(), wmContext);
 
-      LOG.info("Subscribed to counters: {} for queryId: {}", wmContext.getSubscribedCounters(), wmContext.getQueryId());
-      ss.setTezSession(session);
       try {
-        // jobConf will hold all the configuration for hadoop, tez, and hive
-        JobConf jobConf = utils.createConfiguration(conf);
-
-        // Get all user jars from work (e.g. input format stuff).
-        String[] inputOutputJars = work.configureJobConfAndExtractJars(jobConf);
-
-        // we will localize all the files (jars, plans, hashtables) to the
-        // scratch dir. let's create this and tmp first.
-        Path scratchDir = ctx.getMRScratchDir();
-
-        // create the tez tmp dir
-        scratchDir = utils.createTezDir(scratchDir, conf);
+        ss.setTezSession(session);
+        LOG.info("Subscribed to counters: {} for queryId: {}", wmContext.getSubscribedCounters(),
+          wmContext.getQueryId());
 
-        // This is used to compare global and vertex resources. Global resources are originally
-        // derived from session conf via localizeTempFilesFromConf. So, use that here.
-        Configuration sessionConf = (session.getConf() != null) ? session.getConf() : conf;
-        Map<String,LocalResource> inputOutputLocalResources =
-            getExtraLocalResources(jobConf, scratchDir, inputOutputJars, sessionConf);
+        // Ensure the session is open and has the necessary local resources.
+        // This would refresh any conf resources and also local resources.
+        ensureSessionHasResources(session, allNonConfFiles);
 
-        // Ensure the session is open and has the necessary local resources
-        updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources);
+        // This combines the jars that come from the conf with the ones added separately.
+        List<LocalResource> allNonAppResources = session.getLocalizedResources();
+        logResources(allNonAppResources);
 
-        List<LocalResource> additionalLr = session.getLocalizedResources();
-        logResources(additionalLr);
-
-        // unless already installed on all the cluster nodes, we'll have to
-        // localize hive-exec.jar as well.
-        LocalResource appJarLr = session.getAppJarLr();
+        Map<String, LocalResource> allResources = DagUtils.createTezLrMap(
+            session.getAppJarLr(), allNonAppResources);
 
         // next we translate the TezWork to a Tez DAG
-        DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
-        CallerContext callerContext = CallerContext.create(
-            "HIVE", queryPlan.getQueryId(),
-            "HIVE_QUERY_ID", queryPlan.getQueryStr());
+        DAG dag = build(jobConf, work, scratchDir, ctx, allResources);
         dag.setCallerContext(callerContext);
 
-        // Add the extra resources to the dag
-        addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources);
+        // Note: we no longer call addTaskLocalFiles because all the resources are correctly
+        //       updated in the session resource lists now, and thus added to vertices.
+        //       If something breaks, dag.addTaskLocalFiles might need to be called here.
 
         // Check isShutdown opportunistically; it's never unset.
         if (this.isShutdown) {
           throw new HiveException("Operation cancelled");
         }
-        DAGClient dagClient = submit(jobConf, dag, scratchDir, appJarLr, session,
-            additionalLr, inputOutputJars, inputOutputLocalResources);
+        DAGClient dagClient = submit(jobConf, dag, sessionRef);
+        session = sessionRef.value;
         boolean wasShutdown = false;
         synchronized (dagClientLock) {
           assert this.dagClient == null;
@@ -251,7 +241,9 @@ public class TezTask extends Task<TezWork> {
         // We return this to the pool even if it's unusable; reopen is supposed to handle this.
         wmContext = ctx.getWmContext();
         try {
-          session.returnToSessionManager();
+          if (sessionRef.value != null) {
+            sessionRef.value.returnToSessionManager();
+          }
         } catch (Exception e) {
           LOG.error("Failed to return session: {} to pool", session, e);
           throw e;
@@ -340,61 +332,23 @@ public class TezTask extends Task<TezWork> {
   }
 
   /**
-   * Converted the list of jars into local resources
-   */
-  Map<String,LocalResource> getExtraLocalResources(JobConf jobConf, Path scratchDir,
-      String[] inputOutputJars, Configuration sessionConf) throws Exception {
-    final Map<String,LocalResource> resources = new HashMap<String,LocalResource>();
-    // Skip the files already in session local resources...
-    final List<LocalResource> localResources = utils.localizeTempFiles(scratchDir.toString(),
-        jobConf, inputOutputJars, DagUtils.getTempFilesFromConf(sessionConf));
-    if (null != localResources) {
-      for (LocalResource lr : localResources) {
-        resources.put(utils.getBaseName(lr), lr);
-      }
-    }
-    return resources;
-  }
-
-  /**
    * Ensures that the Tez Session is open and the AM has all necessary jars configured.
    */
-  void updateSession(TezSessionState session,
-      JobConf jobConf, Path scratchDir, String[] inputOutputJars,
-      Map<String,LocalResource> extraResources) throws Exception {
-    final boolean missingLocalResources = !session
-        .hasResources(inputOutputJars);
-
+  @VisibleForTesting
+  void ensureSessionHasResources(
+      TezSessionState session, String[] nonConfResources) throws Exception {
     TezClient client = session.getSession();
     // TODO null can also mean that this operation was interrupted. Should we really try to re-create the session in that case ?
     if (client == null) {
+      // Note: the only sane case where this can happen is the non-pool one. We should get rid
+      //       of it; in the non-pool case perf doesn't matter, so we might as well open at get
+      //       time and then call update like we do in the else branch.
       // Can happen if the user sets the tez flag after the session was established.
       LOG.info("Tez session hasn't been created yet. Opening session");
-      session.open(inputOutputJars);
+      session.open(nonConfResources);
     } else {
       LOG.info("Session is already open");
-
-      // Ensure the open session has the necessary resources (StorageHandler)
-      if (missingLocalResources) {
-        LOG.info("Tez session missing resources," +
-            " adding additional necessary resources");
-        client.addAppMasterLocalFiles(extraResources);
-      }
-
-      session.refreshLocalResourcesFromConf(conf);
-    }
-  }
-
-  /**
-   * Adds any necessary resources that must be localized in each vertex to the DAG.
-   */
-  void addExtraResourcesToDag(TezSessionState session, DAG dag,
-      String[] inputOutputJars,
-      Map<String,LocalResource> inputOutputLocalResources) throws Exception {
-    if (!session.hasResources(inputOutputJars)) {
-      if (null != inputOutputLocalResources) {
-        dag.addTaskLocalFiles(inputOutputLocalResources);
-      }
+      session.ensureLocalResources(conf, nonConfResources);
     }
   }
 
@@ -406,9 +360,8 @@ public class TezTask extends Task<TezWork> {
     }
   }
 
-  DAG build(JobConf conf, TezWork work, Path scratchDir,
-      LocalResource appJarLr, List<LocalResource> additionalLr, Context ctx)
-      throws Exception {
+  DAG build(JobConf conf, TezWork work, Path scratchDir, Context ctx,
+      Map<String, LocalResource> vertexResources) throws Exception {
 
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_BUILD_DAG);
 
@@ -426,7 +379,7 @@ public class TezTask extends Task<TezWork> {
     DAG dag = DAG.create(dagName);
 
     // set some info for the query
-    JSONObject json = new JSONObject(new LinkedHashMap()).put("context", "Hive")
+    JSONObject json = new JSONObject(new LinkedHashMap<>()).put("context", "Hive")
         .put("description", ctx.getCmd());
     String dagInfo = json.toString();
 
@@ -474,7 +427,6 @@ public class TezTask extends Task<TezWork> {
 
         // For a vertex group, all Outputs use the same Key-class, Val-class and partitioner.
         // Pick any one source vertex to figure out the Edge configuration.
-       
 
         // now hook up the children
         for (BaseWork v: children) {
@@ -488,9 +440,8 @@ public class TezTask extends Task<TezWork> {
         // Regular vertices
         JobConf wxConf = utils.initializeVertexConf(conf, ctx, w);
         checkOutputSpec(w, wxConf);
-        Vertex wx =
-            utils.createVertex(wxConf, w, scratchDir, appJarLr, additionalLr, fs, ctx, !isFinal,
-                work, work.getVertexType(w));
+        Vertex wx = utils.createVertex(wxConf, w, scratchDir, fs, ctx, !isFinal,
+            work, work.getVertexType(w), vertexResources);
         if (w.getReservedMemoryMB() > 0) {
           // If reversedMemoryMB is set, make memory allocation fraction adjustment as needed
           double frac = DagUtils.adjustMemoryReserveFraction(w.getReservedMemoryMB(), super.conf);
@@ -548,38 +499,28 @@ public class TezTask extends Task<TezWork> {
     dag.setAccessControls(ac);
   }
 
-  DAGClient submit(JobConf conf, DAG dag, Path scratchDir,
-      LocalResource appJarLr, TezSessionState sessionState,
-      List<LocalResource> additionalLr, String[] inputOutputJars,
-      Map<String,LocalResource> inputOutputLocalResources)
-      throws Exception {
+  private TezSessionState getNewTezSessionOnError(
+      TezSessionState oldSession) throws Exception {
+    // Note: we don't pass the config to reopen. If the session was already open, it would
+    //       have kept running with its current config - preserve that behavior.
+    TezSessionState newSession = oldSession.reopen();
+    return newSession;
+  }
+
+  DAGClient submit(JobConf conf, DAG dag, Ref<TezSessionState> sessionStateRef) throws Exception {
 
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG);
     DAGClient dagClient = null;
-
-    Map<String, LocalResource> resourceMap = new HashMap<String, LocalResource>();
-    if (additionalLr != null) {
-      for (LocalResource lr: additionalLr) {
-        if (lr.getType() == LocalResourceType.FILE) {
-          // TEZ AM will only localize FILE (no script operators in the AM)
-          resourceMap.put(utils.getBaseName(lr), lr);
-        }
-      }
-    }
-
+    TezSessionState sessionState = sessionStateRef.value;
     try {
       try {
         // ready to start execution on the cluster
-        sessionState.getSession().addAppMasterLocalFiles(resourceMap);
         dagClient = sessionState.getSession().submitDAG(dag);
       } catch (SessionNotRunning nr) {
         console.printInfo("Tez session was closed. Reopening...");
-
-        // close the old one, but keep the tmp files around
-        // conf is passed in only for the case when session conf is null (tests and legacy paths?)
-        sessionState = sessionState.reopen(conf, inputOutputJars);
+        sessionStateRef.value = sessionState = getNewTezSessionOnError(sessionState);
         console.printInfo("Session re-established.");
-
         dagClient = sessionState.getSession().submitDAG(dag);
       }
     } catch (Exception e) {
@@ -587,14 +528,12 @@ public class TezTask extends Task<TezWork> {
       try {
         console.printInfo("Dag submit failed due to " + e.getMessage() + " stack trace: "
             + Arrays.toString(e.getStackTrace()) + " retrying...");
-        // TODO: this is temporary, need to refactor how reopen is invoked.
-        WmContext oldCtx = sessionState.getWmContext();
-        sessionState = sessionState.reopen(conf, inputOutputJars);
-        sessionState.setWmContext(oldCtx);
+        sessionStateRef.value = sessionState = getNewTezSessionOnError(sessionState);
         dagClient = sessionState.getSession().submitDAG(dag);
       } catch (Exception retryException) {
         // we failed to submit after retrying. Destroy session and bail.
         sessionState.destroy();
+        sessionStateRef.value = null;
         throw retryException;
       }
     }

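The TezTask changes hinge on passing Ref<TezSessionState> into submit(): a reopen may replace the session object, and the caller's finally block must return whichever session is current, not the one it started with. A runnable sketch of the pattern; the Ref holder here is a stand-in for org.apache.hive.common.util.Ref:

    public class RefSketch {
      static final class Ref<T> {
        T value;
        static <T> Ref<T> from(T value) {
          Ref<T> r = new Ref<>();
          r.value = value;
          return r;
        }
      }

      static void submit(Ref<String> sessionRef) {
        // Simulate a failed submit followed by a reopen that yields a new session.
        sessionRef.value = sessionRef.value + "-reopened";
      }

      public static void main(String[] args) {
        Ref<String> sessionRef = Ref.from("session-1");
        try {
          submit(sessionRef);
        } finally {
          // The caller returns whatever session is current, not the original one.
          System.out.println("returning " + sessionRef.value + " to the pool");
        }
      }
    }
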
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
index dbdbbf2..1f4843d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java
@@ -17,6 +17,14 @@
  */
 package org.apache.hadoop.hive.ql.exec.tez;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -39,9 +47,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.tezplugins.LlapTaskSchedulerService;
@@ -50,6 +57,7 @@ import org.apache.hadoop.hive.metastore.api.WMPool;
 import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 import org.apache.hadoop.hive.metastore.api.WMTrigger;
 import org.apache.hadoop.hive.ql.exec.tez.AmPluginNode.AmPluginInfo;
+import org.apache.hadoop.hive.ql.exec.tez.TezSessionState.HiveResources;
 import org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.session.KillQuery;
@@ -67,15 +75,6 @@ import org.codehaus.jackson.map.SerializationConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 
 /** Workload management entry point for HS2.
  * Note on how this class operates.
@@ -342,6 +341,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     private List<WmTezSession> toRestartInUse = new LinkedList<>(),
         toDestroyNoRestart = new LinkedList<>();
     private Map<WmTezSession, KillQueryContext> toKillQuery = new IdentityHashMap<>();
+    private List<Path> pathsToDelete = Lists.newArrayList();
   }
 
   private void runWmThread() {
@@ -440,7 +440,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
         try {
           WmEvent wmEvent = new WmEvent(WmEvent.EventType.RESTART);
           // Note: sessions in toRestart are always in use, so they cannot expire in parallel.
-          tezAmPool.replaceSession(toRestart, false, null);
+          tezAmPool.replaceSession(toRestart);
           wmEvent.endEvent(toRestart);
         } catch (Exception ex) {
           LOG.error("Failed to restart an old session; ignoring", ex);
@@ -463,6 +463,19 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       });
     }
     context.toDestroyNoRestart.clear();
+
+    // 4. Delete unneeded directories that were replaced by other ones via reopen.
+    for (final Path path : context.pathsToDelete) {
+      LOG.info("Deleting {}", path);
+      workPool.submit(() -> {
+        try {
+          path.getFileSystem(conf).delete(path, true);
+        } catch (Exception ex) {
+          LOG.error("Failed to delete an old path; ignoring: " + ex.getMessage());
+        }
+      });
+    }
+    context.pathsToDelete.clear();
   }
 
   /**
@@ -654,7 +667,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       if (LOG.isDebugEnabled()) {
         LOG.info("Processing changes for pool " + poolName + ": " + pools.get(poolName));
       }
-      processPoolChangesOnMasterThread(poolName, hasRequeues);
+      processPoolChangesOnMasterThread(poolName, hasRequeues, syncWork);
     }
 
 
@@ -852,7 +865,8 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     case OK:
       // If pool didn't exist, checkAndRemoveSessionFromItsPool wouldn't have returned OK.
       PoolState pool = pools.get(poolName);
-      SessionInitContext sw = new SessionInitContext(future, poolName, session.getQueryId(), session.getWmContext());
+      SessionInitContext sw = new SessionInitContext(future, poolName, session.getQueryId(),
+          session.getWmContext(), session.extractHiveResources());
       // We have just removed the session from the same pool, so don't check concurrency here.
       pool.initializingSessions.add(sw);
       ListenableFuture<WmTezSession> getFuture = tezAmPool.getSessionAsync();
@@ -953,7 +967,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
           state = new PoolState(fullName, qp, fraction);
         } else {
           // This will also take care of the queries if query parallelism changed.
-          state.update(qp, fraction, syncWork.toKillQuery, e);
+          state.update(qp, fraction, syncWork, e);
           poolsToRedistribute.add(fullName);
         }
         state.setTriggers(new LinkedList<Trigger>());
@@ -988,7 +1002,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     if (oldPools != null && !oldPools.isEmpty()) {
       // Looks like some pools were removed; kill running queries, re-queue the queued ones.
       for (PoolState oldPool : oldPools.values()) {
-        oldPool.destroy(syncWork.toKillQuery, e.getRequests, e.toReuse);
+        oldPool.destroy(syncWork, e.getRequests, e.toReuse);
       }
     }
 
@@ -1027,7 +1041,6 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     return deltaSessions + toTransfer;
   }
 
-  @SuppressWarnings("unchecked")
   private void failOnFutureFailure(ListenableFuture<?> future) {
     Futures.addCallback(future, FATAL_ERROR_CALLBACK);
   }
@@ -1088,7 +1101,8 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
   }
 
 
-  private void processPoolChangesOnMasterThread(String poolName, boolean hasRequeues) throws Exception {
+  private void processPoolChangesOnMasterThread(
+      String poolName, boolean hasRequeues, WmThreadSyncWork syncWork) throws Exception {
     PoolState pool = pools.get(poolName);
     if (pool == null) return; // Might be from before the new resource plan.
 
@@ -1109,15 +1123,15 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       // Note that in theory, we are guaranteed to have a session waiting for us here, but
       // the expiration, failures, etc. may cause one to be missing pending restart.
       // See SessionInitContext javadoc.
-      SessionInitContext sw = new SessionInitContext(queueReq.future, poolName, queueReq.queryId,
-        queueReq.wmContext);
+      SessionInitContext sw = new SessionInitContext(
+          queueReq.future, poolName, queueReq.queryId, queueReq.wmContext, null);
       ListenableFuture<WmTezSession> getFuture = tezAmPool.getSessionAsync();
       Futures.addCallback(getFuture, sw);
       // It is possible that all the async methods returned on the same thread because the
       // session with registry data and stuff was available in the pool.
       // If this happens, we'll take the session out here and "cancel" the init so we skip
       // processing the message that the successful init has queued for us.
-      boolean isDone = sw.extractSessionAndCancelIfDone(pool.sessions);
+      boolean isDone = sw.extractSessionAndCancelIfDone(pool.sessions, syncWork.pathsToDelete);
       if (!isDone) {
         pool.initializingSessions.add(sw);
       }
@@ -1458,22 +1472,14 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
 
   @Override
-  public TezSessionState reopen(TezSessionState session, Configuration conf,
-    String[] additionalFiles) throws Exception {
+  public TezSessionState reopen(TezSessionState session) throws Exception {
     WmTezSession wmTezSession = ensureOwnedSession(session);
     HiveConf sessionConf = wmTezSession.getConf();
     if (sessionConf == null) {
+      // TODO: can this ever happen?
       LOG.warn("Session configuration is null for " + wmTezSession);
       sessionConf = new HiveConf(conf, WorkloadManager.class);
     }
-    // TODO: ideally, we should handle reopen the same way no matter what. However, the cases
-    //       with additional files will have to wait until HIVE-17827 is unfucked, because it's
-    //       difficult to determine how the additionalFiles are to be propagated/reused between
-    //       two sessions. Once the update logic is encapsulated in the session we can remove this.
-    if (additionalFiles != null && additionalFiles.length > 0) {
-      TezSessionPoolManager.reopenInternal(session, additionalFiles);
-      return session;
-    }
 
     SettableFuture<WmTezSession> future = SettableFuture.create();
     currentLock.lock();
@@ -1493,7 +1499,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
   public void closeAndReopenExpiredSession(TezSessionPoolSession session) throws Exception {
     // By definition, this session is not in use and can no longer be in use, so it only
     // affects the session pool. We can handle this inline.
-    tezAmPool.replaceSession(ensureOwnedSession(session), false, null);
+    tezAmPool.replaceSession(ensureOwnedSession(session));
   }
 
   // ======= VARIOUS UTILITY METHOD
@@ -1637,7 +1643,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
 
     public void update(int queryParallelism, double fraction,
-        Map<WmTezSession, KillQueryContext> toKill, EventState e) {
+        WmThreadSyncWork syncWork, EventState e) {
       this.finalFraction = this.finalFractionRemaining = fraction;
       this.queryParallelism = queryParallelism;
       // TODO: two possible improvements
@@ -1646,7 +1652,8 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       //          If we could somehow restart queries we could instead put them at the front
       //          of the queue (esp. in conjunction with (1)) and rerun them.
       if (queryParallelism < getTotalActiveSessions()) {
-        extractAllSessionsToKill("The query pool was resized by administrator", e.toReuse, toKill);
+        extractAllSessionsToKill("The query pool was resized by administrator",
+            e.toReuse, syncWork);
       }
       // We will requeue, and not kill, the queries that are not running yet.
       // Insert them all before the get requests from this iteration.
@@ -1656,9 +1663,9 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
       }
     }
 
-    public void destroy(Map<WmTezSession, KillQueryContext> toKill,
+    public void destroy(WmThreadSyncWork syncWork,
         LinkedList<GetRequest> globalQueue, IdentityHashMap<WmTezSession, GetRequest> toReuse) {
-      extractAllSessionsToKill("The query pool was removed by administrator", toReuse, toKill);
+      extractAllSessionsToKill("The query pool was removed by administrator", toReuse, syncWork);
       // All the pending get requests should just be requeued elsewhere.
       // Note that we never queue session reuse so sessionToReuse would be null.
       globalQueue.addAll(0, queue);
@@ -1694,19 +1701,22 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
 
     private void extractAllSessionsToKill(String killReason,
         IdentityHashMap<WmTezSession, GetRequest> toReuse,
-        Map<WmTezSession, KillQueryContext> toKill) {
+        WmThreadSyncWork syncWork) {
       for (WmTezSession sessionToKill : sessions) {
-        resetRemovedSessionToKill(toKill, new KillQueryContext(sessionToKill, killReason), toReuse);
+        resetRemovedSessionToKill(syncWork.toKillQuery,
+          new KillQueryContext(sessionToKill, killReason), toReuse);
       }
       sessions.clear();
       for (SessionInitContext initCtx : initializingSessions) {
         // It is possible that the background init thread has finished in parallel, queued
         // the message for us but also returned the session to the user.
-        WmTezSession sessionToKill = initCtx.cancelAndExtractSessionIfDone(killReason);
+        WmTezSession sessionToKill = initCtx.cancelAndExtractSessionIfDone(
+            killReason, syncWork.pathsToDelete);
         if (sessionToKill == null) {
           continue; // Async op in progress; the callback will take care of this.
         }
-        resetRemovedSessionToKill(toKill, new KillQueryContext(sessionToKill, killReason), toReuse);
+        resetRemovedSessionToKill(syncWork.toKillQuery,
+          new KillQueryContext(sessionToKill, killReason), toReuse);
       }
       initializingSessions.clear();
     }
@@ -1740,14 +1750,18 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     private SettableFuture<WmTezSession> future;
     private SessionInitState state;
     private String cancelReason;
+    private HiveResources prelocalizedResources;
+    private Path pathToDelete;
     private WmContext wmContext;
 
-    public SessionInitContext(SettableFuture<WmTezSession> future, String poolName, String queryId,
-      final WmContext wmContext) {
+    public SessionInitContext(SettableFuture<WmTezSession> future,
+        String poolName, String queryId, WmContext wmContext,
+        HiveResources prelocalizedResources) {
       this.state = SessionInitState.GETTING;
       this.future = future;
       this.poolName = poolName;
       this.queryId = queryId;
+      this.prelocalizedResources = prelocalizedResources;
       this.wmContext = wmContext;
     }
 
@@ -1765,7 +1779,12 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
           session.setPoolName(poolName);
           session.setQueueName(yarnQueue);
           session.setQueryId(queryId);
-          session.setWmContext(wmContext);
+          if (prelocalizedResources != null) {
+            pathToDelete = session.replaceHiveResources(prelocalizedResources, true);
+          }
+          if (wmContext != null) {
+            session.setWmContext(wmContext);
+          }
           this.session = session;
           this.state = SessionInitState.WAITING_FOR_REGISTRY;
           break;
@@ -1855,7 +1874,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
         session.setQueryId(null);
         // We can just restart the session if we have received one.
         try {
-          tezAmPool.replaceSession(session, false, null);
+          tezAmPool.replaceSession(session);
         } catch (Exception e) {
           LOG.error("Failed to restart a failed session", e);
         }
@@ -1863,7 +1882,7 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
 
     /** Cancel the async operation (even if it's done), and return the session if done. */
-    public WmTezSession cancelAndExtractSessionIfDone(String cancelReason) {
+    public WmTezSession cancelAndExtractSessionIfDone(String cancelReason, List<Path> toDelete) {
       lock.lock();
       try {
         SessionInitState state = this.state;
@@ -1872,6 +1891,9 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
         if (state == SessionInitState.DONE) {
           WmTezSession result = this.session;
           this.session = null;
+          if (pathToDelete != null) {
+            toDelete.add(pathToDelete);
+          }
           return result;
         } else {
           // In the states where a background operation is in progress, wait for the callback.
@@ -1887,11 +1909,15 @@ public class WorkloadManager extends TezSessionPoolSession.AbstractTriggerValida
     }
 
     /** Extracts the session and cancel the operation, both only if done. */
-    public boolean extractSessionAndCancelIfDone(List<WmTezSession> results) {
+    public boolean extractSessionAndCancelIfDone(
+        List<WmTezSession> results, List<Path> toDelete) {
       lock.lock();
       try {
         if (state != SessionInitState.DONE) return false;
         this.state = SessionInitState.CANCELED;
+        if (pathToDelete != null) {
+          toDelete.add(pathToDelete);
+        }
         if (this.session != null) {
           results.add(this.session);
         } // Otherwise we have failed; the callback has taken care of the failure.

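The new pathsToDelete list follows the same discipline as the rest of WmThreadSyncWork: the master thread only records work, and the filesystem deletes happen on a worker pool once the state change has been applied. A runnable sketch of that queue-then-drain pattern, with illustrative names:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class DeferredDeleteSketch {
      public static void main(String[] args) throws InterruptedException {
        List<String> pathsToDelete = new ArrayList<>();

        // Master thread: record the superseded directory, no filesystem I/O here.
        pathsToDelete.add("/tmp/hive/session-1-resources");

        // Later, outside the state-update critical path, drain the queue on workers.
        ExecutorService workPool = Executors.newFixedThreadPool(1);
        for (String path : pathsToDelete) {
          workPool.submit(() -> System.out.println("deleting " + path));
        }
        pathsToDelete.clear();
        workPool.shutdown();
        workPool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }
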
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
index 9726af1..5ade1f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java
@@ -92,7 +92,7 @@ public class TezJobMonitor {
         try {
           // TODO: why does this only kill non-default sessions?
           // Nothing for workload management since that only deals with default ones.
-          TezSessionPoolManager.getInstance().closeNonDefaultSessions(false);
+          TezSessionPoolManager.getInstance().closeNonDefaultSessions();
         } catch (Exception e) {
           // ignore
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index 4148a8a..d6ae171 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -356,9 +356,8 @@ public class GenericUDTFGetSplits extends GenericUDTF {
       // Update the queryId to use the generated applicationId. See comment below about
       // why this is done.
       HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVEQUERYID, applicationId.toString());
-      Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, appJarLr,
-          new ArrayList<LocalResource>(), fs, ctx, false, work,
-          work.getVertexType(mapWork));
+      Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, fs, ctx, false, work,
+          work.getVertexType(mapWork), DagUtils.createTezLrMap(appJarLr, null));
       String vertexName = wx.getName();
       dag.addVertex(wx);
       utils.addCredentials(mapWork, dag);

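The createVertex call sites above now take a single name-to-LocalResource map built by DagUtils.createTezLrMap(appJarLr, additional). The helper's implementation is not shown in this patch; inferred from its call sites (either argument may be null), a plausible shape looks like the following, with Strings standing in for LocalResource and baseName() for DagUtils.getBaseName():

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LrMapSketch {
      static Map<String, String> createLrMap(String appJar, List<String> additional) {
        Map<String, String> map = new HashMap<>();
        if (appJar != null) {
          map.put(baseName(appJar), appJar);
        }
        if (additional != null) {
          for (String lr : additional) {
            map.put(baseName(lr), lr); // later entries win on duplicate base names
          }
        }
        return map;
      }

      static String baseName(String path) {
        return path.substring(path.lastIndexOf('/') + 1);
      }

      public static void main(String[] args) {
        System.out.println(createLrMap("/apps/hive/hive-exec.jar",
            Arrays.asList("/apps/hive/udfs.jar")));
      }
    }
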
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
index 5248454..0a47cda 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/SampleTezSessionState.java
@@ -20,16 +20,12 @@ package org.apache.hadoop.hive.ql.exec.tez;
 
 
 import com.google.common.util.concurrent.Futures;
-
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
 import java.io.IOException;
-import java.util.Collection;
 import java.util.concurrent.ScheduledExecutorService;
 import javax.security.auth.login.LoginException;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -83,8 +79,7 @@ public class SampleTezSessionState extends WmTezSession {
   }
 
   @Override
-  public void open(Collection<String> additionalFiles, Path scratchDir)
-      throws LoginException, IOException {
+  public void open(HiveResources resources) throws LoginException, IOException {
     open();
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
index 829ea8c..8fbe9a7 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
@@ -20,20 +20,16 @@ package org.apache.hadoop.hive.ql.exec.tez;
 
 import static org.junit.Assert.*;
 
-import java.util.HashSet;
-import java.util.Set;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
-
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 
 public class TestTezSessionPool {
 
@@ -190,22 +186,22 @@ public class TestTezSessionPool {
       Mockito.when(session.isDefault()).thenReturn(false);
       Mockito.when(session.getConf()).thenReturn(conf);
 
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
 
-      Mockito.verify(session).close(true);
-      Mockito.verify(session).open(new HashSet<String>(), null);
+      Mockito.verify(session).close(false);
+      Mockito.verify(session).open(Mockito.<TezSessionState.HiveResources>any());
 
       // mocked session starts with default queue
       assertEquals("default", session.getQueueName());
 
       // user explicitly specified queue name
       conf.set("tez.queue.name", "tezq1");
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
       assertEquals("tezq1", poolManager.getSession(null, conf, false, false).getQueueName());
 
       // user unsets queue name, will fallback to default session queue
       conf.unset("tez.queue.name");
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
       assertEquals("default", poolManager.getSession(null, conf, false, false).getQueueName());
 
       // session.open will unset the queue name from conf but Mockito intercepts the open call
@@ -213,17 +209,17 @@ public class TestTezSessionPool {
       conf.unset("tez.queue.name");
       // change session's default queue to tezq1 and rerun test sequence
       Mockito.when(session.getQueueName()).thenReturn("tezq1");
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
       assertEquals("tezq1", poolManager.getSession(null, conf, false, false).getQueueName());
 
       // user sets default queue now
       conf.set("tez.queue.name", "default");
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
       assertEquals("default", poolManager.getSession(null, conf, false, false).getQueueName());
 
       // user does not specify queue so use session default
       conf.unset("tez.queue.name");
-      poolManager.reopen(session, conf, null);
+      poolManager.reopen(session);
       assertEquals("tezq1", poolManager.getSession(null, conf, false, false).getQueueName());
     } catch (Exception e) {
       e.printStackTrace();
@@ -328,10 +324,10 @@ public class TestTezSessionPool {
     Mockito.when(session.isDefault()).thenReturn(false);
     Mockito.when(session.getConf()).thenReturn(conf);
 
-    poolManager.reopen(session, conf, null);
+    poolManager.reopen(session);
 
-    Mockito.verify(session).close(true);
-    Mockito.verify(session).open(new HashSet<String>(), null);
+    Mockito.verify(session).close(false);
+    Mockito.verify(session).open(Mockito.<TezSessionState.HiveResources>any());
   }
 
   @Test

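Two things change in the verifications above: the expected close() flag flips
from true to false, and because open() now takes an opaque HiveResources
handle instead of a file set and scratch dir, the tests match it with a typed
any() matcher rather than a literal argument. A self-contained sketch of that
Mockito idiom, with a hypothetical Session interface and List standing in for
HiveResources:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    import java.util.Arrays;
    import java.util.List;
    import org.mockito.Mockito;

    public class AnyMatcherSketch {
      interface Session { void open(List<String> resources); }  // hypothetical

      public static void main(String[] args) {
        Session s = mock(Session.class);
        s.open(Arrays.asList("foo.jar"));
        // Verify the call without constructing a literal argument:
        verify(s).open(Mockito.<List<String>>any());
      }
    }
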
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
index 47aa936..44d2b66 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
@@ -23,16 +23,16 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.*;
 import static org.mockito.Mockito.*;
 
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hive.common.util.Ref;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -96,8 +96,8 @@ public class TestTezTask {
     when(utils.getTezDir(any(Path.class))).thenReturn(path);
     when(
         utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class),
-            any(LocalResource.class), any(List.class), any(FileSystem.class), any(Context.class),
-            anyBoolean(), any(TezWork.class), any(VertexType.class))).thenAnswer(
+            any(FileSystem.class), any(Context.class),
+            anyBoolean(), any(TezWork.class), any(VertexType.class), any(Map.class))).thenAnswer(
         new Answer<Vertex>() {
 
           @Override
@@ -163,7 +163,7 @@ public class TestTezTask {
     task.setQueryPlan(mockQueryPlan);
 
     conf = new JobConf();
-    appLr = mock(LocalResource.class);
+    appLr = createResource("foo.jar");
 
     HiveConf hiveConf = new HiveConf();
     hiveConf
@@ -173,8 +173,7 @@ public class TestTezTask {
     session = mock(TezClient.class);
     sessionState = mock(TezSessionState.class);
     when(sessionState.getSession()).thenReturn(session);
-    when(sessionState.reopen(any(Configuration.class), any(String[].class)))
-      .thenReturn(sessionState);
+    when(sessionState.reopen()).thenReturn(sessionState);
     when(session.submitDAG(any(DAG.class)))
       .thenThrow(new SessionNotRunning(""))
       .thenReturn(mock(DAGClient.class));
@@ -192,7 +191,8 @@ public class TestTezTask {
 
   @Test
   public void testBuildDag() throws IllegalArgumentException, IOException, Exception {
-    DAG dag = task.build(conf, work, path, appLr, null, new Context(conf));
+    DAG dag = task.build(conf, work, path, new Context(conf),
+        DagUtils.createTezLrMap(appLr, null));
     for (BaseWork w: work.getAllWork()) {
       Vertex v = dag.getVertex(w.getName());
       assertNotNull(v);
@@ -212,17 +212,17 @@ public class TestTezTask {
 
   @Test
   public void testEmptyWork() throws IllegalArgumentException, IOException, Exception {
-    DAG dag = task.build(conf, new TezWork("", null), path, appLr, null, new Context(conf));
+    DAG dag = task.build(conf, new TezWork("", null), path, new Context(conf),
+        DagUtils.createTezLrMap(appLr, null));
     assertEquals(dag.getVertices().size(), 0);
   }
 
   @Test
   public void testSubmit() throws Exception {
     DAG dag = DAG.create("test");
-    task.submit(conf, dag, path, appLr, sessionState, Collections.<LocalResource> emptyList(),
-        new String[0], Collections.<String,LocalResource> emptyMap());
+    task.submit(conf, dag, Ref.from(sessionState));
     // validate close/reopen
-    verify(sessionState, times(1)).reopen(any(Configuration.class), any(String[].class));
+    verify(sessionState, times(1)).reopen();
     verify(session, times(2)).submitDAG(any(DAG.class));
   }
 
@@ -235,53 +235,22 @@ public class TestTezTask {
   @Test
   public void testExistingSessionGetsStorageHandlerResources() throws Exception {
     final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
-    LocalResource res = mock(LocalResource.class);
-    final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-
-    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars, null))
-        .thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
-    when(sessionState.isOpen()).thenReturn(true);
-    when(sessionState.isOpening()).thenReturn(false);
-    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
-    task.updateSession(sessionState, conf, path, inputOutputJars, resMap);
-    verify(session).addAppMasterLocalFiles(resMap);
-  }
-
-  @Test
-  public void testExtraResourcesAddedToDag() throws Exception {
-    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
-    LocalResource res = mock(LocalResource.class);
+    LocalResource res = createResource(inputOutputJars[0]);
     final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-    DAG dag = mock(DAG.class);
 
-    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars, null))
-        .thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
+    when(utils.localizeTempFiles(anyString(), any(Configuration.class), eq(inputOutputJars),
+        any(String[].class))).thenReturn(resources);
     when(sessionState.isOpen()).thenReturn(true);
     when(sessionState.isOpening()).thenReturn(false);
-    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
-    task.addExtraResourcesToDag(sessionState, dag, inputOutputJars, resMap);
-    verify(dag).addTaskLocalFiles(resMap);
+    task.ensureSessionHasResources(sessionState, inputOutputJars);
+    // TODO: ideally we should have a test for session itself.
+    verify(sessionState).ensureLocalResources(any(Configuration.class), eq(inputOutputJars));
   }
 
-  @Test
-  public void testGetExtraLocalResources() throws Exception {
-    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+  private static LocalResource createResource(String url) {
     LocalResource res = mock(LocalResource.class);
-    final List<LocalResource> resources = Collections.singletonList(res);
-    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
-    resMap.put("foo.jar", res);
-
-    when(utils.localizeTempFiles(eq(path.toString()), eq(conf), eq(inputOutputJars),
-        Mockito.<String[]>any())).thenReturn(resources);
-    when(utils.getBaseName(res)).thenReturn("foo.jar");
-
-    assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars, null));
+    when(res.getResource()).thenReturn(URL.fromPath(new Path(url)));
+    return res;
   }
 
   @Test

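The createResource() helper introduced above replaces bare LocalResource
mocks: it stubs getResource() with a real YARN URL so any code that inspects
the resource's path still works under test. The same helper, written out as a
compilable unit:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.api.records.URL;

    final class ResourceMocks {
      // A LocalResource mock whose getResource() answers with a real URL
      // built from the given path, e.g. createResource("file:///tmp/foo.jar").
      static LocalResource createResource(String url) {
        LocalResource res = mock(LocalResource.class);
        when(res.getResource()).thenReturn(URL.fromPath(new Path(url)));
        return res;
      }
    }
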
http://git-wip-us.apache.org/repos/asf/hive/blob/89dbf4e9/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
index c58e450..fc8f66a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java
@@ -42,7 +42,6 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicReference;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
@@ -212,9 +211,8 @@ public class TestWorkloadManager {
     }
 
     @Override
-    public TezSessionState reopen(
-        TezSessionState session, Configuration conf, String[] additionalFiles) throws Exception {
-      session = super.reopen(session, conf, additionalFiles);
+    public TezSessionState reopen(TezSessionState session) throws Exception {
+      session = super.reopen(session);
       ensureWm();
       return session;
     }
@@ -274,7 +272,7 @@ public class TestWorkloadManager {
         null, new MappingInput("user", null), conf, null);
     assertEquals(1.0, session.getClusterFraction(), EPSILON);
     qam.assertWasCalledAndReset();
-    WmTezSession session2 = (WmTezSession) session.reopen(conf, null);
+    WmTezSession session2 = (WmTezSession) session.reopen();
     assertNotSame(session, session2);
     wm.addTestEvent().get();
     assertEquals(session2.toString(), 1.0, session2.getClusterFraction(), EPSILON);
@@ -682,7 +680,7 @@ public class TestWorkloadManager {
     waitForThreadToBlock(cdl1, t1);
     checkError(error);
     // Replacing it directly in the pool should unblock get.
-    pool.replaceSession(oob, false, null);
+    pool.replaceSession(oob);
     t1.join();
     assertNotNull(sessionA1.get());
     assertEquals("A", sessionA1.get().getPoolName());


[20/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
new file mode 100644
index 0000000..d4cedb0
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+/**
+ * Unit tests for {@link org.apache.hadoop.hive.metastore.HiveMetaStoreClient}.  For now this just has
+ * transaction and locking tests.  The goal here is not to test all
+ * functionality possible through the interface, as all permutations of DB
+ * operations should be tested in the appropriate DB handler classes.  The
+ * goal is to test that we can properly pass the messages through the thrift
+ * service.
+ *
+ * This test previously lived in the ql module because it needed the hive-exec
+ * jar, and hive-exec already depends on hive-metastore, so the reverse
+ * dependency was impossible; with this change it lives in standalone-metastore.
+ */
+public class TestHiveMetaStoreTxns {
+
+  private final Configuration conf = MetastoreConf.newMetastoreConf();
+  private IMetaStoreClient client;
+
+  @Test
+  public void testTxns() throws Exception {
+    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
+    Assert.assertEquals(1L, (long) tids.get(0));
+    Assert.assertEquals(2L, (long) tids.get(1));
+    Assert.assertEquals(3L, (long) tids.get(2));
+    client.rollbackTxn(1);
+    client.commitTxn(2);
+    ValidTxnList validTxns = client.getValidTxns();
+    Assert.assertFalse(validTxns.isTxnValid(1));
+    Assert.assertTrue(validTxns.isTxnValid(2));
+    Assert.assertFalse(validTxns.isTxnValid(3));
+    Assert.assertFalse(validTxns.isTxnValid(4));
+  }
+
+  @Test
+  public void testOpenTxnNotExcluded() throws Exception {
+    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
+    Assert.assertEquals(1L, (long) tids.get(0));
+    Assert.assertEquals(2L, (long) tids.get(1));
+    Assert.assertEquals(3L, (long) tids.get(2));
+    client.rollbackTxn(1);
+    client.commitTxn(2);
+    ValidTxnList validTxns = client.getValidTxns(3);
+    Assert.assertFalse(validTxns.isTxnValid(1));
+    Assert.assertTrue(validTxns.isTxnValid(2));
+    Assert.assertTrue(validTxns.isTxnValid(3));
+    Assert.assertFalse(validTxns.isTxnValid(4));
+  }
+
+  @Test
+  public void testTxnRange() throws Exception {
+    ValidTxnList validTxns = client.getValidTxns();
+    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
+        validTxns.isTxnRangeValid(1L, 3L));
+    List<Long> tids = client.openTxns("me", 5).getTxn_ids();
+
+    HeartbeatTxnRangeResponse rsp = client.heartbeatTxnRange(1, 5);
+    Assert.assertEquals(0, rsp.getNosuch().size());
+    Assert.assertEquals(0, rsp.getAborted().size());
+
+    client.rollbackTxn(1L);
+    client.commitTxn(2L);
+    client.commitTxn(3L);
+    client.commitTxn(4L);
+    validTxns = client.getValidTxns();
+    System.out.println("validTxns = " + validTxns);
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(2L, 2L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(2L, 3L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(2L, 4L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(3L, 4L));
+
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(1L, 4L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(2L, 5L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(1L, 2L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(4L, 5L));
+
+    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
+        validTxns.isTxnRangeValid(1L, 1L));
+    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
+        validTxns.isTxnRangeValid(5L, 10L));
+
+    validTxns = new ValidReadTxnList("10:5:4,5,6:");
+    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
+        validTxns.isTxnRangeValid(4,6));
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(7, 10));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(7, 11));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(3, 6));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(4, 7));
+    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
+        validTxns.isTxnRangeValid(1, 12));
+    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
+        validTxns.isTxnRangeValid(1, 3));
+  }
+
+  @Test
+  public void testLocks() throws Exception {
+    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
+    rqstBuilder.addLockComponent(new LockComponentBuilder()
+        .setDbName("mydb")
+        .setTableName("mytable")
+        .setPartitionName("mypartition")
+        .setExclusive()
+        .setOperationType(DataOperationType.NO_TXN)
+        .build());
+    rqstBuilder.addLockComponent(new LockComponentBuilder()
+        .setDbName("mydb")
+        .setTableName("yourtable")
+        .setSemiShared()
+        .setOperationType(DataOperationType.NO_TXN)
+        .build());
+    rqstBuilder.addLockComponent(new LockComponentBuilder()
+        .setDbName("yourdb")
+        .setOperationType(DataOperationType.NO_TXN)
+        .setShared()
+        .build());
+    rqstBuilder.setUser("fred");
+
+    LockResponse res = client.lock(rqstBuilder.build());
+    Assert.assertEquals(1L, res.getLockid());
+    Assert.assertEquals(LockState.ACQUIRED, res.getState());
+
+    res = client.checkLock(1);
+    Assert.assertEquals(1L, res.getLockid());
+    Assert.assertEquals(LockState.ACQUIRED, res.getState());
+
+    client.heartbeat(0, 1);
+
+    client.unlock(1);
+  }
+
+  @Test
+  public void testLocksWithTxn() throws Exception {
+    long txnid = client.openTxn("me");
+
+    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
+    rqstBuilder.setTransactionId(txnid)
+      .addLockComponent(new LockComponentBuilder()
+        .setDbName("mydb")
+        .setTableName("mytable")
+        .setPartitionName("mypartition")
+        .setSemiShared()
+        .setOperationType(DataOperationType.UPDATE)
+        .build())
+      .addLockComponent(new LockComponentBuilder()
+        .setDbName("mydb")
+        .setTableName("yourtable")
+        .setSemiShared()
+        .setOperationType(DataOperationType.UPDATE)
+        .build())
+      .addLockComponent(new LockComponentBuilder()
+        .setDbName("yourdb")
+        .setShared()
+        .setOperationType(DataOperationType.SELECT)
+        .build())
+      .setUser("fred");
+
+    LockResponse res = client.lock(rqstBuilder.build());
+    Assert.assertEquals(1L, res.getLockid());
+    Assert.assertEquals(LockState.ACQUIRED, res.getState());
+
+    res = client.checkLock(1);
+    Assert.assertEquals(1L, res.getLockid());
+    Assert.assertEquals(LockState.ACQUIRED, res.getState());
+
+    client.heartbeat(txnid, 1);
+
+    client.commitTxn(txnid);
+  }
+
+  @Test
+  public void stringifyValidTxns() throws Exception {
+    // Test with just high water mark
+    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
+    String asString = validTxns.toString();
+    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    validTxns = new ValidReadTxnList(asString);
+    Assert.assertEquals(1, validTxns.getHighWatermark());
+    Assert.assertNotNull(validTxns.getInvalidTransactions());
+    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
+    asString = validTxns.toString();
+    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
+    validTxns = new ValidReadTxnList(asString);
+    Assert.assertEquals(1, validTxns.getHighWatermark());
+    Assert.assertNotNull(validTxns.getInvalidTransactions());
+    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
+
+    // Test with open transactions
+    validTxns = new ValidReadTxnList("10:3:5:3");
+    asString = validTxns.toString();
+    if (!asString.equals("10:3:3:5") && !asString.equals("10:3:5:3")) {
+      Assert.fail("Unexpected string value " + asString);
+    }
+    validTxns = new ValidReadTxnList(asString);
+    Assert.assertEquals(10, validTxns.getHighWatermark());
+    Assert.assertNotNull(validTxns.getInvalidTransactions());
+    Assert.assertEquals(2, validTxns.getInvalidTransactions().length);
+    boolean sawThree = false, sawFive = false;
+    for (long tid : validTxns.getInvalidTransactions()) {
+      if (tid == 3)  sawThree = true;
+      else if (tid == 5) sawFive = true;
+      else  Assert.fail("Unexpected value " + tid);
+    }
+    Assert.assertTrue(sawThree);
+    Assert.assertTrue(sawFive);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    TxnDbUtil.setConfValues(conf);
+    TxnDbUtil.prepDb(conf);
+    client = new HiveMetaStoreClient(conf);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TxnDbUtil.cleanDb(conf);
+  }
+}

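The stringifyValidTxns() assertions above pin down the serialized form of a
ValidReadTxnList. Read together with testTxnRange(), the cases imply the
layout highWatermark:minOpenTxn:openTxnIds:abortedTxnIds, where the last two
fields are comma-separated lists (this is inferred from the tests, not from a
documented contract). A worked example using the same string the range test
parses:

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class ValidTxnListDemo {
      public static void main(String[] args) {
        // hwm=10, min open txn=5, open={4,5,6}, no aborted txns:
        ValidTxnList v = new ValidReadTxnList("10:5:4,5,6:");
        System.out.println(v.isTxnValid(7));           // true: committed
        System.out.println(v.isTxnValid(5));           // false: still open
        System.out.println(v.isTxnValid(11));          // false: above hwm
        System.out.println(v.isTxnRangeValid(7, 10));  // ALL
      }
    }
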
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
new file mode 100644
index 0000000..271204c
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * TestHiveMetaStoreWithEnvironmentContext. Test case for _with_environment_context
+ * calls in {@link org.apache.hadoop.hive.metastore.HiveMetaStore}
+ */
+public class TestHiveMetaStoreWithEnvironmentContext {
+
+  private Configuration conf;
+  private HiveMetaStoreClient msc;
+  private EnvironmentContext envContext;
+  private final Database db = new Database();
+  private Table table;
+  private Partition partition;
+
+  private static final String dbName = "hive3252";
+  private static final String tblName = "tmptbl";
+  private static final String renamed = "tmptbl2";
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.event.listeners",
+        DummyListener.class.getName());
+
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+    msc = new HiveMetaStoreClient(conf);
+
+    msc.dropDatabase(dbName, true, true);
+
+    Map<String, String> envProperties = new HashMap<>();
+    envProperties.put("hadoop.job.ugi", "test_user");
+    envContext = new EnvironmentContext(envProperties);
+
+    db.setName(dbName);
+
+    table = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .addTableParam("a", "string")
+        .addPartCol("b", "string")
+        .addCol("a", "string")
+        .addCol("b", "string")
+        .build();
+
+
+    partition = new PartitionBuilder()
+        .fromTable(table)
+        .addValue("2011")
+        .build();
+
+    DummyListener.notifyList.clear();
+  }
+
+  @Test
+  public void testEnvironmentContext() throws Exception {
+    int listSize = 0;
+
+    List<ListenerEvent> notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    msc.createDatabase(db);
+    listSize++;
+    assertEquals(listSize, notifyList.size());
+    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+    assert dbEvent.getStatus();
+
+    msc.createTable(table, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+    assert tblEvent.getStatus();
+    assertEquals(envContext, tblEvent.getEnvironmentContext());
+
+    table = msc.getTable(dbName, tblName);
+
+    partition.getSd().setLocation(table.getSd().getLocation() + "/part1");
+    msc.add_partition(partition, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+    assert partEvent.getStatus();
+    assertEquals(envContext, partEvent.getEnvironmentContext());
+
+    List<String> partVals = new ArrayList<>();
+    partVals.add("2012");
+    msc.appendPartition(dbName, tblName, partVals, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+    assert appendPartEvent.getStatus();
+    assertEquals(envContext, appendPartEvent.getEnvironmentContext());
+
+    table.setTableName(renamed);
+    msc.alter_table_with_environmentContext(dbName, tblName, table, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1);
+    assert alterTableEvent.getStatus();
+    assertEquals(envContext, alterTableEvent.getEnvironmentContext());
+
+    table.setTableName(tblName);
+    msc.alter_table_with_environmentContext(dbName, renamed, table, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    List<String> dropPartVals = new ArrayList<>();
+    dropPartVals.add("2011");
+    msc.dropPartition(dbName, tblName, dropPartVals, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    DropPartitionEvent dropPartEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
+    assert dropPartEvent.getStatus();
+    assertEquals(envContext, dropPartEvent.getEnvironmentContext());
+
+    msc.dropPartition(dbName, tblName, "b=2012", true, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    DropPartitionEvent dropPartByNameEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
+    assert dropPartByNameEvent.getStatus();
+    assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext());
+
+    msc.dropTable(dbName, tblName, true, false, envContext);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1);
+    assert dropTblEvent.getStatus();
+    assertEquals(envContext, dropTblEvent.getEnvironmentContext());
+
+    msc.dropDatabase(dbName);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+    assert dropDB.getStatus();
+  }
+
+}

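What this test pins down is the EnvironmentContext round trip: the client
attaches arbitrary key/value properties to a metastore call, and the server
hands the identical context to every listener event fired for that call
(read back above via getEnvironmentContext()). The client side of that round
trip in isolation, where msc and table are assumed to be a connected
HiveMetaStoreClient and an existing Table:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    final class EnvContextDemo {
      // Attach caller-supplied properties to a createTable call; listeners
      // registered on the server see the same context on CreateTableEvent.
      static void createWithContext(HiveMetaStoreClient msc, Table table)
          throws TException {
        Map<String, String> props = new HashMap<>();
        props.put("hadoop.job.ugi", "test_user");  // same key the test uses
        msc.createTable(table, new EnvironmentContext(props));
      }
    }
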
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
new file mode 100644
index 0000000..6854a93
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestMarkPartition {
+
+  protected Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+
+    System.setProperty("hive.metastore.event.clean.freq", "1s");
+    System.setProperty("hive.metastore.event.expiry.duration", "2s");
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+  }
+
+  @Test
+  public void testMarkingPartitionSet() throws TException, InterruptedException {
+    HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
+
+    final String dbName = "hive2215";
+    msc.dropDatabase(dbName, true, true, true);
+    Database db = new DatabaseBuilder()
+        .setName(dbName)
+        .build();
+    msc.createDatabase(db);
+
+    final String tableName = "tmptbl";
+    msc.dropTable(dbName, tableName, true, true);
+    Table table = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tableName)
+        .addCol("a", "string")
+        .addPartCol("b", "string")
+        .build();
+    msc.createTable(table);
+
+    Partition part = new PartitionBuilder()
+        .fromTable(table)
+        .addValue("2011")
+        .build();
+    msc.add_partition(part);
+    Map<String,String> kvs = new HashMap<>();
+    kvs.put("b", "'2011'");
+    msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
+    Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+    Thread.sleep(3000);
+    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+
+    kvs.put("b", "'2012'");
+    Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+    try {
+      msc.markPartitionForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
+      Assert.fail("Expected UnknownTableException");
+    } catch (UnknownTableException e) {
+      // All good
+    } catch(Exception e){
+      Assert.fail("Expected UnknownTableException");
+    }
+    try{
+      msc.isPartitionMarkedForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
+      Assert.fail("Expected UnknownTableException");
+    } catch (UnknownTableException e) {
+      // All good
+    } catch(Exception e){
+      Assert.fail("Expected UnknownTableException, received " + e.getClass().getName());
+    }
+    kvs.put("a", "'2012'");
+    try {
+      msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
+      Assert.fail("Expected InvalidPartitionException");
+    } catch (InvalidPartitionException e) {
+      // All good
+    } catch(Exception e){
+      Assert.fail("Expected InvalidPartitionException, received " + e.getClass().getName());
+    }
+  }
+
+}

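The test above exercises the LOAD_DONE marker protocol: a writer marks a
partition as loaded, readers poll for the marker, and the metastore's event
cleaner expires it (hence the 1s clean frequency, 2s expiry, and 3s sleep).
The mark-and-poll steps in isolation, using the same names as the test:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PartitionEventType;
    import org.apache.thrift.TException;

    final class LoadDoneDemo {
      // Mark partition b='2011' of hive2215.tmptbl as loaded, then poll;
      // returns true until the server-side expiry removes the marker.
      static boolean markAndPoll(HiveMetaStoreClient msc) throws TException {
        Map<String, String> partKvs = new HashMap<>();
        partKvs.put("b", "'2011'");
        msc.markPartitionForEvent("hive2215", "tmptbl", partKvs,
            PartitionEventType.LOAD_DONE);
        return msc.isPartitionMarkedForEvent("hive2215", "tmptbl", partKvs,
            PartitionEventType.LOAD_DONE);
      }
    }
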
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
new file mode 100644
index 0000000..ac1cc4c
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+
+public class TestMarkPartitionRemote extends TestMarkPartition {
+
+  @Before
+  public void startServer() throws Exception {
+    int port = MetaStoreTestUtils.findFreePort();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
new file mode 100644
index 0000000..643abd3
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * TestMetaStoreEndFunctionListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}
+ */
+public class TestMetaStoreEndFunctionListener {
+  private Configuration conf;
+  private HiveMetaStoreClient msc;
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.event.listeners",
+        DummyListener.class.getName());
+    System.setProperty("hive.metastore.pre.event.listeners",
+        DummyPreListener.class.getName());
+    System.setProperty("hive.metastore.end.function.listeners",
+        DummyEndFunctionListener.class.getName());
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+    msc = new HiveMetaStoreClient(conf);
+  }
+
+  @Test
+  public void testEndFunctionListener() throws Exception {
+    /* The objective here is to ensure that exceptions thrown by HiveMetaStore API
+     * methods bubble up and are stored in the MetaStoreEndFunctionContext objects.
+     */
+    String dbName = "hive3524";
+    String tblName = "tmptbl";
+    int listSize;
+
+    Database db = new DatabaseBuilder()
+        .setName(dbName)
+        .build();
+    msc.createDatabase(db);
+
+    try {
+      msc.getDatabase("UnknownDB");
+    } catch (Exception e) {
+      // All good
+    }
+    listSize = DummyEndFunctionListener.funcNameList.size();
+    String func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+    MetaStoreEndFunctionContext context = DummyEndFunctionListener.contextList.get(listSize-1);
+    assertEquals(func_name,"get_database");
+    assertFalse(context.isSuccess());
+    Exception e = context.getException();
+    assertTrue((e!=null));
+    assertTrue((e instanceof NoSuchObjectException));
+    assertEquals(context.getInputTableName(), null);
+
+    String unknownTable = "UnknownTable";
+    Table table = new TableBuilder()
+        .setDbName(db)
+        .setTableName(tblName)
+        .addCol("a", "string")
+        .addPartCol("b", "string")
+        .build();
+    msc.createTable(table);
+    try {
+      msc.getTable(dbName, unknownTable);
+    } catch (Exception e1) {
+      // All good
+    }
+    listSize = DummyEndFunctionListener.funcNameList.size();
+    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+    context = DummyEndFunctionListener.contextList.get(listSize-1);
+    assertEquals(func_name,"get_table");
+    assertFalse(context.isSuccess());
+    e = context.getException();
+    assertTrue((e!=null));
+    assertTrue((e instanceof NoSuchObjectException));
+    assertEquals(context.getInputTableName(), unknownTable);
+
+    try {
+      msc.getPartition("hive3524", tblName, "b=2012");
+    } catch (Exception e2) {
+      // All good
+    }
+    listSize = DummyEndFunctionListener.funcNameList.size();
+    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+    context = DummyEndFunctionListener.contextList.get(listSize-1);
+    assertEquals(func_name,"get_partition_by_name");
+    assertFalse(context.isSuccess());
+    e = context.getException();
+    assertTrue((e!=null));
+    assertTrue((e instanceof NoSuchObjectException));
+    assertEquals(context.getInputTableName(), tblName);
+    try {
+      msc.dropTable(dbName, unknownTable);
+    } catch (Exception e4) {
+      // All good
+    }
+    listSize = DummyEndFunctionListener.funcNameList.size();
+    func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+    context = DummyEndFunctionListener.contextList.get(listSize-1);
+    assertEquals(func_name,"get_table");
+    assertFalse(context.isSuccess());
+    e = context.getException();
+    assertTrue((e!=null));
+    assertTrue((e instanceof NoSuchObjectException));
+    assertEquals(context.getInputTableName(), "UnknownTable");
+
+  }
+
+}

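For context on the assertions above: the test registers a
DummyEndFunctionListener and then inspects the function name, success flag,
and captured exception of each completed call. A minimal listener in that
style, assuming the contract is the single onInvoke callback that the dummy
listener's recorded lists suggest:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionContext;
    import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener;

    public class RecordingEndFunctionListener extends MetaStoreEndFunctionListener {
      public static final List<String> names = new ArrayList<>();
      public static final List<MetaStoreEndFunctionContext> contexts =
          new ArrayList<>();

      public RecordingEndFunctionListener(Configuration conf) {
        super(conf);
      }

      @Override
      public void onInvoke(String function, MetaStoreEndFunctionContext context) {
        names.add(function);    // e.g. "get_database", "get_table"
        contexts.add(context);  // exposes isSuccess(), getException(),
                                // and getInputTableName()
      }
    }
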
http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
new file mode 100644
index 0000000..70ad262
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -0,0 +1,556 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * TestMetaStoreEventListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
+ * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
+ */
+public class TestMetaStoreEventListener {
+  private Configuration conf;
+  private HiveMetaStoreClient msc;
+
+  private static final String dbName = "hive2038";
+  private static final String tblName = "tmptbl";
+  private static final String renamed = "tmptbl2";
+  private static final String metaConfKey = "metastore.partition.name.whitelist.pattern";
+  private static final String metaConfVal = "";
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.event.listeners",
+        DummyListener.class.getName());
+    System.setProperty("hive.metastore.pre.event.listeners",
+        DummyPreListener.class.getName());
+
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+    msc = new HiveMetaStoreClient(conf);
+
+    msc.dropDatabase(dbName, true, true, true);
+    DummyListener.notifyList.clear();
+    DummyPreListener.notifyList.clear();
+  }
+
+  private void validateCreateDb(Database expectedDb, Database actualDb) {
+    assertEquals(expectedDb.getName(), actualDb.getName());
+    assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
+  }
+
+  private void validateTable(Table expectedTable, Table actualTable) {
+    assertEquals(expectedTable.getTableName(), actualTable.getTableName());
+    assertEquals(expectedTable.getDbName(), actualTable.getDbName());
+    assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
+  }
+
+  private void validateCreateTable(Table expectedTable, Table actualTable) {
+    validateTable(expectedTable, actualTable);
+  }
+
+  private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
+    assertEquals(expectedPartition, actualPartition);
+  }
+
+  private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
+    assertEquals(expectedTable, actualTable);
+  }
+
+  private void validatePartition(Partition expectedPartition, Partition actualPartition) {
+    assertEquals(expectedPartition.getValues(), actualPartition.getValues());
+    assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
+    assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
+  }
+
+  private void validateAlterPartition(Partition expectedOldPartition,
+      Partition expectedNewPartition, String actualOldPartitionDbName,
+      String actualOldPartitionTblName,List<String> actualOldPartitionValues,
+      Partition actualNewPartition) {
+    assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
+    assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
+    assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
+
+    validatePartition(expectedNewPartition, actualNewPartition);
+  }
+
+  private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
+      Table actualOldTable, Table actualNewTable) {
+    validateTable(expectedOldTable, actualOldTable);
+    validateTable(expectedNewTable, actualNewTable);
+  }
+
+  private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
+      Table actualOldTable, Table actualNewTable) {
+    validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
+
+    assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
+    assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
+  }
+
+  private void validateLoadPartitionDone(String expectedTableName,
+      Map<String,String> expectedPartitionName, String actualTableName,
+      Map<String,String> actualPartitionName) {
+    assertEquals(expectedPartitionName, actualPartitionName);
+    assertEquals(expectedTableName, actualTableName);
+  }
+
+  private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
+    while (expectedPartitions.hasNext()){
+      assertTrue(actualPartitions.hasNext());
+      validatePartition(expectedPartitions.next(), actualPartitions.next());
+    }
+    assertFalse(actualPartitions.hasNext());
+  }
+
+  private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
+    validateTable(expectedTable, actualTable);
+  }
+
+  private void validateDropTable(Table expectedTable, Table actualTable) {
+    validateTable(expectedTable, actualTable);
+  }
+
+  private void validateDropDb(Database expectedDb, Database actualDb) {
+    assertEquals(expectedDb, actualDb);
+  }
+
+  private void validateIndex(Index expectedIndex, Index actualIndex) {
+    assertEquals(expectedIndex.getDbName(), actualIndex.getDbName());
+    assertEquals(expectedIndex.getIndexName(), actualIndex.getIndexName());
+    assertEquals(expectedIndex.getIndexHandlerClass(), actualIndex.getIndexHandlerClass());
+    assertEquals(expectedIndex.getOrigTableName(), actualIndex.getOrigTableName());
+    assertEquals(expectedIndex.getIndexTableName(), actualIndex.getIndexTableName());
+    assertEquals(expectedIndex.getSd().getLocation(), actualIndex.getSd().getLocation());
+  }
+
+  private void validateAddIndex(Index expectedIndex, Index actualIndex) {
+    validateIndex(expectedIndex, actualIndex);
+  }
+
+  private void validateAlterIndex(Index expectedOldIndex, Index actualOldIndex,
+      Index expectedNewIndex, Index actualNewIndex) {
+    validateIndex(expectedOldIndex, actualOldIndex);
+    validateIndex(expectedNewIndex, actualNewIndex);
+  }
+
+  private void validateDropIndex(Index expectedIndex, Index actualIndex) {
+    validateIndex(expectedIndex, actualIndex);
+  }
+
+  @Test
+  public void testListener() throws Exception {
+    int listSize = 0;
+
+    List<ListenerEvent> notifyList = DummyListener.notifyList;
+    List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    assertEquals(preNotifyList.size(), listSize);
+
+    Database db = new DatabaseBuilder()
+        .setName(dbName)
+        .build();
+    msc.createDatabase(db);
+    listSize++;
+    PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
+    db = msc.getDatabase(dbName);
+    assertEquals(listSize, notifyList.size());
+    assertEquals(listSize + 1, preNotifyList.size());
+    validateCreateDb(db, preDbEvent.getDatabase());
+
+    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+    Assert.assertTrue(dbEvent.getStatus());
+    validateCreateDb(db, dbEvent.getDatabase());
+
+    Table table = new TableBuilder()
+        .setDbName(db)
+        .setTableName(tblName)
+        .addCol("a", "string")
+        .addPartCol("b", "string")
+        .build();
+    msc.createTable(table);
+    PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
+    listSize++;
+    Table tbl = msc.getTable(dbName, tblName);
+    validateCreateTable(tbl, preTblEvent.getTable());
+    assertEquals(notifyList.size(), listSize);
+
+    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+    Assert.assertTrue(tblEvent.getStatus());
+    validateCreateTable(tbl, tblEvent.getTable());
+
+    String indexName = "tmptbl_i";
+    Index index = new IndexBuilder()
+        .setDbAndTableName(table)
+        .setIndexName(indexName)
+        .addCol("a", "string")
+        .setDeferredRebuild(true)
+        .addIndexParam("prop1", "val1")
+        .addIndexParam("prop2", "val2")
+        .build();
+    Table indexTable = new TableBuilder()
+        .fromIndex(index)
+        .build();
+    msc.createIndex(index, indexTable);
+    listSize += 2;  // createIndex fires two events: one for the index and one for the index table it creates internally
+    assertEquals(notifyList.size(), listSize);
+
+    AddIndexEvent addIndexEvent = (AddIndexEvent)notifyList.get(listSize - 1);
+    Assert.assertTrue(addIndexEvent.getStatus());
+    PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 2));
+
+    Index oldIndex = msc.getIndex(dbName, tblName, indexName);
+
+    validateAddIndex(oldIndex, addIndexEvent.getIndex());
+
+    validateAddIndex(oldIndex, preAddIndexEvent.getIndex());
+
+    Index alteredIndex = new Index(oldIndex);
+    alteredIndex.getParameters().put("prop3", "val3");
+    msc.alter_index(dbName, tblName, indexName, alteredIndex);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    Index newIndex = msc.getIndex(dbName, tblName, indexName);
+
+    AlterIndexEvent alterIndexEvent = (AlterIndexEvent) notifyList.get(listSize - 1);
+    Assert.assertTrue(alterIndexEvent.getStatus());
+    validateAlterIndex(oldIndex, alterIndexEvent.getOldIndex(),
+        newIndex, alterIndexEvent.getNewIndex());
+
+    PreAlterIndexEvent preAlterIndexEvent = (PreAlterIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
+    validateAlterIndex(oldIndex, preAlterIndexEvent.getOldIndex(),
+        newIndex, preAlterIndexEvent.getNewIndex());
+
+    msc.dropIndex(dbName, tblName, indexName, true);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    DropIndexEvent dropIndexEvent = (DropIndexEvent) notifyList.get(listSize - 1);
+    Assert.assertTrue(dropIndexEvent.getStatus());
+    validateDropIndex(newIndex, dropIndexEvent.getIndex());
+
+    PreDropIndexEvent preDropIndexEvent = (PreDropIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
+    validateDropIndex(newIndex, preDropIndexEvent.getIndex());
+
+    Partition part = new PartitionBuilder()
+        .fromTable(table)
+        .addValue("2011")
+        .build();
+    msc.add_partition(part);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+
+    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+    Assert.assertTrue(partEvent.getStatus());
+    part = msc.getPartition("hive2038", "tmptbl", "b=2011");
+    Partition partAdded = partEvent.getPartitionIterator().next();
+    validateAddPartition(part, partAdded);
+    validateTableInAddPartition(tbl, partEvent.getTable());
+    validateAddPartition(part, prePartEvent.getPartitions().get(0));
+
+    // Test adding multiple partitions in a single partition-set, atomically.
+    int currentTime = (int) (System.currentTimeMillis() / 1000); // seconds, to avoid int overflow from raw millis
+    HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
+    table = hmsClient.getTable(dbName, "tmptbl");
+    Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
+                                        currentTime, table.getSd(), table.getParameters());
+    Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
+                                        currentTime, table.getSd(), table.getParameters());
+    Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
+                                        currentTime, table.getSd(), table.getParameters());
+    hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+    ++listSize;
+    AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+    assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
+    List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+    assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+    assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+    assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+    assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
+
+    part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
+    msc.alter_partition(dbName, tblName, part);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreAlterPartitionEvent preAlterPartEvent =
+        (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+    // The partition did not change, so the new partition in the event
+    // should match the original partition.
+    Partition origP = msc.getPartition(dbName, tblName, "b=2011");
+
+    AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
+    Assert.assertTrue(alterPartEvent.getStatus());
+    validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
+        alterPartEvent.getOldPartition().getTableName(),
+        alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
+
+
+    validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
+        preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
+        preAlterPartEvent.getNewPartition());
+
+    List<String> partVals = new ArrayList<>();
+    partVals.add("c=2012");
+    int preEventListSize = preNotifyList.size() + 1;
+    Partition newPart = msc.appendPartition(dbName, tblName, partVals);
+
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    assertEquals(preNotifyList.size(), preEventListSize);
+
+    AddPartitionEvent appendPartEvent =
+        (AddPartitionEvent)(notifyList.get(listSize-1));
+    Partition partAppended = appendPartEvent.getPartitionIterator().next();
+    validateAddPartition(newPart, partAppended);
+
+    PreAddPartitionEvent preAppendPartEvent =
+        (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+    validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
+
+    Table renamedTable = new Table(table);
+    renamedTable.setTableName(renamed);
+    msc.alter_table(dbName, tblName, renamedTable);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+    renamedTable = msc.getTable(dbName, renamed);
+
+    AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+    Assert.assertTrue(alterTableE.getStatus());
+    validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+    validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
+        preAlterTableE.getNewTable());
+
+    // change the table name back
+    table = new Table(renamedTable);
+    table.setTableName(tblName);
+    msc.alter_table(dbName, renamed, table);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    table = msc.getTable(dbName, tblName);
+    table.getSd().addToCols(new FieldSchema("c", "int", ""));
+    msc.alter_table(dbName, tblName, table);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+    Table altTable = msc.getTable(dbName, tblName);
+
+    alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+    Assert.assertTrue(alterTableE.getStatus());
+    validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+    validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
+        preAlterTableE.getNewTable());
+
+    Map<String,String> kvs = new HashMap<>(1);
+    kvs.put("b", "2011");
+    msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+
+    LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
+    Assert.assertTrue(partMarkEvent.getStatus());
+    validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
+        partMarkEvent.getPartitionName());
+
+    PreLoadPartitionDoneEvent prePartMarkEvent =
+        (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
+    validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
+        prePartMarkEvent.getPartitionName());
+
+    msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
+        .size() - 1);
+
+    DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
+    Assert.assertTrue(dropPart.getStatus());
+    validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
+    validateTableInDropPartition(tbl, dropPart.getTable());
+
+    validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
+    validateTableInDropPartition(tbl, preDropPart.getTable());
+
+    msc.dropTable(dbName, tblName);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+    DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
+    Assert.assertTrue(dropTbl.getStatus());
+    validateDropTable(tbl, dropTbl.getTable());
+    validateDropTable(tbl, preDropTbl.getTable());
+
+    msc.dropDatabase(dbName);
+    listSize++;
+    assertEquals(notifyList.size(), listSize);
+    PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+    Assert.assertTrue(dropDB.getStatus());
+    validateDropDb(db, dropDB.getDatabase());
+    validateDropDb(db, preDropDB.getDatabase());
+
+    msc.setMetaConf("metastore.try.direct.sql", "false");
+    ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
+    assertEquals("metastore.try.direct.sql", event.getKey());
+    assertEquals("true", event.getOldValue());
+    assertEquals("false", event.getNewValue());
+  }
+
+  @Test
+  public void testMetaConfNotifyListenersClosingClient() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), metaConfVal);
+    assertEquals(event.getNewValue(), "[test pattern modified]");
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), "[test pattern modified]");
+    assertEquals(event.getNewValue(), metaConfVal);
+  }
+
+  @Test
+  public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+    HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+    nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), metaConfVal);
+    assertEquals(event.getNewValue(), "[test pattern modified]");
+    // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+    nonClosingClient.getTTransport().close();
+
+    Thread.sleep(2 * 1000);
+
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), "[test pattern modified]");
+    assertEquals(event.getNewValue(), metaConfVal);
+  }
+
+  @Test
+  public void testMetaConfDuplicateNotification() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, metaConfVal);
+    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+
+    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+    // Setting the key to the same value should not trigger a ConfigChangeEvent during shutdown
+    assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+  }
+
+  @Test
+  public void testMetaConfSameHandler() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+    IHMSHandler beforeHandler = event.getIHMSHandler();
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+    IHMSHandler afterHandler = event.getIHMSHandler();
+    // Meta-conf cleanup should trigger an event to the listener
+    assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+    // Both handlers should be the same
+    assertEquals(beforeHandler, afterHandler);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
new file mode 100644
index 0000000..d789cd6
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Ensure that the status of MetaStore events depends on the RawStore's commit status.
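+ *
+ * <p>A rough sketch of the mechanism under test (DummyRawStoreControlledCommit is
+ * assumed to delegate to a real RawStore and gate commits on a test-controlled flag):</p>
+ * <pre>
+ *   public boolean commitTransaction() {
+ *     // shouldCommitSucceed is toggled via setCommitSucceed() in the test below
+ *     return shouldCommitSucceed && delegate.commitTransaction();
+ *   }
+ * </pre>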
+ */
+public class TestMetaStoreEventListenerOnlyOnCommit {
+
+  private Configuration conf;
+  private HiveMetaStoreClient msc;
+
+  @Before
+  public void setUp() throws Exception {
+    DummyRawStoreControlledCommit.setCommitSucceed(true);
+
+    System.setProperty(ConfVars.EVENT_LISTENERS.toString(), DummyListener.class.getName());
+    System.setProperty(ConfVars.RAW_STORE_IMPL.toString(),
+            DummyRawStoreControlledCommit.class.getName());
+
+    int port = MetaStoreTestUtils.findFreePort();
+
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+    msc = new HiveMetaStoreClient(conf);
+
+    DummyListener.notifyList.clear();
+  }
+
+  @Test
+  public void testEventStatus() throws Exception {
+    int listSize = 0;
+    List<ListenerEvent> notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+
+    String dbName = "tmpDb";
+    Database db = new DatabaseBuilder()
+        .setName(dbName)
+        .build();
+    msc.createDatabase(db);
+
+    listSize += 1;
+    notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    assertTrue(DummyListener.getLastEvent().getStatus());
+
+    String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit";
+    Table table = new TableBuilder()
+        .setDbName(db)
+        .setTableName(tableName)
+        .addCol("id", "int")
+        .addPartCol("ds", "string")
+        .build();
+    msc.createTable(table);
+    listSize += 1;
+    notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    assertTrue(DummyListener.getLastEvent().getStatus());
+
+    Partition part = new PartitionBuilder()
+        .fromTable(table)
+        .addValue("foo1")
+        .build();
+    msc.add_partition(part);
+    listSize += 1;
+    notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    assertTrue(DummyListener.getLastEvent().getStatus());
+
+    DummyRawStoreControlledCommit.setCommitSucceed(false);
+
+    part = new PartitionBuilder()
+        .fromTable(table)
+        .addValue("foo2")
+        .build();
+    msc.add_partition(part);
+    listSize += 1;
+    notifyList = DummyListener.notifyList;
+    assertEquals(notifyList.size(), listSize);
+    assertFalse(DummyListener.getLastEvent().getStatus());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
new file mode 100644
index 0000000..82e39f1
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Mostly the same tests as TestMetaStoreEventListener, but using the old Hive conf
+ * keys instead of the new metastore conf keys.
+ */
+public class TestMetaStoreEventListenerWithOldConf {
+  private Configuration conf;
+
+  private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern";
+  private static final String metaConfVal = "";
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.event.listeners",
+        DummyListener.class.getName());
+    System.setProperty("hive.metastore.pre.event.listeners",
+        DummyPreListener.class.getName());
+
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+    DummyListener.notifyList.clear();
+    DummyPreListener.notifyList.clear();
+  }
+
+  @Test
+  public void testMetaConfNotifyListenersClosingClient() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), metaConfVal);
+    assertEquals(event.getNewValue(), "[test pattern modified]");
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), "[test pattern modified]");
+    assertEquals(event.getNewValue(), metaConfVal);
+  }
+
+  @Test
+  public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+    HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+    nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), metaConfVal);
+    assertEquals(event.getNewValue(), "[test pattern modified]");
+    // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+    nonClosingClient.getTTransport().close();
+
+    Thread.sleep(2 * 1000);
+
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    assertEquals(event.getOldValue(), "[test pattern modified]");
+    assertEquals(event.getNewValue(), metaConfVal);
+  }
+
+  @Test
+  public void testMetaConfDuplicateNotification() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, metaConfVal);
+    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+
+    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+    // Setting the key to the same value should not trigger a ConfigChangeEvent during shutdown
+    assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+  }
+
+  @Test
+  public void testMetaConfSameHandler() throws Exception {
+    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+    IHMSHandler beforeHandler = event.getHandler();
+    closingClient.close();
+
+    Thread.sleep(2 * 1000);
+    event = (ConfigChangeEvent) DummyListener.getLastEvent();
+    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+    IHMSHandler afterHandler = event.getHandler();
+    // Meta-conf cleanup should trigger an event to the listener
+    assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+    // Both handlers should be the same
+    assertEquals(beforeHandler, afterHandler);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
new file mode 100644
index 0000000..dbd1426
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * TestMetaStoreInitListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener}
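+ *
+ * <p>DummyMetaStoreInitListener is assumed to look roughly like this (the onInit
+ * signature matches the listener classes later in this patch):</p>
+ * <pre>
+ *   public void onInit(MetaStoreInitContext context) throws MetaException {
+ *     wasCalled = true;
+ *   }
+ * </pre>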
+ */
+public class TestMetaStoreInitListener {
+  private Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    System.setProperty("hive.metastore.init.hooks", DummyMetaStoreInitListener.class.getName());
+    int port = MetaStoreTestUtils.findFreePort();
+    conf = MetastoreConf.newMetastoreConf();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+    MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStoreWithRetry(port, HadoopThriftAuthBridge.getBridge(), conf);
+  }
+
+  @Test
+  public void testMetaStoreInitListener() throws Exception {
+    // DummyMetaStoreInitListener's onInit is called during HMSHandler
+    // initialization, which sets this flag to true
+    Assert.assertTrue(DummyMetaStoreInitListener.wasCalled);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
new file mode 100644
index 0000000..8185d57
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test that an InvocationTargetException thrown from a listener class's
+ * constructor is unwrapped into a meaningful MetaException.
+ */
+public class TestMetaStoreListenersError {
+
+  @Test
+  public void testInitListenerException() throws Throwable {
+
+    System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName());
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    int port = MetaStoreTestUtils.findFreePort();
+    try {
+      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+      Assert.fail();
+    } catch (Throwable throwable) {
+      Assert.assertEquals(MetaException.class, throwable.getClass());
+      Assert.assertEquals(
+          "Failed to instantiate listener named: " +
+              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorInitListener, " +
+              "reason: java.lang.IllegalArgumentException: exception on constructor",
+          throwable.getMessage());
+    }
+  }
+
+  @Test
+  public void testEventListenerException() throws Throwable {
+
+    System.setProperty("hive.metastore.init.hooks", "");
+    System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName());
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    int port = MetaStoreTestUtils.findFreePort();
+    try {
+      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+      Assert.fail();
+    } catch (Throwable throwable) {
+      Assert.assertEquals(MetaException.class, throwable.getClass());
+      Assert.assertEquals(
+          "Failed to instantiate listener named: " +
+              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorEventListener, " +
+              "reason: java.lang.IllegalArgumentException: exception on constructor",
+          throwable.getMessage());
+    }
+  }
+
+  public static class ErrorInitListener extends MetaStoreInitListener {
+
+    public ErrorInitListener(Configuration config) {
+      super(config);
+      throw new IllegalArgumentException("exception on constructor");
+    }
+
+    public void onInit(MetaStoreInitContext context) throws MetaException {
+    }
+  }
+
+  public static class ErrorEventListener extends MetaStoreEventListener {
+
+    public ErrorEventListener(Configuration config) {
+      super(config);
+      throw new IllegalArgumentException("exception on constructor");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 24ea62e..372dee6 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -129,8 +129,7 @@ public class TestObjectStore {
   @Before
   public void setUp() throws Exception {
     Configuration conf = MetastoreConf.newMetastoreConf();
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class.getName());
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
 
     objectStore = new ObjectStore();
     objectStore.setConf(conf);
@@ -462,8 +461,7 @@ public class TestObjectStore {
     String value1 = "another_value";
     Assume.assumeTrue(System.getProperty(key) == null);
     Configuration localConf = MetastoreConf.newMetastoreConf();
-    MetastoreConf.setVar(localConf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class.getName());
+    MetaStoreTestUtils.setConfForStandloneMode(localConf);
     localConf.set(key, value);
     localConf.set(key1, value1);
     objectStore = new ObjectStore();
@@ -537,8 +535,7 @@ public class TestObjectStore {
             .debug(NUM_THREADS + " threads going to add notification"));
 
     Configuration conf = MetastoreConf.newMetastoreConf();
-    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class.getName());
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
     /*
        Below are the properties that need to be set based on what database this test is going to be run
      */

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
new file mode 100644
index 0000000..b4e5a85
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.concurrent.TimeUnit;
+
+import javax.jdo.JDOCanRetryException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class TestObjectStoreInitRetry {
+  private static final Logger LOG = LoggerFactory.getLogger(TestObjectStoreInitRetry.class);
+
+  private static int injectConnectFailure = 0;
+
+  private static void setInjectConnectFailure(int x){
+    injectConnectFailure = x;
+  }
+
+  private static int getInjectConnectFailure(){
+    return injectConnectFailure;
+  }
+
+  private static void decrementInjectConnectFailure(){
+    injectConnectFailure--;
+  }
+
+  @BeforeClass
+  public static void oneTimeSetup() throws SQLException {
+    // dummy instantiation to make sure any static/ctor code blocks of that
+    // driver are loaded and ready to go.
+    DriverManager.registerDriver(new FakeDerby());
+  }
+
+  @AfterClass
+  public static void oneTimeTearDown() throws SQLException {
+    DriverManager.deregisterDriver(new FakeDerby());
+  }
+
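+  // Invoked on each simulated connection attempt (presumably routed through the
+  // FakeDerby driver registered above); throws a retryable JDO exception while
+  // injected failures remain.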
+  static void misbehave() throws RuntimeException {
+    TestObjectStoreInitRetry.debugTrace();
+    if (TestObjectStoreInitRetry.getInjectConnectFailure() > 0){
+      TestObjectStoreInitRetry.decrementInjectConnectFailure();
+      RuntimeException re = new JDOCanRetryException();
+      LOG.debug("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure(), re);
+      throw re;
+    }
+  }
+
+  // Debug instrumentation - useful for finding which functions get called, and how often.
+  static void debugTrace() {
+    if (LOG.isDebugEnabled()){
+      Exception e = new Exception();
+      LOG.debug("." + e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure());
+    }
+  }
+
+  protected static Configuration conf;
+
+  @Test
+  public void testObjStoreRetry() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, 4);
+    MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 1, TimeUnit.SECONDS);
+    MetastoreConf.setVar(conf, ConfVars.CONNECTION_DRIVER, FakeDerby.class.getName());
+    MetastoreConf.setBoolVar(conf, ConfVars.TRY_DIRECT_SQL, true);
+    String jdbcUrl = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY);
+    jdbcUrl = jdbcUrl.replace("derby", "fderby");
+    MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY, jdbcUrl);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+    FakeDerby fd = new FakeDerby();
+
+    ObjectStore objStore = new ObjectStore();
+
+    Exception savE = null;
+    try {
+      setInjectConnectFailure(5);
+      objStore.setConf(conf);
+      Assert.fail();
+    } catch (Exception e) {
+      LOG.info("Caught exception ", e);
+      savE = e;
+    }
+
+    /*
+     * A note on retries: HMSHANDLERATTEMPTS is configured as 4 above, and we
+     * injected 5 connect failures, so after setConf() exhausts its 4 attempts
+     * there is 5 - 4 == 1 injected failure left unconsumed.
+     */
+
+    assertEquals(1, getInjectConnectFailure());
+    assertNotNull(savE);
+
+    setInjectConnectFailure(0);
+    objStore.setConf(conf);
+    assertEquals(0, getInjectConnectFailure());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index bf8556d..6a44833 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -89,9 +89,8 @@ public class TestOldSchema {
   @Before
   public void setUp() throws Exception {
     Configuration conf = MetastoreConf.newMetastoreConf();
-    MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
-        MockPartitionExpressionProxy.class, PartitionExpressionProxy.class);
     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
 
     store = new ObjectStore();
     store.setConf(conf);


[18/50] [abbrv] hive git commit: HIVE-17981 Create a set of builders for Thrift classes. This closes #274. (Alan Gates, reviewed by Peter Vary)

Posted by ga...@apache.org.
HIVE-17981 Create a set of builders for Thrift classes.  This closes #274.  (Alan Gates, reviewed by Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/12a33fd0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/12a33fd0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/12a33fd0

Branch: refs/heads/standalone-metastore
Commit: 12a33fd0d1f82422048af5a389671812bdf03c93
Parents: ad106f0
Author: Alan Gates <ga...@hortonworks.com>
Authored: Mon Dec 18 14:27:13 2017 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Dec 18 14:27:13 2017 -0800

----------------------------------------------------------------------
 .../client/builder/ConstraintBuilder.java       |  98 +++++++++
 .../client/builder/DatabaseBuilder.java         |  89 ++++++++
 .../GrantRevokePrivilegeRequestBuilder.java     |  63 ++++++
 .../builder/HiveObjectPrivilegeBuilder.java     |  63 ++++++
 .../client/builder/HiveObjectRefBuilder.java    |  63 ++++++
 .../metastore/client/builder/IndexBuilder.java  | 104 +++++++++
 .../client/builder/PartitionBuilder.java        | 102 +++++++++
 .../builder/PrivilegeGrantInfoBuilder.java      |  84 ++++++++
 .../metastore/client/builder/RoleBuilder.java   |  55 +++++
 .../client/builder/SQLForeignKeyBuilder.java    |  83 ++++++++
 .../builder/SQLNotNullConstraintBuilder.java    |  37 ++++
 .../client/builder/SQLPrimaryKeyBuilder.java    |  42 ++++
 .../builder/SQLUniqueConstraintBuilder.java     |  37 ++++
 .../builder/StorageDescriptorBuilder.java       | 210 +++++++++++++++++++
 .../metastore/client/builder/TableBuilder.java  | 156 ++++++++++++++
 15 files changed, 1286 insertions(+)
----------------------------------------------------------------------
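
All of these builders share the same fluent pattern. As a quick orientation, here is a
sketch mirroring how the tests earlier in this series drive TableBuilder (the database
object and names are illustrative):

    Table t = new TableBuilder()
        .setDbName(db)            // db: a Database built with DatabaseBuilder
        .setTableName("mytable")
        .addCol("id", "int")
        .build();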


http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
new file mode 100644
index 0000000..50e779a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+/**
+ * Base builder for all types of constraints.  Database name, table name, and column name
+ * must be provided.
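+ *
+ * <p>A minimal sketch of driving a concrete subclass (the table and names are
+ * illustrative):</p>
+ * <pre>
+ *   builder.setDbAndTableName(someTable)
+ *          .setColumnName("id")
+ *          .setConstraintName("id_pk");
+ * </pre>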
+ * @param <T> Type of builder extending this.
+ */
+abstract class ConstraintBuilder<T> {
+  protected String dbName, tableName, columnName, constraintName;
+  protected int keySeq;
+  protected boolean enable, validate, rely;
+  private T child;
+
+  protected ConstraintBuilder() {
+    keySeq = 1;
+    enable = true;
+    validate = rely = false;
+  }
+
+  protected void setChild(T child) {
+    this.child = child;
+  }
+
+  protected void checkBuildable(String defaultConstraintName) throws MetaException {
+    if (dbName == null || tableName == null || columnName == null) {
+      throw new MetaException("You must provide database name, table name, and column name");
+    }
+    if (constraintName == null) {
+      constraintName = dbName + "_" + tableName + "_" + columnName + "_" + defaultConstraintName;
+    }
+  }
+
+  public T setDbName(String dbName) {
+    this.dbName = dbName;
+    return child;
+  }
+
+  public T setTableName(String tableName) {
+    this.tableName = tableName;
+    return child;
+  }
+
+  public T setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return child;
+  }
+
+  public T setColumnName(String columnName) {
+    this.columnName = columnName;
+    return child;
+  }
+
+  public T setConstraintName(String constraintName) {
+    this.constraintName = constraintName;
+    return child;
+  }
+
+  public T setKeySeq(int keySeq) {
+    this.keySeq = keySeq;
+    return child;
+  }
+
+  public T setEnable(boolean enable) {
+    this.enable = enable;
+    return child;
+  }
+
+  public T setValidate(boolean validate) {
+    this.validate = validate;
+    return child;
+  }
+
+  public T setRely(boolean rely) {
+    this.rely = rely;
+    return child;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
new file mode 100644
index 0000000..7627d89
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A builder for {@link Database}.  The name of the new database is required.  Everything else
+ * selects reasonable defaults.
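+ *
+ * <p>For example, mirroring the test usage earlier in this series ("mydb" is
+ * illustrative):</p>
+ * <pre>
+ *   Database db = new DatabaseBuilder()
+ *       .setName("mydb")
+ *       .build();
+ * </pre>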
+ */
+public class DatabaseBuilder {
+  private String name, description, location;
+  private Map<String, String> params = new HashMap<>();
+  private String ownerName;
+  private PrincipalType ownerType;
+
+  public DatabaseBuilder setName(String name) {
+    this.name = name;
+    return this;
+  }
+
+  public DatabaseBuilder setDescription(String description) {
+    this.description = description;
+    return this;
+  }
+
+  public DatabaseBuilder setLocation(String location) {
+    this.location = location;
+    return this;
+  }
+
+  public DatabaseBuilder setParams(Map<String, String> params) {
+    this.params = params;
+    return this;
+  }
+
+  public DatabaseBuilder addParam(String key, String value) {
+    params.put(key, value);
+    return this;
+  }
+
+  public DatabaseBuilder setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+    return this;
+  }
+
+  public DatabaseBuilder setOwnerType(PrincipalType ownerType) {
+    this.ownerType = ownerType;
+    return this;
+  }
+
+  public Database build() throws TException {
+    if (name == null) throw new MetaException("You must name the database");
+    Database db = new Database(name, description, location, params);
+    try {
+      if (ownerName == null) ownerName = SecurityUtils.getUser(); // default the owner to the current user
+      db.setOwnerName(ownerName);
+      if (ownerType == null) ownerType = PrincipalType.USER;
+      db.setOwnerType(ownerType);
+      return db;
+    } catch (IOException e) {
+      throw MetaStoreUtils.newMetaException(e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
new file mode 100644
index 0000000..26cea19
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GrantRevokePrivilegeRequestBuilder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
+import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+
+/**
+ * A builder for {@link GrantRevokePrivilegeRequest}.  The revoke-grant-option flag defaults to
+ * false.  The request type and the privileges must be provided.
+ */
+public class GrantRevokePrivilegeRequestBuilder {
+  private GrantRevokeType requestType;
+  private PrivilegeBag privileges;
+  private boolean revokeGrantOption;
+
+  public GrantRevokePrivilegeRequestBuilder() {
+    privileges = new PrivilegeBag();
+    revokeGrantOption = false;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder setRequestType(GrantRevokeType requestType) {
+    this.requestType = requestType;
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder setRevokeGrantOption(boolean revokeGrantOption) {
+    this.revokeGrantOption = revokeGrantOption;
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequestBuilder addPrivilege(HiveObjectPrivilege privilege) {
+    privileges.addToPrivileges(privilege);
+    return this;
+  }
+
+  public GrantRevokePrivilegeRequest build() throws MetaException {
+    if (requestType == null || privileges.getPrivilegesSize() == 0) {
+      throw new MetaException("The request type and at least one privilege must be provided.");
+    }
+    GrantRevokePrivilegeRequest rqst = new GrantRevokePrivilegeRequest(requestType, privileges);
+    if (revokeGrantOption) rqst.setRevokeGrantOption(revokeGrantOption);
+    return rqst;
+  }
+}
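
For illustration, a sketch of driving the request builder; somePrivilege stands in
for a HiveObjectPrivilege produced by the HiveObjectPrivilegeBuilder below:

    GrantRevokePrivilegeRequest rqst = new GrantRevokePrivilegeRequestBuilder()
        .setRequestType(GrantRevokeType.GRANT)
        .addPrivilege(somePrivilege)  // a HiveObjectPrivilege built elsewhere
        .build();                     // throws MetaException without a type and a privilege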

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
new file mode 100644
index 0000000..d802e1a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectPrivilegeBuilder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+
+/**
+ * Builder for {@link HiveObjectPrivilege}.  All values must be set.
+ */
+public class HiveObjectPrivilegeBuilder {
+  private HiveObjectRef hiveObjectRef;
+  private String principleName;
+  private PrincipalType principalType;
+  private PrivilegeGrantInfo grantInfo;
+
+  public HiveObjectPrivilegeBuilder setHiveObjectRef(HiveObjectRef hiveObjectRef) {
+    this.hiveObjectRef = hiveObjectRef;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setPrincipleName(String principleName) {
+    this.principleName = principleName;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setPrincipalType(PrincipalType principalType) {
+    this.principalType = principalType;
+    return this;
+  }
+
+  public HiveObjectPrivilegeBuilder setGrantInfo(PrivilegeGrantInfo grantInfo) {
+    this.grantInfo = grantInfo;
+    return this;
+  }
+
+  public HiveObjectPrivilege build() throws MetaException {
+    if (hiveObjectRef == null || principleName == null || principalType == null ||
+        grantInfo == null) {
+      throw new MetaException("hive object reference, principle name and type, and grant info " +
+          "must all be provided");
+    }
+    return new HiveObjectPrivilege(hiveObjectRef, principleName, principalType, grantInfo);
+  }
+}
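
A sketch tying the privilege pieces together; tableRef and grantInfo stand in for
values produced by the HiveObjectRefBuilder and PrivilegeGrantInfoBuilder elsewhere
in this commit, and the principal name is hypothetical:

    HiveObjectPrivilege priv = new HiveObjectPrivilegeBuilder()
        .setHiveObjectRef(tableRef)            // from HiveObjectRefBuilder
        .setPrincipleName("etl_user")          // hypothetical principal
        .setPrincipalType(PrincipalType.USER)
        .setGrantInfo(grantInfo)               // from PrivilegeGrantInfoBuilder
        .build();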

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
new file mode 100644
index 0000000..62a227a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/HiveObjectRefBuilder.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.Collections;
+
+/**
+ * A builder for {@link HiveObjectRef}.  Unlike most builders (which allow a gradual building up
+ * of the values), this gives a number of methods that take the object to be referenced and then
+ * build the appropriate reference.  This is intended primarily for use with
+ * {@link HiveObjectPrivilegeBuilder}.
+ */
+public class HiveObjectRefBuilder {
+  // No state is kept; each method constructs the appropriate reference directly.
+
+  public HiveObjectRef buildGlobalReference() {
+    return new HiveObjectRef(HiveObjectType.GLOBAL, null, null, Collections.emptyList(), null);
+  }
+
+  public HiveObjectRef buildDatabaseReference(Database db) {
+    return new
+        HiveObjectRef(HiveObjectType.DATABASE, db.getName(), null, Collections.emptyList(), null);
+  }
+
+  public HiveObjectRef buildTableReference(Table table) {
+    return new HiveObjectRef(HiveObjectType.TABLE, table.getDbName(), table.getTableName(),
+        Collections.emptyList(), null);
+  }
+
+  public HiveObjectRef buildPartitionReference(Partition part) {
+    return new HiveObjectRef(HiveObjectType.PARTITION, part.getDbName(), part.getTableName(),
+        part.getValues(), null);
+  }
+
+  public HiveObjectRef buildColumnReference(Table table, String columnName) {
+    // Use the COLUMN object type so that the column name carries meaning in the reference.
+    return new HiveObjectRef(HiveObjectType.COLUMN, table.getDbName(), table.getTableName(),
+        Collections.emptyList(), columnName);
+  }
+}
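
Because this builder takes whole objects rather than accumulating fields, usage is a
single call; a sketch, where t is a Table fetched from the metastore and "user_id"
is a hypothetical column:

    HiveObjectRef tableRef = new HiveObjectRefBuilder().buildTableReference(t);
    HiveObjectRef columnRef = new HiveObjectRefBuilder().buildColumnReference(t, "user_id");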

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
new file mode 100644
index 0000000..6c8b1d8
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/IndexBuilder.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Builder for indices.  You must supply the database name and table name (or table reference), a
+ * name for the index, and whatever StorageDescriptorBuilder requires.  All other fields will be
+ * given reasonable defaults.
+ */
+public class IndexBuilder extends StorageDescriptorBuilder<IndexBuilder> {
+  private String dbName, tableName, indexName, indexTableName, handlerClass;
+  private int createTime, lastAccessTime;
+  private Map<String, String> indexParams;
+  private boolean deferredRebuild;
+
+  public IndexBuilder() {
+    // Set some reasonable defaults
+    indexParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    super.setChild(this);
+  }
+
+  public IndexBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public IndexBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public IndexBuilder setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return this;
+  }
+
+  public IndexBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public IndexBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public IndexBuilder setIndexParams(Map<String, String> indexParams) {
+    this.indexParams = indexParams;
+    return this;
+  }
+
+  public IndexBuilder setIndexName(String indexName) {
+    this.indexName = indexName;
+    return this;
+  }
+
+  public IndexBuilder setIndexTableName(String indexTableName) {
+    this.indexTableName = indexTableName;
+    return this;
+  }
+
+  public IndexBuilder setHandlerClass(String handlerClass) {
+    this.handlerClass = handlerClass;
+    return this;
+  }
+
+  public IndexBuilder setDeferredRebuild(boolean deferredRebuild) {
+    this.deferredRebuild = deferredRebuild;
+    return this;
+  }
+
+  public Index build() throws MetaException {
+    if (dbName == null || tableName == null || indexName == null) {
+      throw new MetaException("You must provide database name, table name, and index name");
+    }
+    if (indexTableName == null) indexTableName = tableName + "_" + indexName + "_table";
+    return new Index(indexName, handlerClass, dbName, tableName, createTime, lastAccessTime,
+        indexTableName, buildSd(), indexParams, deferredRebuild);
+  }
+}
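
For illustration, a sketch of the index builder; the index name and column are
hypothetical, and the index table name is left to the <table>_<index>_table default
that build() computes:

    Index idx = new IndexBuilder()
        .setDbAndTableName(t)          // t is a Table obtained elsewhere
        .setIndexName("user_id_idx")   // hypothetical
        .addCol("user_id", "bigint")   // columns are required by StorageDescriptorBuilder
        .setDeferredRebuild(true)
        .build();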

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
new file mode 100644
index 0000000..265625f
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builder for {@link Partition}.  The only requirements are: (1) a database name and table name,
+ * or a table reference; (2) the partition values; and (3) whatever
+ * {@link StorageDescriptorBuilder} requires.
+ */
+public class PartitionBuilder extends StorageDescriptorBuilder<PartitionBuilder> {
+  private String dbName, tableName;
+  private int createTime, lastAccessTime;
+  private Map<String, String> partParams;
+  private List<String> values;
+
+  public PartitionBuilder() {
+    // Set some reasonable defaults
+    partParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    super.setChild(this);
+  }
+
+  public PartitionBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public PartitionBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public PartitionBuilder setDbAndTableName(Table table) {
+    this.dbName = table.getDbName();
+    this.tableName = table.getTableName();
+    return this;
+  }
+
+  public PartitionBuilder setValues(List<String> values) {
+    this.values = values;
+    return this;
+  }
+
+  public PartitionBuilder addValue(String value) {
+    if (values == null) values = new ArrayList<>();
+    values.add(value);
+    return this;
+  }
+
+  public PartitionBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public PartitionBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public PartitionBuilder setPartParams(Map<String, String> partParams) {
+    this.partParams = partParams;
+    return this;
+  }
+
+  public PartitionBuilder addPartParam(String key, String value) {
+    if (partParams == null) partParams = new HashMap<>();
+    partParams.put(key, value);
+    return this;
+  }
+
+  public Partition build() throws MetaException {
+    if (dbName == null || tableName == null) {
+      throw new MetaException("database name and table name must be provided");
+    }
+    if (values == null) throw new MetaException("You must provide partition values");
+    return new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(),
+        partParams);
+  }
+}
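
A sketch of the partition builder, assuming a table t partitioned by a single date
column; the partition value is hypothetical:

    Partition p = new PartitionBuilder()
        .setDbAndTableName(t)         // t is a Table obtained elsewhere
        .addValue("2017-12-13")       // hypothetical partition value
        .addCol("user_id", "bigint")  // columns are required by StorageDescriptorBuilder
        .build();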

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java
new file mode 100644
index 0000000..2e7fe5a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PrivilegeGrantInfoBuilder.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
+import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+
+import java.io.IOException;
+
+/**
+ * Builder for {@link PrivilegeGrantInfo}.  The privilege is required.  If not provided, the
+ * grantor is assumed to be the current user.  This is really intended for use by the
+ * {@link HiveObjectPrivilegeBuilder}.
+ */
+public class PrivilegeGrantInfoBuilder {
+  private String privilege, grantor;
+  private int createTime;
+  private PrincipalType grantorType;
+  private boolean grantOption;
+
+  public PrivilegeGrantInfoBuilder() {
+    createTime = (int)(System.currentTimeMillis() / 1000);
+    grantOption = false;
+  }
+
+  public PrivilegeGrantInfoBuilder setPrivilege(String privilege) {
+    this.privilege = privilege;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantor(String grantor) {
+    this.grantor = grantor;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantorType(PrincipalType grantorType) {
+    this.grantorType = grantorType;
+    return this;
+  }
+
+  public PrivilegeGrantInfoBuilder setGrantOption(boolean grantOption) {
+    this.grantOption = grantOption;
+    return this;
+  }
+
+  public PrivilegeGrantInfo build() throws MetaException {
+    if (privilege == null) {
+      throw new MetaException("Privilege must be provided.");
+    }
+    if (grantor == null) {
+      try {
+        grantor = SecurityUtils.getUser();
+        grantorType = PrincipalType.USER;
+      } catch (IOException e) {
+        throw MetaStoreUtils.newMetaException(e);
+      }
+    }
+    return new PrivilegeGrantInfo(privilege, createTime, grantor, grantorType, grantOption);
+  }
+}
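
A sketch of the grant-info builder; the privilege string is hypothetical, and the
grantor is left to default to the current user as build() does above:

    PrivilegeGrantInfo grantInfo = new PrivilegeGrantInfoBuilder()
        .setPrivilege("SELECT")   // required; hypothetical value
        .setGrantOption(true)
        .build();                 // grantor defaults to the current user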

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java
new file mode 100644
index 0000000..0b8d189
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/RoleBuilder.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Role;
+
+/**
+ * A builder for {@link Role}.  The roleName and the ownerName must be provided.
+ */
+public class RoleBuilder {
+  private String roleName, ownerName;
+  private int createTime;
+
+  public RoleBuilder() {
+    createTime = (int)(System.currentTimeMillis() / 1000);
+  }
+
+  public RoleBuilder setRoleName(String roleName) {
+    this.roleName = roleName;
+    return this;
+  }
+
+  public RoleBuilder setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+    return this;
+  }
+
+  public RoleBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public Role build() throws MetaException {
+    if (roleName == null || ownerName == null) {
+      throw new MetaException("role name and owner name must be provided.");
+    }
+    return new Role(roleName, createTime, ownerName);
+  }
+}
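
A sketch of the role builder; both names are hypothetical:

    Role r = new RoleBuilder()
        .setRoleName("analysts")
        .setOwnerName("admin")
        .build();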

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
new file mode 100644
index 0000000..a39319a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+
+/**
+ * Builder for {@link SQLForeignKey}.  Requires what {@link ConstraintBuilder} requires, plus the
+ * primary key database, table, column, and name.
+ */
+public class SQLForeignKeyBuilder extends ConstraintBuilder<SQLForeignKeyBuilder> {
+  private String pkDb, pkTable, pkColumn, pkName;
+  private int updateRule, deleteRule;
+
+  public SQLForeignKeyBuilder() {
+    updateRule = deleteRule = 0;
+    // Register this builder with the parent so that chained ConstraintBuilder setters
+    // return a SQLForeignKeyBuilder, as the other constraint builders do.
+    super.setChild(this);
+  }
+
+  public SQLForeignKeyBuilder setPkDb(String pkDb) {
+    this.pkDb = pkDb;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkTable(String pkTable) {
+    this.pkTable = pkTable;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkColumn(String pkColumn) {
+    this.pkColumn = pkColumn;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPkName(String pkName) {
+    this.pkName = pkName;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setPrimaryKey(SQLPrimaryKey pk) {
+    pkDb = pk.getTable_db();
+    pkTable = pk.getTable_name();
+    pkColumn = pk.getColumn_name();
+    pkName = pk.getPk_name();
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setUpdateRule(int updateRule) {
+    this.updateRule = updateRule;
+    return this;
+  }
+
+  public SQLForeignKeyBuilder setDeleteRule(int deleteRule) {
+    this.deleteRule = deleteRule;
+    return this;
+  }
+
+  public SQLForeignKey build() throws MetaException {
+    checkBuildable("foreign_key");
+    if (pkDb == null || pkTable == null || pkColumn == null || pkName == null) {
+      throw new MetaException("You must provide the primary key database, table, column, and name");
+    }
+    return new SQLForeignKey(pkDb, pkTable, pkColumn, dbName, tableName, columnName, keySeq,
+        updateRule, deleteRule, constraintName, pkName, enable, validate, rely);
+  }
+}
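
A sketch of building a foreign key from a primary key; pk is a SQLPrimaryKey
produced by the SQLPrimaryKeyBuilder below, the values are hypothetical, and the
remaining setters are assumed to exist on ConstraintBuilder (not shown in this
excerpt) with names matching the fields its subclasses use in build():

    SQLForeignKey fk = new SQLForeignKeyBuilder()
        .setPrimaryKey(pk)                          // copies the pk db/table/column/name
        .setDbName("sales")                         // assumed ConstraintBuilder setters
        .setTableName("orders")
        .setColumnName("customer_id")
        .setConstraintName("fk_orders_customers")
        .build();

Note that the chaining relies on the setChild() registration in the constructor above.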

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
new file mode 100644
index 0000000..77d1e49
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+
+/**
+ * Builder for {@link SQLNotNullConstraint}.  Only requires what {@link ConstraintBuilder} requires.
+ */
+public class SQLNotNullConstraintBuilder extends ConstraintBuilder<SQLNotNullConstraintBuilder> {
+
+  public SQLNotNullConstraintBuilder() {
+    super.setChild(this);
+  }
+
+  public SQLNotNullConstraint build() throws MetaException {
+    checkBuildable("not_null_constraint");
+    return new SQLNotNullConstraint(dbName, tableName, columnName, constraintName, enable,
+        validate, rely);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
new file mode 100644
index 0000000..9000f86
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+
+/**
+ * Builder for {@link SQLPrimaryKey}.  Only requires what {@link ConstraintBuilder} requires.
+ */
+public class SQLPrimaryKeyBuilder extends ConstraintBuilder<SQLPrimaryKeyBuilder> {
+
+  public SQLPrimaryKeyBuilder() {
+    super.setChild(this);
+  }
+
+  // An alias for setConstraintName() that reads more naturally for primary keys.
+  public SQLPrimaryKeyBuilder setPrimaryKeyName(String name) {
+    return setConstraintName(name);
+  }
+
+  public SQLPrimaryKey build() throws MetaException {
+    checkBuildable("primary_key");
+    return new SQLPrimaryKey(dbName, tableName, columnName, keySeq, constraintName, enable,
+        validate, rely);
+  }
+}
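
The same pattern serves the not-null, primary key, and unique constraint builders on
either side of this file; a sketch for the primary key, again assuming
ConstraintBuilder setters named after the fields used in build() above, with
hypothetical values:

    SQLPrimaryKey pk = new SQLPrimaryKeyBuilder()
        .setDbName("sales")                 // assumed ConstraintBuilder setter
        .setTableName("customers")
        .setColumnName("id")
        .setPrimaryKeyName("pk_customers")  // translates to setConstraintName()
        .build();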

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
new file mode 100644
index 0000000..640e9d1
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+
+/**
+ * Builder for {@link SQLUniqueConstraint}.  Only requires what {@link ConstraintBuilder} requires.
+ */
+public class SQLUniqueConstraintBuilder extends ConstraintBuilder<SQLUniqueConstraintBuilder> {
+
+  public SQLUniqueConstraintBuilder() {
+    super.setChild(this);
+  }
+
+  public SQLUniqueConstraint build() throws MetaException {
+    checkBuildable("unique_constraint");
+    return new SQLUniqueConstraint(dbName, tableName, columnName, keySeq, constraintName, enable,
+        validate, rely);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
new file mode 100644
index 0000000..39d1fa2
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/StorageDescriptorBuilder.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Builds a {@link StorageDescriptor}.  Only requires that columns be set; it picks reasonable
+ * defaults for everything else.  This is intended for use only by objects that contain a
+ * StorageDescriptor, not for direct use.
+ */
+abstract class StorageDescriptorBuilder<T> {
+  private static final String SERDE_LIB = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
+  private static final String INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveInputFormat";
+  private static final String OUTPUT_FORMAT = "org.apache.hadoop.hive.ql.io.HiveOutputFormat";
+
+  private String location, inputFormat, outputFormat, serdeName, serdeLib;
+  private List<FieldSchema> cols;
+  private int numBuckets;
+  private Map<String, String> storageDescriptorParams, serdeParams;
+  private boolean compressed, storedAsSubDirectories;
+  private List<String> bucketCols, skewedColNames;
+  private List<Order> sortCols;
+  private List<List<String>> skewedColValues;
+  private Map<List<String>, String> skewedColValueLocationMaps;
+  // This enables us to return the correct type from the builder
+  private T child;
+
+  protected StorageDescriptorBuilder() {
+    // Set some reasonable defaults
+    storageDescriptorParams = new HashMap<>();
+    serdeParams = new HashMap<>();
+    bucketCols = new ArrayList<>();
+    sortCols = new ArrayList<>();
+    numBuckets = 0;
+    compressed = false;
+    inputFormat = INPUT_FORMAT;
+    outputFormat = OUTPUT_FORMAT;
+    serdeLib = SERDE_LIB;
+    skewedColNames = new ArrayList<>();
+    skewedColValues = new ArrayList<>();
+    skewedColValueLocationMaps = new HashMap<>();
+  }
+
+  protected StorageDescriptor buildSd() throws MetaException {
+    if (cols == null) throw new MetaException("You must provide the columns");
+    SerDeInfo serdeInfo = new SerDeInfo(serdeName, serdeLib, serdeParams);
+    StorageDescriptor sd = new StorageDescriptor(cols, location, inputFormat, outputFormat,
+        compressed, numBuckets, serdeInfo, bucketCols, sortCols, storageDescriptorParams);
+    sd.setStoredAsSubDirectories(storedAsSubDirectories);
+    if (skewedColNames != null) {
+      SkewedInfo skewed = new SkewedInfo(skewedColNames, skewedColValues,
+          skewedColValueLocationMaps);
+      sd.setSkewedInfo(skewed);
+    }
+    return sd;
+  }
+
+  protected void setChild(T child) {
+    this.child = child;
+  }
+
+  public T setLocation(String location) {
+    this.location = location;
+    return child;
+  }
+
+  public T setInputFormat(String inputFormat) {
+    this.inputFormat = inputFormat;
+    return child;
+  }
+
+  public T setOutputFormat(String outputFormat) {
+    this.outputFormat = outputFormat;
+    return child;
+  }
+
+  public T setSerdeName(String serdeName) {
+    this.serdeName = serdeName;
+    return child;
+  }
+
+  public T setSerdeLib(String serdeLib) {
+    this.serdeLib = serdeLib;
+    return child;
+  }
+
+  public T setCols(List<FieldSchema> cols) {
+    this.cols = cols;
+    return child;
+  }
+
+  public T addCol(String name, String type, String comment) {
+    if (cols == null) cols = new ArrayList<>();
+    cols.add(new FieldSchema(name, type, comment));
+    return child;
+  }
+
+  public T addCol(String name, String type) {
+    return addCol(name, type, "");
+  }
+
+  public T setNumBuckets(int numBuckets) {
+    this.numBuckets = numBuckets;
+    return child;
+  }
+
+  public T setStorageDescriptorParams(
+      Map<String, String> storageDescriptorParams) {
+    this.storageDescriptorParams = storageDescriptorParams;
+    return child;
+  }
+
+  public T addStorageDescriptorParam(String key, String value) {
+    if (storageDescriptorParams == null) storageDescriptorParams = new HashMap<>();
+    storageDescriptorParams.put(key, value);
+    return child;
+  }
+
+  public T setSerdeParams(Map<String, String> serdeParams) {
+    this.serdeParams = serdeParams;
+    return child;
+  }
+
+  public T addSerdeParam(String key, String value) {
+    if (serdeParams == null) serdeParams = new HashMap<>();
+    serdeParams.put(key, value);
+    return child;
+  }
+
+  public T setCompressed(boolean compressed) {
+    this.compressed = compressed;
+    return child;
+  }
+
+  public T setStoredAsSubDirectories(boolean storedAsSubDirectories) {
+    this.storedAsSubDirectories = storedAsSubDirectories;
+    return child;
+  }
+
+  public T setBucketCols(List<String> bucketCols) {
+    this.bucketCols = bucketCols;
+    return child;
+  }
+
+  public T addBucketCol(String bucketCol) {
+    if (bucketCols == null) bucketCols = new ArrayList<>();
+    bucketCols.add(bucketCol);
+    return child;
+  }
+
+  public T setSkewedColNames(List<String> skewedColNames) {
+    this.skewedColNames = skewedColNames;
+    return child;
+  }
+
+  public T addSkewedColName(String skewedColName) {
+    if (skewedColNames == null) skewedColNames = new ArrayList<>();
+    skewedColNames.add(skewedColName);
+    return child;
+  }
+
+  public T setSortCols(List<Order> sortCols) {
+    this.sortCols = sortCols;
+    return child;
+  }
+
+  public T addSortCol(String col, int order) {
+    if (sortCols == null) sortCols = new ArrayList<>();
+    sortCols.add(new Order(col, order));
+    return child;
+  }
+
+  // It is not at all clear how to flatten these last two out in a useful way, and no one uses
+  // these anyway.
+  public T setSkewedColValues(List<List<String>> skewedColValues) {
+    this.skewedColValues = skewedColValues;
+    return child;
+  }
+
+  public T setSkewedColValueLocationMaps(
+      Map<List<String>, String> skewedColValueLocationMaps) {
+    this.skewedColValueLocationMaps = skewedColValueLocationMaps;
+    return child;
+  }
+}
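
The child field above is what lets a subclass chain the inherited setters without
casting: every setter returns child, the concrete builder registered in the subclass
constructor. A minimal sketch of the pattern in isolation (all names hypothetical):

    abstract class Base<T> {
      private T child;  // the concrete builder, registered by the subclass
      protected void setChild(T child) { this.child = child; }
      public T setCommon(String v) { return child; }  // returns the subclass type
    }

    class Concrete extends Base<Concrete> {
      Concrete() { super.setChild(this); }
      public Concrete setSpecific(String v) { return this; }
    }

    // new Concrete().setCommon("x").setSpecific("y") compiles because
    // setCommon() returns Concrete rather than Base<Concrete>.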

http://git-wip-us.apache.org/repos/asf/hive/blob/12a33fd0/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
new file mode 100644
index 0000000..1d457a6
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.client.builder;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Build a {@link Table}.  The database name and table name must be provided, plus whatever is
+ * needed by the underlying {@link StorageDescriptorBuilder}.
+ */
+public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> {
+  private String dbName, tableName, owner, viewOriginalText, viewExpandedText, type;
+  private List<FieldSchema> partCols;
+  private int createTime, lastAccessTime, retention;
+  private Map<String, String> tableParams;
+  private boolean rewriteEnabled, temporary;
+
+  public TableBuilder() {
+    // Set some reasonable defaults
+    tableParams = new HashMap<>();
+    createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000);
+    retention = 0;
+    super.setChild(this);
+  }
+
+  public TableBuilder setDbName(String dbName) {
+    this.dbName = dbName;
+    return this;
+  }
+
+  public TableBuilder setDbName(Database db) {
+    this.dbName = db.getName();
+    return this;
+  }
+
+  public TableBuilder setTableName(String tableName) {
+    this.tableName = tableName;
+    return this;
+  }
+
+  public TableBuilder setOwner(String owner) {
+    this.owner = owner;
+    return this;
+  }
+
+  public TableBuilder setViewOriginalText(String viewOriginalText) {
+    this.viewOriginalText = viewOriginalText;
+    return this;
+  }
+
+  public TableBuilder setViewExpandedText(String viewExpandedText) {
+    this.viewExpandedText = viewExpandedText;
+    return this;
+  }
+
+  public TableBuilder setType(String type) {
+    this.type = type;
+    return this;
+  }
+
+  public TableBuilder setPartCols(List<FieldSchema> partCols) {
+    this.partCols = partCols;
+    return this;
+  }
+
+  public TableBuilder addPartCol(String name, String type, String comment) {
+    if (partCols == null) partCols = new ArrayList<>();
+    partCols.add(new FieldSchema(name, type, comment));
+    return this;
+  }
+
+  public TableBuilder addPartCol(String name, String type) {
+    return addPartCol(name, type, "");
+  }
+
+  public TableBuilder setCreateTime(int createTime) {
+    this.createTime = createTime;
+    return this;
+  }
+
+  public TableBuilder setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    return this;
+  }
+
+  public TableBuilder setRetention(int retention) {
+    this.retention = retention;
+    return this;
+  }
+
+  public TableBuilder setTableParams(Map<String, String> tableParams) {
+    this.tableParams = tableParams;
+    return this;
+  }
+
+  public TableBuilder addTableParam(String key, String value) {
+    if (tableParams == null) tableParams = new HashMap<>();
+    tableParams.put(key, value);
+    return this;
+  }
+
+  public TableBuilder setRewriteEnabled(boolean rewriteEnabled) {
+    this.rewriteEnabled = rewriteEnabled;
+    return this;
+  }
+
+  public TableBuilder setTemporary(boolean temporary) {
+    this.temporary = temporary;
+    return this;
+  }
+
+  public Table build() throws MetaException {
+    if (dbName == null || tableName == null) {
+      throw new MetaException("You must set the database and table name");
+    }
+    if (owner == null) {
+      try {
+        owner = SecurityUtils.getUser();
+      } catch (IOException e) {
+        throw MetaStoreUtils.newMetaException(e);
+      }
+    }
+    Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, buildSd(),
+        partCols, tableParams, viewOriginalText, viewExpandedText, type);
+    if (rewriteEnabled) t.setRewriteEnabled(true);
+    if (temporary) t.setTemporary(temporary);
+    return t;
+  }
+}
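
Finally, a sketch pulling the table builder together; the names and columns are
hypothetical, and the owner is left to default to the current user as build() does
above:

    Table t = new TableBuilder()
        .setDbName("sales")
        .setTableName("orders")
        .addCol("order_id", "bigint")               // columns are required
        .addCol("amount", "double", "order total")
        .addPartCol("ds", "string")
        .build();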


[06/50] [abbrv] hive git commit: HIVE-18272: Fix check-style violations in subquery code (Vineet Garg, reviewed by Ashutosh Chauhan)

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ca96613d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
index 90aab6e..4758a37 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSubQueryRemoveRule.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
 import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptRuleOperand;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.Aggregate;
@@ -42,7 +41,6 @@ import org.apache.calcite.sql.type.InferTypes;
 import org.apache.calcite.sql.type.OperandTypes;
 import org.apache.calcite.sql.type.ReturnTypes;
 import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.calcite.tools.RelBuilderFactory;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
 
@@ -67,7 +65,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
  * TODO:
  *  Reason this is replicated instead of using Calcite's is
  *    Calcite creates null literal with null type but hive needs it to be properly typed
- *    Need fix for Calcite-1493
  *
  * <p>Sub-queries are represented by {@link RexSubQuery} expressions.
  *
@@ -76,493 +73,491 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
  * the rewrite, and the product of the rewrite will be a {@link Correlate}.
  * The Correlate can be removed using {@link RelDecorrelator}.
  */
-public class HiveSubQueryRemoveRule extends RelOptRule{
-
-    private HiveConf conf;
-
-    public HiveSubQueryRemoveRule(HiveConf conf) {
-        super(operand(RelNode.class, null, HiveSubQueryFinder.RELNODE_PREDICATE,
-            any()),
-            HiveRelFactories.HIVE_BUILDER, "SubQueryRemoveRule:Filter") ;
-        this.conf = conf;
-
+public class HiveSubQueryRemoveRule extends RelOptRule {
+
+  private HiveConf conf;
+
+  public HiveSubQueryRemoveRule(HiveConf conf) {
+    super(operand(RelNode.class, null, HiveSubQueryFinder.RELNODE_PREDICATE,
+        any()),
+        HiveRelFactories.HIVE_BUILDER, "SubQueryRemoveRule:Filter");
+    this.conf = conf;
+  }
+
+  public void onMatch(RelOptRuleCall call) {
+    final RelNode relNode = call.rel(0);
+    final HiveSubQRemoveRelBuilder builder =
+        new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
+
+    // if subquery is in FILTER
+    if(relNode instanceof Filter) {
+      final Filter filter = call.rel(0);
+      final RexSubQuery e =
+          RexUtil.SubQueryFinder.find(filter.getCondition());
+      assert e != null;
+
+      final RelOptUtil.Logic logic =
+          LogicVisitor.find(RelOptUtil.Logic.TRUE,
+              ImmutableList.of(filter.getCondition()), e);
+      builder.push(filter.getInput());
+      final int fieldCount = builder.peek().getRowType().getFieldCount();
+
+      assert(filter instanceof HiveFilter);
+      SubqueryConf subqueryConfig = filter.getCluster().getPlanner().
+          getContext().unwrap(SubqueryConf.class);
+      boolean isCorrScalarQuery = subqueryConfig.getCorrScalarRexSQWithAgg().contains(e.rel);
+      boolean hasNoWindowingAndNoGby =
+          subqueryConfig.getScalarAggWithoutGbyWindowing().contains(e.rel);
+
+      final RexNode target = apply(e, HiveFilter.getVariablesSet(e), logic,
+          builder, 1, fieldCount, isCorrScalarQuery, hasNoWindowingAndNoGby);
+      final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
+      builder.filter(shuttle.apply(filter.getCondition()));
+      builder.project(fields(builder, filter.getRowType().getFieldCount()));
+      call.transformTo(builder.build());
+    } else if(relNode instanceof Project) {
+      // if subquery is in PROJECT
+      final Project project = call.rel(0);
+      final RexSubQuery e =
+          RexUtil.SubQueryFinder.find(project.getProjects());
+      assert e != null;
+
+      final RelOptUtil.Logic logic =
+          LogicVisitor.find(RelOptUtil.Logic.TRUE_FALSE_UNKNOWN,
+              project.getProjects(), e);
+      builder.push(project.getInput());
+      final int fieldCount = builder.peek().getRowType().getFieldCount();
+
+      SubqueryConf subqueryConfig =
+          project.getCluster().getPlanner().getContext().unwrap(SubqueryConf.class);
+      boolean isCorrScalarQuery = subqueryConfig.getCorrScalarRexSQWithAgg().contains(e.rel);
+      boolean hasNoWindowingAndNoGby =
+          subqueryConfig.getScalarAggWithoutGbyWindowing().contains(e.rel);
+
+      final RexNode target = apply(e, HiveFilter.getVariablesSet(e),
+          logic, builder, 1, fieldCount, isCorrScalarQuery, hasNoWindowingAndNoGby);
+      final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
+      builder.project(shuttle.apply(project.getProjects()),
+          project.getRowType().getFieldNames());
+      call.transformTo(builder.build());
     }
-    public void onMatch(RelOptRuleCall call) {
-        final RelNode relNode = call.rel(0);
-        //TODO: replace HiveSubQRemoveRelBuilder with calcite's once calcite 1.11.0 is released
-        final HiveSubQRemoveRelBuilder builder = new HiveSubQRemoveRelBuilder(null, call.rel(0).getCluster(), null);
-
-        // if subquery is in FILTER
-        if(relNode instanceof Filter) {
-            final Filter filter = call.rel(0);
-            final RexSubQuery e =
-                RexUtil.SubQueryFinder.find(filter.getCondition());
-            assert e != null;
-
-            final RelOptUtil.Logic logic =
-                LogicVisitor.find(RelOptUtil.Logic.TRUE,
-                    ImmutableList.of(filter.getCondition()), e);
-            builder.push(filter.getInput());
-            final int fieldCount = builder.peek().getRowType().getFieldCount();
-
-            assert(filter instanceof HiveFilter);
-            SubqueryConf subqueryConfig = filter.getCluster().getPlanner().getContext().unwrap(SubqueryConf.class);
-            boolean isCorrScalarQuery = subqueryConfig.getCorrScalarRexSQWithAgg().contains(e.rel);
-            boolean hasNoWindowingAndNoGby = subqueryConfig.getScalarAggWithoutGbyWindowing().contains(e.rel);
-
-            final RexNode target = apply(e, HiveFilter.getVariablesSet(e), logic,
-                builder, 1, fieldCount, isCorrScalarQuery, hasNoWindowingAndNoGby);
-            final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
-            builder.filter(shuttle.apply(filter.getCondition()));
-            builder.project(fields(builder, filter.getRowType().getFieldCount()));
-            call.transformTo(builder.build());
-        }
-        // if subquery is in PROJECT
-        else if(relNode instanceof Project) {
-            final Project project = call.rel(0);
-            final RexSubQuery e =
-                RexUtil.SubQueryFinder.find(project.getProjects());
-            assert e != null;
-
-            final RelOptUtil.Logic logic =
-                LogicVisitor.find(RelOptUtil.Logic.TRUE_FALSE_UNKNOWN,
-                    project.getProjects(), e);
-            builder.push(project.getInput());
-            final int fieldCount = builder.peek().getRowType().getFieldCount();
-
-            SubqueryConf subqueryConfig = project.getCluster().getPlanner().getContext().unwrap(SubqueryConf.class);
-            boolean isCorrScalarQuery = subqueryConfig.getCorrScalarRexSQWithAgg().contains(e.rel);
-            boolean hasNoWindowingAndNoGby = subqueryConfig.getScalarAggWithoutGbyWindowing().contains(e.rel);
-
-            final RexNode target = apply(e, HiveFilter.getVariablesSet(e),
-                logic, builder, 1, fieldCount, isCorrScalarQuery, hasNoWindowingAndNoGby);
-            final RexShuttle shuttle = new ReplaceSubQueryShuttle(e, target);
-            builder.project(shuttle.apply(project.getProjects()),
-                project.getRowType().getFieldNames());
-            call.transformTo(builder.build());
-        }
+  }
+
+  // Given a subquery, checks what its aggregate function is.
+  // Returns true for COUNT, since COUNT produces 0 on an empty result set.
+  private boolean isAggZeroOnEmpty(RexSubQuery e) {
+    //as this is corr scalar subquery with agg we expect one aggregate
+    assert(e.getKind() == SqlKind.SCALAR_QUERY);
+    assert(e.rel.getInputs().size() == 1);
+    Aggregate relAgg = (Aggregate)e.rel.getInput(0);
+    assert(relAgg.getAggCallList().size() == 1); //should only have one aggregate
+    if(relAgg.getAggCallList().get(0).getAggregation().getKind() == SqlKind.COUNT) {
+      return true;
     }
-
-    /*private HiveSubQueryRemoveRule(RelOptRuleOperand operand,
-                               RelBuilderFactory relBuilderFactory,
-                               String description) {
-        super(operand, relBuilderFactory, description);
-    } */
-
-    // given a subquery it checks to see what is the aggegate function
-    /// if COUNT returns true since COUNT produces 0 on empty result set
-    private boolean isAggZeroOnEmpty(RexSubQuery e) {
-        //as this is corr scalar subquery with agg we expect one aggregate
-        assert(e.getKind() == SqlKind.SCALAR_QUERY);
-        assert(e.rel.getInputs().size() == 1);
-        Aggregate relAgg = (Aggregate)e.rel.getInput(0);
-        assert( relAgg.getAggCallList().size() == 1); //should only have one aggregate
-        if( relAgg.getAggCallList().get(0).getAggregation().getKind() == SqlKind.COUNT ) {
-            return true;
+    return false;
+  }
+
+  private SqlTypeName getAggTypeForScalarSub(RexSubQuery e) {
+    assert(e.getKind() == SqlKind.SCALAR_QUERY);
+    assert(e.rel.getInputs().size() == 1);
+    Aggregate relAgg = (Aggregate)e.rel.getInput(0);
+    assert(relAgg.getAggCallList().size() == 1); //should only have one aggregate
+    return relAgg.getAggCallList().get(0).getType().getSqlTypeName();
+  }
+
+  protected RexNode apply(RexSubQuery e, Set<CorrelationId> variablesSet,
+                          RelOptUtil.Logic logic,
+                          HiveSubQRemoveRelBuilder builder, int inputCount, int offset,
+                          boolean isCorrScalarAgg,
+                          boolean hasNoWindowingAndNoGby) {
+    switch (e.getKind()) {
+    case SCALAR_QUERY:
+      // if the scalar query has an aggregate but no windowing and no GROUP BY,
+      // avoid adding sq_count_check since it is guaranteed to produce at most one row
+      if(!hasNoWindowingAndNoGby) {
+        final List<RexNode> parentQueryFields = new ArrayList<>();
+        if (conf.getBoolVar(ConfVars.HIVE_REMOVE_SQ_COUNT_CHECK)) {
+          // we want to have project after join since sq_count_check's count() expression wouldn't
+          // be needed further up
+          parentQueryFields.addAll(builder.fields());
         }
-        return false;
-    }
-    private SqlTypeName getAggTypeForScalarSub(RexSubQuery e) {
-        assert(e.getKind() == SqlKind.SCALAR_QUERY);
-        assert(e.rel.getInputs().size() == 1);
-        Aggregate relAgg = (Aggregate)e.rel.getInput(0);
-        assert( relAgg.getAggCallList().size() == 1); //should only have one aggregate
-        return relAgg.getAggCallList().get(0).getType().getSqlTypeName();
-    }
 
-    protected RexNode apply(RexSubQuery e, Set<CorrelationId> variablesSet,
-        RelOptUtil.Logic logic,
-        HiveSubQRemoveRelBuilder builder, int inputCount, int offset,
-        boolean isCorrScalarAgg,
-        boolean hasNoWindowingAndNoGby ) {
-        switch (e.getKind()) {
-        case SCALAR_QUERY:
-            // if scalar query has aggregate and no windowing and no gby avoid adding sq_count_check
-            // since it is guaranteed to produce at most one row
-            if(!hasNoWindowingAndNoGby) {
-                final List<RexNode> parentQueryFields = new ArrayList<>();
-                if (conf.getBoolVar(ConfVars.HIVE_REMOVE_SQ_COUNT_CHECK)) {
-                    // we want to have project after join since sq_count_check's count() expression wouldn't
-                    // be needed further up
-                    parentQueryFields.addAll(builder.fields());
-                }
-
-                builder.push(e.rel);
-                // returns single row/column
-                builder.aggregate(builder.groupKey(), builder.count(false, "cnt"));
-
-                SqlFunction countCheck =
-                    new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
-                        InferTypes.RETURN_TYPE, OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
-
-                // we create FILTER (sq_count_check(count()) <= 1) instead of PROJECT because RelFieldTrimmer
-                //  ends up getting rid of Project since it is not used further up the tree
-                builder.filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
-                    builder.call(countCheck, builder.field("cnt")), builder.literal(1)));
-                if (!variablesSet.isEmpty()) {
-                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-                } else
-                    builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
-
-                if (conf.getBoolVar(ConfVars.HIVE_REMOVE_SQ_COUNT_CHECK)) {
-                    builder.project(parentQueryFields);
-                }
-                else {
-                    offset++;
-                }
-
-            }
-            if(isCorrScalarAgg) {
-                // Transformation :
-                // Outer Query Left Join (inner query) on correlated predicate and preserve rows only from left side.
-                builder.push(e.rel);
-                final List<RexNode> parentQueryFields = new ArrayList<>();
-                parentQueryFields.addAll(builder.fields());
-
-                // id is appended since there could be multiple scalar subqueries and FILTER
-                // is created using field name
-                String indicator = "alwaysTrue" + e.rel.getId();
-                parentQueryFields.add(builder.alias(builder.literal(true), indicator));
-                builder.project(parentQueryFields);
-                builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-
-                final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
-                RexNode literal;
-                if(isAggZeroOnEmpty(e)) {
-                    // since count has a return type of BIG INT we need to make a literal of type big int
-                    // relbuilder's literal doesn't allow this
-                    literal = e.rel.getCluster().getRexBuilder().makeBigintLiteral(new BigDecimal(0));
-                }
-                else {
-                    literal = e.rel.getCluster().getRexBuilder().makeNullLiteral(getAggTypeForScalarSub(e));
-                }
-                operands.add((builder.isNull(builder.field(indicator))), literal);
-                operands.add(field(builder, 1, builder.fields().size()-2));
-                return builder.call(SqlStdOperatorTable.CASE, operands.build());
-            }
-
-            //Transformation is to left join for correlated predicates and inner join otherwise,
-            // but do a count on inner side before that to make sure it generates atmost 1 row.
-            builder.push(e.rel);
-            builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-            return field(builder, inputCount, offset);
-
-        case IN:
-        case EXISTS:
-            // Most general case, where the left and right keys might have nulls, and
-            // caller requires 3-valued logic return.
-            //
-            // select e.deptno, e.deptno in (select deptno from emp)
-            //
-            // becomes
-            //
-            // select e.deptno,
-            //   case
-            //   when ct.c = 0 then false
-            //   when dt.i is not null then true
-            //   when e.deptno is null then null
-            //   when ct.ck < ct.c then null
-            //   else false
-            //   end
-            // from e
-            // left join (
-            //   (select count(*) as c, count(deptno) as ck from emp) as ct
-            //   cross join (select distinct deptno, true as i from emp)) as dt
-            //   on e.deptno = dt.deptno
-            //
-            // If keys are not null we can remove "ct" and simplify to
-            //
-            // select e.deptno,
-            //   case
-            //   when dt.i is not null then true
-            //   else false
-            //   end
-            // from e
-            // left join (select distinct deptno, true as i from emp) as dt
-            //   on e.deptno = dt.deptno
-            //
-            // We could further simplify to
-            //
-            // select e.deptno,
-            //   dt.i is not null
-            // from e
-            // left join (select distinct deptno, true as i from emp) as dt
-            //   on e.deptno = dt.deptno
-            //
-            // but have not yet.
-            //
-            // If the logic is TRUE we can just kill the record if the condition
-            // evaluates to FALSE or UNKNOWN. Thus the query simplifies to an inner
-            // join:
-            //
-            // select e.deptno,
-            //   true
-            // from e
-            // inner join (select distinct deptno from emp) as dt
-            //   on e.deptno = dt.deptno
-            //
-
-            builder.push(e.rel);
-            final List<RexNode> fields = new ArrayList<>();
-            switch (e.getKind()) {
-            case IN:
-                fields.addAll(builder.fields());
-                // Transformation: sq_count_check(count(*), true) FILTER is generated on top
-                //  of subquery which is then joined (LEFT or INNER) with outer query
-                //  This transformation is done to add run time check using sq_count_check to
-                //  throw an error if subquery is producing zero row, since with aggregate this
-                //  will produce wrong results (because we further rewrite such queries into JOIN)
-                if(isCorrScalarAgg) {
-                    // returns single row/column
-                    builder.aggregate(builder.groupKey(),
-                        builder.count(false, "cnt_in"));
-
-                    if (!variablesSet.isEmpty()) {
-                        builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-                    } else {
-                        builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
-                    }
-
-                    SqlFunction inCountCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
-                        InferTypes.RETURN_TYPE, OperandTypes.NUMERIC, SqlFunctionCategory.USER_DEFINED_FUNCTION);
-
-                    // we create FILTER (sq_count_check(count()) > 0) instead of PROJECT because RelFieldTrimmer
-                    //  ends up getting rid of Project since it is not used further up the tree
-                    builder.filter(builder.call(SqlStdOperatorTable.GREATER_THAN,
-                        //true here indicates that sq_count_check is for IN/NOT IN subqueries
-                        builder.call(inCountCheck, builder.field("cnt_in"), builder.literal(true)),
-                        builder.literal(0)));
-                    offset =  offset + 1;
-                    builder.push(e.rel);
-                }
-            }
-
-            // First, the cross join
-            switch (logic) {
-            case TRUE_FALSE_UNKNOWN:
-            case UNKNOWN_AS_TRUE:
-                // Since EXISTS/NOT EXISTS are not affected by presence of
-                // null keys we do not need to generate count(*), count(c)
-                if (e.getKind() == SqlKind.EXISTS) {
-                    logic = RelOptUtil.Logic.TRUE_FALSE;
-                    break;
-                }
-                builder.aggregate(builder.groupKey(),
-                    builder.count(false, "c"),
-                    builder.aggregateCall(SqlStdOperatorTable.COUNT, false, null, "ck",
-                        builder.fields()));
-                builder.as("ct");
-                if( !variablesSet.isEmpty())
-                {
-                    //builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
-                    builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
-                }
-                else
-                    builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
-
-                offset += 2;
-                builder.push(e.rel);
-                break;
-            }
-
-            // Now the left join
-            switch (logic) {
-            case TRUE:
-                if (fields.isEmpty()) {
-                    builder.project(builder.alias(builder.literal(true), "i" + e.rel.getId()));
-                    if(!variablesSet.isEmpty() && (e.getKind() == SqlKind.EXISTS || e.getKind() == SqlKind.IN)) {
-                        // avoid adding group by for correlated IN/EXISTS queries
-                        // since this is rewritting into semijoin
-                        break;
-                    }
-                    else {
-                        builder.aggregate(builder.groupKey(0));
-                    }
-                } else {
-                    if(!variablesSet.isEmpty() && (e.getKind() == SqlKind.EXISTS || e.getKind() == SqlKind.IN)) {
-                        // avoid adding group by for correlated IN/EXISTS queries
-                        // since this is rewritting into semijoin
-                      break;
-                    }
-                    else {
-                        builder.aggregate(builder.groupKey(fields));
-                    }
-                }
-                break;
-            default:
-                fields.add(builder.alias(builder.literal(true), "i" + e.rel.getId()));
-                builder.project(fields);
-                builder.distinct();
-            }
-            builder.as("dt");
-            final List<RexNode> conditions = new ArrayList<>();
-            for (Pair<RexNode, RexNode> pair
-                : Pair.zip(e.getOperands(), builder.fields())) {
-                conditions.add(
-                    builder.equals(pair.left, RexUtil.shift(pair.right, offset)));
-            }
-            switch (logic) {
-            case TRUE:
-                builder.join(JoinRelType.INNER, builder.and(conditions), variablesSet, true);
-                return builder.literal(true);
-            }
-            builder.join(JoinRelType.LEFT, builder.and(conditions), variablesSet);
-
-            final List<RexNode> keyIsNulls = new ArrayList<>();
-            for (RexNode operand : e.getOperands()) {
-                if (operand.getType().isNullable()) {
-                    keyIsNulls.add(builder.isNull(operand));
-                }
-            }
-            final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
-            switch (logic) {
-            case TRUE_FALSE_UNKNOWN:
-            case UNKNOWN_AS_TRUE:
-                operands.add(
-                    builder.equals(builder.field("ct", "c"), builder.literal(0)),
-                    builder.literal(false));
-                //now that we are using LEFT OUTER JOIN to join inner count, count(*)
-                // with outer table, we wouldn't be able to tell if count is zero
-                // for inner table since inner join with correlated values will get rid
-                // of all values where join cond is not true (i.e where actual inner table
-                // will produce zero result). To  handle this case we need to check both
-                // count is zero or count is null
-                operands.add((builder.isNull(builder.field("ct", "c"))), builder.literal(false));
-                break;
-            }
-            operands.add(builder.isNotNull(builder.field("dt", "i" + e.rel.getId())),
-                builder.literal(true));
-            if (!keyIsNulls.isEmpty()) {
-                //Calcite creates null literal with Null type here but because HIVE doesn't support null type
-                // it is appropriately typed boolean
-                operands.add(builder.or(keyIsNulls), e.rel.getCluster().getRexBuilder().makeNullLiteral(SqlTypeName.BOOLEAN));
-                // we are creating filter here so should not be returning NULL. Not sure why Calcite return NULL
-                //operands.add(builder.or(keyIsNulls), builder.literal(false));
-            }
-            RexNode b = builder.literal(true);
-            switch (logic) {
-            case TRUE_FALSE_UNKNOWN:
-                b = e.rel.getCluster().getRexBuilder().makeNullLiteral(SqlTypeName.BOOLEAN);
-                // fall through
-            case UNKNOWN_AS_TRUE:
-                operands.add(
-                    builder.call(SqlStdOperatorTable.LESS_THAN,
-                        builder.field("ct", "ck"), builder.field("ct", "c")),
-                    b);
-                break;
-            }
-            operands.add(builder.literal(false));
-            return builder.call(SqlStdOperatorTable.CASE, operands.build());
-
-        default:
-            throw new AssertionError(e.getKind());
+        builder.push(e.rel);
+        // returns single row/column
+        builder.aggregate(builder.groupKey(), builder.count(false, "cnt"));
+
+        SqlFunction countCheck =
+            new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT,
+                InferTypes.RETURN_TYPE, OperandTypes.NUMERIC,
+                SqlFunctionCategory.USER_DEFINED_FUNCTION);
+
+        //we create FILTER (sq_count_check(count()) <= 1) instead of PROJECT because RelFieldTrimmer
+        // ends up getting rid of Project since it is not used further up the tree
+        builder.filter(builder.call(SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
+            builder.call(countCheck, builder.field("cnt")), builder.literal(1)));
+        if (!variablesSet.isEmpty()) {
+          builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+        } else {
+          builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
         }
-    }
 
-    /** Returns a reference to a particular field, by offset, across several
-     * inputs on a {@link RelBuilder}'s stack. */
-    private RexInputRef field(HiveSubQRemoveRelBuilder builder, int inputCount, int offset) {
-        for (int inputOrdinal = 0;;) {
-            final RelNode r = builder.peek(inputCount, inputOrdinal);
-            if (offset < r.getRowType().getFieldCount()) {
-                return builder.field(inputCount, inputOrdinal, offset);
-            }
-            ++inputOrdinal;
-            offset -= r.getRowType().getFieldCount();
+        if (conf.getBoolVar(ConfVars.HIVE_REMOVE_SQ_COUNT_CHECK)) {
+          builder.project(parentQueryFields);
+        } else {
+          offset++;
         }
-    }
-
-    /** Returns a list of expressions that project the first {@code fieldCount}
-     * fields of the top input on a {@link RelBuilder}'s stack. */
-    private static List<RexNode> fields(HiveSubQRemoveRelBuilder builder, int fieldCount) {
-        final List<RexNode> projects = new ArrayList<>();
-        for (int i = 0; i < fieldCount; i++) {
-            projects.add(builder.field(i));
+      }
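// In SQL terms, the guard built above has roughly this shape (names are
// illustrative; sq_count_check is the runtime check function this rule
// declares):
//
//   select o.*
//   from outer_query o
//   cross join (select cnt
//               from (select count(*) as cnt from scalar_subquery) t
//               where sq_count_check(cnt) <= 1) cc
//
// so the query fails at runtime if the scalar subquery returns more than one
// row, instead of silently picking an arbitrary one.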
+      if(isCorrScalarAgg) {
+        // Transformation: outer query LEFT JOINs the inner query on the
+        // correlated predicate, preserving rows only from the left side.
+        builder.push(e.rel);
+        final List<RexNode> parentQueryFields = new ArrayList<>();
+        parentQueryFields.addAll(builder.fields());
+
+        // the rel id is appended since there could be multiple scalar subqueries
+        // and the FILTER is created using the field name
+        String indicator = "alwaysTrue" + e.rel.getId();
+        parentQueryFields.add(builder.alias(builder.literal(true), indicator));
+        builder.project(parentQueryFields);
+        builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+
+        final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
+        RexNode literal;
+        if(isAggZeroOnEmpty(e)) {
+          // since COUNT has a return type of BIGINT we need to make a BIGINT literal;
+          // RelBuilder's literal() doesn't allow this
+          literal = e.rel.getCluster().getRexBuilder().makeBigintLiteral(new BigDecimal(0));
+        } else {
+          literal = e.rel.getCluster().getRexBuilder().makeNullLiteral(getAggTypeForScalarSub(e));
         }
-        return projects;
-    }
-
-    /** Shuttle that replaces occurrences of a given
-     * {@link org.apache.calcite.rex.RexSubQuery} with a replacement
-     * expression. */
-    private static class ReplaceSubQueryShuttle extends RexShuttle {
-        private final RexSubQuery subQuery;
-        private final RexNode replacement;
-
-        public ReplaceSubQueryShuttle(RexSubQuery subQuery, RexNode replacement) {
-            this.subQuery = subQuery;
-            this.replacement = replacement;
+        operands.add((builder.isNull(builder.field(indicator))), literal);
+        operands.add(field(builder, 1, builder.fields().size()-2));
+        return builder.call(SqlStdOperatorTable.CASE, operands.build());
+      }
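// A sketch of the plan shape this branch produces, borrowing the emp/dept
// names from the IN/EXISTS walkthrough below (alias names illustrative):
//
//   select e.deptno,
//     case when dt.alwaysTrue is null then 0   -- 0 for COUNT, typed NULL otherwise
//          else dt.cnt end
//   from e
//   left join (select deptno, count(*) as cnt, true as alwaysTrue
//              from emp group by deptno) as dt
//     on e.deptno = dt.deptno
//
// The alwaysTrue indicator distinguishes "no matching inner row" from a
// genuine NULL aggregate value.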
+
+      // Transformation is to LEFT JOIN for correlated predicates and INNER JOIN otherwise,
+      // but do a count on the inner side first to make sure it generates at most 1 row.
+      builder.push(e.rel);
+      builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+      return field(builder, inputCount, offset);
+
+    case IN:
+    case EXISTS:
+      // Most general case, where the left and right keys might have nulls, and
+      // caller requires 3-valued logic return.
+      //
+      // select e.deptno, e.deptno in (select deptno from emp)
+      //
+      // becomes
+      //
+      // select e.deptno,
+      //   case
+      //   when ct.c = 0 then false
+      //   when dt.i is not null then true
+      //   when e.deptno is null then null
+      //   when ct.ck < ct.c then null
+      //   else false
+      //   end
+      // from e
+      // left join (
+      //   (select count(*) as c, count(deptno) as ck from emp) as ct
+      //   cross join (select distinct deptno, true as i from emp)) as dt
+      //   on e.deptno = dt.deptno
+      //
+      // If keys are not null we can remove "ct" and simplify to
+      //
+      // select e.deptno,
+      //   case
+      //   when dt.i is not null then true
+      //   else false
+      //   end
+      // from e
+      // left join (select distinct deptno, true as i from emp) as dt
+      //   on e.deptno = dt.deptno
+      //
+      // We could further simplify to
+      //
+      // select e.deptno,
+      //   dt.i is not null
+      // from e
+      // left join (select distinct deptno, true as i from emp) as dt
+      //   on e.deptno = dt.deptno
+      //
+      // but have not yet.
+      //
+      // If the logic is TRUE we can just kill the record if the condition
+      // evaluates to FALSE or UNKNOWN. Thus the query simplifies to an inner
+      // join:
+      //
+      // select e.deptno,
+      //   true
+      // from e
+      // inner join (select distinct deptno from emp) as dt
+      //   on e.deptno = dt.deptno
+      //
+
+      builder.push(e.rel);
+      final List<RexNode> fields = new ArrayList<>();
+      switch (e.getKind()) {
+      case IN:
+        fields.addAll(builder.fields());
+        // Transformation: a FILTER with sq_count_check(count(*), true) is generated on
+        //  top of the subquery, which is then joined (LEFT or INNER) with the outer query.
+        //  This transformation adds a runtime check using sq_count_check to
+        //  throw an error if the subquery produces zero rows, since with an aggregate
+        //  this would produce wrong results (because we further rewrite such queries into JOIN)
+        if(isCorrScalarAgg) {
+          // returns single row/column
+          builder.aggregate(builder.groupKey(),
+              builder.count(false, "cnt_in"));
+
+          if (!variablesSet.isEmpty()) {
+            builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+          } else {
+            builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
+          }
+
+          SqlFunction inCountCheck = new SqlFunction("sq_count_check", SqlKind.OTHER_FUNCTION,
+              ReturnTypes.BIGINT, InferTypes.RETURN_TYPE, OperandTypes.NUMERIC,
+              SqlFunctionCategory.USER_DEFINED_FUNCTION);
+
+          // we create FILTER (sq_count_check(count()) > 0) instead of PROJECT
+          // because RelFieldTrimmer ends up getting rid of Project
+          // since it is not used further up the tree
+          builder.filter(builder.call(SqlStdOperatorTable.GREATER_THAN,
+              //true here indicates that sq_count_check is for IN/NOT IN subqueries
+              builder.call(inCountCheck, builder.field("cnt_in"), builder.literal(true)),
+              builder.literal(0)));
+          offset = offset + 1;
+          builder.push(e.rel);
         }
-
-        @Override public RexNode visitSubQuery(RexSubQuery subQuery) {
-            return RexUtil.eq(subQuery, this.subQuery) ? replacement : subQuery;
+      }
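// The IN-specific guard above is roughly (illustrative names; the boolean
// argument marks the IN/NOT IN flavor of sq_count_check):
//
//   select cnt_in
//   from (select count(*) as cnt_in from subquery) t
//   where sq_count_check(cnt_in, true) > 0
//
// so a subquery producing zero rows raises a runtime error rather than
// feeding wrong results into the JOIN-based rewrite that follows.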
+
+      // First, the cross join
+      switch (logic) {
+      case TRUE_FALSE_UNKNOWN:
+      case UNKNOWN_AS_TRUE:
+        // Since EXISTS/NOT EXISTS are not affected by presence of
+        // null keys we do not need to generate count(*), count(c)
+        if (e.getKind() == SqlKind.EXISTS) {
+          logic = RelOptUtil.Logic.TRUE_FALSE;
+          break;
         }
-    }
-
-    // TODO:
-    // Following HiveSubQueryFinder has been copied from RexUtil::SubQueryFinder
-    // since there is BUG in there (CALCITE-1726).
-    // Once CALCITE-1726 is fixed we should get rid of the following code
-    /** Visitor that throws {@link org.apache.calcite.util.Util.FoundOne} if
-     * applied to an expression that contains a {@link RexSubQuery}. */
-    public static class HiveSubQueryFinder extends RexVisitorImpl<Void> {
-        public static final HiveSubQueryFinder INSTANCE = new HiveSubQueryFinder();
-
-        /** Returns whether a {@link Project} contains a sub-query. */
-        public static final Predicate<RelNode> RELNODE_PREDICATE=
-            new Predicate<RelNode>() {
-                public boolean apply(RelNode relNode) {
-                    if (relNode instanceof Project) {
-                        Project project = (Project)relNode;
-                        for (RexNode node : project.getProjects()) {
-                            try {
-                                node.accept(INSTANCE);
-                            } catch (Util.FoundOne e) {
-                                return true;
-                            }
-                        }
-                        return false;
-                    }
-                    else if (relNode instanceof Filter) {
-                        try {
-                            ((Filter)relNode).getCondition().accept(INSTANCE);
-                            return false;
-                        } catch (Util.FoundOne e) {
-                            return true;
-                        }
-                    }
-                    return false;
-                }
-            };
-
-        private HiveSubQueryFinder() {
-            super(true);
+        builder.aggregate(builder.groupKey(),
+            builder.count(false, "c"),
+            builder.aggregateCall(SqlStdOperatorTable.COUNT, false, null, "ck",
+                builder.fields()));
+        builder.as("ct");
+        if(!variablesSet.isEmpty()) {
+          //builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
+          builder.join(JoinRelType.LEFT, builder.literal(true), variablesSet);
+        } else {
+          builder.join(JoinRelType.INNER, builder.literal(true), variablesSet);
         }
 
-        @Override public Void visitSubQuery(RexSubQuery subQuery) {
-            throw new Util.FoundOne(subQuery);
+        offset += 2;
+        builder.push(e.rel);
+        break;
+      }
+
+      // Now the left join
+      switch (logic) {
+      case TRUE:
+        if (fields.isEmpty()) {
+          builder.project(builder.alias(builder.literal(true), "i" + e.rel.getId()));
+          if(!variablesSet.isEmpty()
+              && (e.getKind() == SqlKind.EXISTS || e.getKind() == SqlKind.IN)) {
+            // avoid adding group by for correlated IN/EXISTS queries
+            // since this is rewritten into a semijoin
+            break;
+          } else {
+            builder.aggregate(builder.groupKey(0));
+          }
+        } else {
+          if(!variablesSet.isEmpty()
+              && (e.getKind() == SqlKind.EXISTS || e.getKind() == SqlKind.IN)) {
+            // avoid adding group by for correlated IN/EXISTS queries
+            // since this is rewritten into a semijoin
+            break;
+          } else {
+            builder.aggregate(builder.groupKey(fields));
+          }
         }
+        break;
+      default:
+        fields.add(builder.alias(builder.literal(true), "i" + e.rel.getId()));
+        builder.project(fields);
+        builder.distinct();
+      }
+      builder.as("dt");
+      final List<RexNode> conditions = new ArrayList<>();
+      for (Pair<RexNode, RexNode> pair
+          : Pair.zip(e.getOperands(), builder.fields())) {
+        conditions.add(
+            builder.equals(pair.left, RexUtil.shift(pair.right, offset)));
+      }
+      switch (logic) {
+      case TRUE:
+        builder.join(JoinRelType.INNER, builder.and(conditions), variablesSet, true);
+        return builder.literal(true);
+      }
+      builder.join(JoinRelType.LEFT, builder.and(conditions), variablesSet);
+
+      final List<RexNode> keyIsNulls = new ArrayList<>();
+      for (RexNode operand : e.getOperands()) {
+        if (operand.getType().isNullable()) {
+          keyIsNulls.add(builder.isNull(operand));
+        }
+      }
+      final ImmutableList.Builder<RexNode> operands = ImmutableList.builder();
+      switch (logic) {
+      case TRUE_FALSE_UNKNOWN:
+      case UNKNOWN_AS_TRUE:
+        operands.add(
+            builder.equals(builder.field("ct", "c"), builder.literal(0)),
+            builder.literal(false));
+        // now that we are using a LEFT OUTER JOIN to join the inner count, count(*)
+        // with the outer table, we wouldn't be able to tell if the count is zero
+        // for the inner table, since an inner join with correlated values gets rid
+        // of all rows where the join condition is not true (i.e. where the actual inner
+        // table produces zero results). To handle this case we need to check whether
+        // the count is zero or null
+        operands.add((builder.isNull(builder.field("ct", "c"))), builder.literal(false));
+        break;
+      }
+      operands.add(builder.isNotNull(builder.field("dt", "i" + e.rel.getId())),
+          builder.literal(true));
+      if (!keyIsNulls.isEmpty()) {
+        // Calcite creates a null literal with NULL type here, but
+        // because Hive doesn't support the NULL type it is typed as BOOLEAN instead
+        operands.add(builder.or(keyIsNulls),
+            e.rel.getCluster().getRexBuilder().makeNullLiteral(SqlTypeName.BOOLEAN));
+        // we are creating a filter here so it should not return NULL;
+        // not sure why Calcite returns NULL
+      }
+      RexNode b = builder.literal(true);
+      switch (logic) {
+      case TRUE_FALSE_UNKNOWN:
+        b = e.rel.getCluster().getRexBuilder().makeNullLiteral(SqlTypeName.BOOLEAN);
+        // fall through
+      case UNKNOWN_AS_TRUE:
+        operands.add(
+            builder.call(SqlStdOperatorTable.LESS_THAN,
+                builder.field("ct", "ck"), builder.field("ct", "c")),
+            b);
+        break;
+      }
+      operands.add(builder.literal(false));
+      return builder.call(SqlStdOperatorTable.CASE, operands.build());
+
+    default:
+      throw new AssertionError(e.getKind());
+    }
+  }
+
+  /** Returns a reference to a particular field, by offset, across several
+   * inputs on a {@link RelBuilder}'s stack. */
+  private RexInputRef field(HiveSubQRemoveRelBuilder builder, int inputCount, int offset) {
+    for (int inputOrdinal = 0;;) {
+      final RelNode r = builder.peek(inputCount, inputOrdinal);
+      if (offset < r.getRowType().getFieldCount()) {
+        return builder.field(inputCount, inputOrdinal, offset);
+      }
+      ++inputOrdinal;
+      offset -= r.getRowType().getFieldCount();
+    }
+  }
+
+  /** Returns a list of expressions that project the first {@code fieldCount}
+   * fields of the top input on a {@link RelBuilder}'s stack. */
+  private static List<RexNode> fields(HiveSubQRemoveRelBuilder builder, int fieldCount) {
+    final List<RexNode> projects = new ArrayList<>();
+    for (int i = 0; i < fieldCount; i++) {
+      projects.add(builder.field(i));
+    }
+    return projects;
+  }
+
+  /** Shuttle that replaces occurrences of a given
+   * {@link org.apache.calcite.rex.RexSubQuery} with a replacement
+   * expression. */
+  private static class ReplaceSubQueryShuttle extends RexShuttle {
+    private final RexSubQuery subQuery;
+    private final RexNode replacement;
+
+    ReplaceSubQueryShuttle(RexSubQuery subQuery, RexNode replacement) {
+      this.subQuery = subQuery;
+      this.replacement = replacement;
+    }
 
-        public static RexSubQuery find(Iterable<RexNode> nodes) {
-            for (RexNode node : nodes) {
+    @Override public RexNode visitSubQuery(RexSubQuery subQuery) {
+      return RexUtil.eq(subQuery, this.subQuery) ? replacement : subQuery;
+    }
+  }
+
+  // TODO:
+  // The following HiveSubQueryFinder has been copied from RexUtil::SubQueryFinder
+  // because there is a bug in it (CALCITE-1726).
+  // Once CALCITE-1726 is fixed we should get rid of this code.
+  /** Visitor that throws {@link org.apache.calcite.util.Util.FoundOne} if
+   * applied to an expression that contains a {@link RexSubQuery}. */
+  public static final class HiveSubQueryFinder extends RexVisitorImpl<Void> {
+    public static final HiveSubQueryFinder INSTANCE = new HiveSubQueryFinder();
+
+    /** Returns whether a {@link Project} contains a sub-query. */
+    public static final Predicate<RelNode> RELNODE_PREDICATE =
+        new Predicate<RelNode>() {
+          public boolean apply(RelNode relNode) {
+            if (relNode instanceof Project) {
+              Project project = (Project)relNode;
+              for (RexNode node : project.getProjects()) {
                 try {
-                    node.accept(INSTANCE);
+                  node.accept(INSTANCE);
                 } catch (Util.FoundOne e) {
-                    return (RexSubQuery) e.getNode();
+                  return true;
                 }
+              }
+              return false;
+            } else if (relNode instanceof Filter) {
+              try {
+                ((Filter)relNode).getCondition().accept(INSTANCE);
+                return false;
+              } catch (Util.FoundOne e) {
+                return true;
+              }
             }
-            return null;
-        }
+            return false;
+          }
+        };
 
-        public static RexSubQuery find(RexNode node) {
-            try {
-                node.accept(INSTANCE);
-                return null;
-            } catch (Util.FoundOne e) {
-                return (RexSubQuery) e.getNode();
-            }
+    private HiveSubQueryFinder() {
+      super(true);
+    }
+
+    @Override public Void visitSubQuery(RexSubQuery subQuery) {
+      throw new Util.FoundOne(subQuery);
+    }
+
+    public static RexSubQuery find(Iterable<RexNode> nodes) {
+      for (RexNode node : nodes) {
+        try {
+          node.accept(INSTANCE);
+        } catch (Util.FoundOne e) {
+          return (RexSubQuery) e.getNode();
         }
+      }
+      return null;
+    }
+
+    public static RexSubQuery find(RexNode node) {
+      try {
+        node.accept(INSTANCE);
+        return null;
+      } catch (Util.FoundOne e) {
+        return (RexSubQuery) e.getNode();
+      }
     }
+  }
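// HiveSubQueryFinder uses Util.FoundOne as non-local control flow: the visit
// aborts the traversal by throwing as soon as a RexSubQuery is seen. The same
// pattern in plain Java, as a sketch (class and method names hypothetical):
//
//   static final class Found extends RuntimeException { }
//
//   static boolean containsNegative(int[] xs) {
//     try {
//       for (int x : xs) {
//         if (x < 0) throw new Found();  // abort the scan early
//       }
//       return false;
//     } catch (Found e) {
//       return true;
//     }
//   }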
 
 }
 


[25/50] [abbrv] hive git commit: HIVE-17982 Move metastore specific itests

Posted by ga...@apache.org.
HIVE-17982 Move metastore specific itests


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/002233b9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/002233b9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/002233b9

Branch: refs/heads/standalone-metastore
Commit: 002233b90126469edd61658c1a32988438dfae85
Parents: 12a33fd
Author: Alan Gates <ga...@hortonworks.com>
Authored: Thu Oct 26 09:49:19 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Mon Dec 18 14:56:36 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/metastore/FakeDerby.java |  424 ---
 .../hive/metastore/TestAcidTableSetup.java      |  243 ++
 .../hadoop/hive/metastore/TestAdminUser.java    |   45 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |   54 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |  281 --
 .../hive/metastore/TestHiveMetaStore.java       | 3515 ------------------
 .../hive/metastore/TestHiveMetaStoreTxns.java   |  270 --
 ...TestHiveMetaStoreWithEnvironmentContext.java |  219 --
 .../hive/metastore/TestMarkPartition.java       |  107 -
 .../hive/metastore/TestMarkPartitionRemote.java |   32 -
 .../TestMetaStoreEndFunctionListener.java       |  143 -
 .../metastore/TestMetaStoreEventListener.java   |  524 ---
 .../TestMetaStoreEventListenerOnlyOnCommit.java |  104 -
 .../metastore/TestMetaStoreInitListener.java    |   68 -
 .../metastore/TestMetaStoreListenersError.java  |   85 -
 .../metastore/TestObjectStoreInitRetry.java     |  127 -
 .../TestPartitionNameWhitelistValidation.java   |  123 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |   60 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |   80 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |   28 -
 .../hive/metastore/TestRetryingHMSHandler.java  |  123 -
 .../metastore/TestSetUGIOnBothClientServer.java |   31 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |   31 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |   31 -
 standalone-metastore/pom.xml                    |    2 +
 .../metastore/client/builder/IndexBuilder.java  |    5 +
 .../client/builder/PartitionBuilder.java        |    3 +-
 .../metastore/client/builder/TableBuilder.java  |   12 +-
 .../hive/metastore/conf/MetastoreConf.java      |    2 +-
 .../apache/hadoop/hive/metastore/FakeDerby.java |  404 ++
 .../hive/metastore/MetaStoreTestUtils.java      |   31 +-
 .../hadoop/hive/metastore/TestAdminUser.java    |   46 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |   48 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |  303 ++
 .../hive/metastore/TestHiveMetaStore.java       | 3071 +++++++++++++++
 .../hive/metastore/TestHiveMetaStoreTxns.java   |  264 ++
 ...TestHiveMetaStoreWithEnvironmentContext.java |  188 +
 .../hive/metastore/TestMarkPartition.java       |  117 +
 .../hive/metastore/TestMarkPartitionRemote.java |   36 +
 .../TestMetaStoreEndFunctionListener.java       |  146 +
 .../metastore/TestMetaStoreEventListener.java   |  556 +++
 .../TestMetaStoreEventListenerOnlyOnCommit.java |  123 +
 .../TestMetaStoreEventListenerWithOldConf.java  |  178 +
 .../metastore/TestMetaStoreInitListener.java    |   55 +
 .../metastore/TestMetaStoreListenersError.java  |   93 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    9 +-
 .../metastore/TestObjectStoreInitRetry.java     |  132 +
 .../hadoop/hive/metastore/TestOldSchema.java    |    3 +-
 .../TestPartitionNameWhitelistValidation.java   |  122 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |   62 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |   65 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |   28 +
 .../hive/metastore/TestRetryingHMSHandler.java  |   82 +
 .../metastore/TestSetUGIOnBothClientServer.java |   31 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |   32 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |   32 +
 .../hive/metastore/cache/TestCachedStore.java   |    4 +-
 57 files changed, 6506 insertions(+), 6527 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
deleted file mode 100644
index 51be504..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.Exception;
-import java.lang.Override;
-import java.lang.RuntimeException;
-import java.lang.StackTraceElement;
-import java.sql.Array;
-import java.sql.Blob;
-import java.sql.CallableStatement;
-import java.sql.Clob;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.DriverManager;
-import java.sql.DriverPropertyInfo;
-import java.sql.NClob;
-import java.sql.PreparedStatement;
-import java.sql.SQLClientInfoException;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.SQLWarning;
-import java.sql.SQLXML;
-import java.sql.Savepoint;
-import java.sql.Statement;
-import java.sql.Struct;
-import java.util.Map;
-import java.util.concurrent.Executor;
-import java.util.logging.Logger;
-import java.util.Properties;
-
-import javax.jdo.JDOCanRetryException;
-
-import junit.framework.TestCase;
-import org.junit.Test;
-
-import org.apache.derby.jdbc.EmbeddedDriver;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.ObjectStore;
-
-import org.apache.hadoop.hive.metastore.TestObjectStoreInitRetry;
-
-
-/**
- * Fake derby driver - companion class to enable testing by TestObjectStoreInitRetry
- */
-public class FakeDerby extends org.apache.derby.jdbc.EmbeddedDriver {
-
-  public class Connection implements java.sql.Connection {
-
-    private java.sql.Connection _baseConn;
-
-    public Connection(java.sql.Connection connection) {
-      TestObjectStoreInitRetry.debugTrace();
-      this._baseConn = connection;
-    }
-
-    @Override
-    public Statement createStatement() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement();
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql);
-    }
-
-    @Override
-    public String nativeSQL(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.nativeSQL(sql);
-    }
-
-    @Override
-    public void setAutoCommit(boolean autoCommit) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      TestObjectStoreInitRetry.misbehave();
-      _baseConn.setAutoCommit(autoCommit);
-    }
-
-    @Override
-    public boolean getAutoCommit() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getAutoCommit();
-    }
-
-    @Override
-    public void commit() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.commit();
-    }
-
-    @Override
-    public void rollback() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.rollback();
-    }
-
-    @Override
-    public void close() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.close();
-    }
-
-    @Override
-    public boolean isClosed() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isClosed();
-    }
-
-    @Override
-    public DatabaseMetaData getMetaData() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getMetaData();
-    }
-
-    @Override
-    public void setReadOnly(boolean readOnly) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setReadOnly(readOnly);
-    }
-
-    @Override
-    public boolean isReadOnly() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isReadOnly();
-    }
-
-    @Override
-    public void setCatalog(String catalog) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setCatalog(catalog);
-    }
-
-    @Override
-    public String getCatalog() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getCatalog();
-    }
-
-    @Override
-    public void setTransactionIsolation(int level) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setTransactionIsolation(level);
-    }
-
-    @Override
-    public int getTransactionIsolation() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getTransactionIsolation();
-    }
-
-    @Override
-    public SQLWarning getWarnings() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getWarnings();
-    }
-
-    @Override
-    public void clearWarnings() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.clearWarnings();
-    }
-
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement(resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public Map<String, Class<?>> getTypeMap() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getTypeMap();
-    }
-
-    @Override
-    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setTypeMap(map);
-    }
-
-    @Override
-    public void setHoldability(int holdability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setHoldability(holdability);
-    }
-
-    @Override
-    public int getHoldability() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getHoldability();
-    }
-
-    @Override
-    public Savepoint setSavepoint() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.setSavepoint();
-    }
-
-    @Override
-    public Savepoint setSavepoint(String name) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.setSavepoint(name);
-    }
-
-    @Override
-    public void rollback(Savepoint savepoint) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.rollback(savepoint);
-    }
-
-    @Override
-    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.releaseSavepoint(savepoint);
-    }
-
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, autoGeneratedKeys);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, columnIndexes);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, columnNames);
-    }
-
-    @Override
-    public Clob createClob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createClob();
-    }
-
-    @Override
-    public Blob createBlob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createBlob();
-    }
-
-    @Override
-    public NClob createNClob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createNClob();
-    }
-
-    @Override
-    public SQLXML createSQLXML() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createSQLXML();
-    }
-
-    @Override
-    public boolean isValid(int timeout) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isValid(timeout);
-    }
-
-    @Override
-    public void setClientInfo(String name, String value) throws SQLClientInfoException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setClientInfo(name, value);
-    }
-
-    @Override
-    public void setClientInfo(Properties properties) throws SQLClientInfoException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setClientInfo(properties);
-    }
-
-    @Override
-    public String getClientInfo(String name) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getClientInfo(name);
-    }
-
-    @Override
-    public Properties getClientInfo() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getClientInfo();
-    }
-
-    @Override
-    public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createArrayOf(typeName, elements);
-    }
-
-    @Override
-    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStruct(typeName, attributes);
-    }
-
-    @Override
-    public void setSchema(String schema) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setSchema(schema);
-    }
-
-    @Override
-    public String getSchema() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getSchema();
-    }
-
-    @Override
-    public void abort(Executor executor) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.abort(executor);
-    }
-
-    @Override
-    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setNetworkTimeout(executor, milliseconds);
-    }
-
-    @Override
-    public int getNetworkTimeout() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getNetworkTimeout();
-    }
-
-    @Override
-    public <T> T unwrap(Class<T> iface) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.unwrap(iface);
-    }
-
-    @Override
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isWrapperFor(iface);
-    }
-  }
-
-  public FakeDerby(){
-  }
-
-  @Override
-  public boolean acceptsURL(String url) throws SQLException {
-    url = url.replace("fderby","derby");
-    return super.acceptsURL(url);
-  }
-
-  @Override
-  public Connection connect(java.lang.String url, java.util.Properties info) throws SQLException {
-    TestObjectStoreInitRetry.misbehave();
-    url = url.replace("fderby","derby");
-    return new FakeDerby.Connection(super.connect(url, info));
-  }
-
-  @Override
-  public Logger getParentLogger() throws SQLFeatureNotSupportedException {
-    throw new SQLFeatureNotSupportedException(); // hope this is respected properly
-  }
-
-
-};

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
new file mode 100644
index 0000000..bdc9551
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestAcidTableSetup {
+  private static final Logger LOG = LoggerFactory.getLogger(TestAcidTableSetup.class);
+  protected static HiveMetaStoreClient client;
+  protected static Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+        DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
+    client = new HiveMetaStoreClient(conf);
+  }
+
+  @Test
+  public void testTransactionalValidation() throws Throwable {
+    String dbName = "acidDb";
+    silentDropDatabase(dbName);
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+    String tblName = "acidTable";
+    Map<String, String> fields = new HashMap<>();
+    fields.put("name", ColumnType.STRING_TYPE_NAME);
+    fields.put("income", ColumnType.INT_TYPE_NAME);
+
+    Type type = createType("Person1", fields);
+
+    Map<String, String> params = new HashMap<>();
+    params.put("transactional", "");
+
+    /// CREATE TABLE scenarios
+
+    // Fail - No "transactional" property is specified
+    try {
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true'",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" property is set to an invalid value
+    try {
+      params.clear();
+      params.put("transactional", "foobar");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true'",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" is set to true, but the table is not bucketed
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC)",
+          e.getMessage());
+    }
+
+    List<String> bucketCols = new ArrayList<>();
+    bucketCols.add("income");
+    // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .setBucketCols(bucketCols)
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC)",
+          e.getMessage());
+    }
+
+    // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC
+    params.clear();
+    params.put("transactional", "true");
+    Table t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setTableParams(params)
+        .setCols(type.getFields())
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    assertTrue("CREATE TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+
+    /// ALTER TABLE scenarios
+
+    // Fail - trying to set "transactional" to "false" is not allowed
+    try {
+      params.clear();
+      params.put("transactional", "false");
+      t = new Table();
+      t.setParameters(params);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset", e.getMessage());
+    }
+
+    // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement
+    try {
+      tblName += "1";
+      params.clear();
+      t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(type.getFields())
+          .setInputFormat("org.apache.hadoop.mapred.FileInputFormat")
+          .build();
+      client.createTable(t);
+      params.put("transactional", "true");
+      t.setParameters(params);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC)",
+          e.getMessage());
+    }
+
+    // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement
+    tblName += "2";
+    params.clear();
+    t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setCols(type.getFields())
+        .setNumBuckets(1)
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    params.put("transactional", "true");
+    t.setParameters(params);
+    client.alter_table(dbName, tblName, t);
+    assertTrue("ALTER TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+  }
+
+  private static void silentDropDatabase(String dbName) throws TException {
+    try {
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException|InvalidOperationException e) {
+      // NOP
+    }
+  }
+
+  private Type createType(String typeName, Map<String, String> fields) throws Throwable {
+    Type typ1 = new Type();
+    typ1.setName(typeName);
+    typ1.setFields(new ArrayList<>(fields.size()));
+    for (String fieldName : fields.keySet()) {
+      typ1.getFields().add(
+          new FieldSchema(fieldName, fields.get(fieldName), ""));
+    }
+    client.createType(typ1);
+    return typ1;
+  }
+}
+
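
The checks above encode the metastore's three transactional-table rules: the "transactional" property may only be set to "true", the table must be bucketed, and it must use an ACID-capable storage format such as ORC. As a minimal sketch of a table that passes all three (assuming a connected HiveMetaStoreClient named client and an existing database "acidDb"; the table and column names are illustrative):

    Map<String, String> params = new HashMap<>();
    params.put("transactional", "true");                    // rule 1: only "true" is accepted
    Table acidTable = new TableBuilder()
        .setDbName("acidDb")
        .setTableName("acid_ok")
        .setTableParams(params)
        .setCols(Collections.singletonList(new FieldSchema("income", "int", "")))
        .setNumBuckets(1)
        .setBucketCols(Collections.singletonList("income")) // rule 2: bucketed
        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")   // rule 3: ORC
        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
        .build();
    client.createTable(acidTable);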

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
deleted file mode 100644
index e9dabee..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
-
-public class TestAdminUser extends TestCase{
-
- public void testCreateAdminNAddUser() throws IOException, Throwable {
-   HiveConf conf = new HiveConf();
-   conf.setVar(ConfVars.USERS_IN_ADMIN_ROLE, "adminuser");
-   conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,SQLStdHiveAuthorizerFactory.class.getName());
-   RawStore rawStore = new HMSHandler("testcreateroot", conf).getMS();
-   Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN);
-   assertTrue(adminRole.getOwnerName().equals(HiveMetaStore.ADMIN));
-   assertEquals(rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN, PrincipalType.ROLE)
-    .get(0).getGrantInfo().getPrivilege(),"All");
-   assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0).
-     getRoleName(),HiveMetaStore.ADMIN);
- }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
deleted file mode 100644
index 462768d..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.util.StringUtils;
-
-public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    warehouse = new Warehouse(hiveConf);
-    client = createClient();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.close();
-    } catch (Throwable e) {
-      System.err.println("Unable to close metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
-  }
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    try {
-      return new HiveMetaStoreClient(hiveConf);
-    } catch (Throwable e) {
-      System.err.println("Unable to open the metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/002233b9/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
deleted file mode 100644
index 91fc706..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.UtilsForTest;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Lists;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TestFilterHooks {
-  private static final Logger LOG = LoggerFactory.getLogger(TestFilterHooks.class);
-
-  public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
-    public static boolean blockResults = false;
-
-    public DummyMetaStoreFilterHookImpl(Configuration conf) {
-      super(conf);
-    }
-
-    @Override
-    public List<String> filterDatabases(List<String> dbList) throws MetaException  {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterDatabases(dbList);
-    }
-
-    @Override
-    public Database filterDatabase(Database dataBase) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterDatabase(dataBase);
-    }
-
-    @Override
-    public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterTableNames(dbName, tableList);
-    }
-
-    @Override
-    public Table filterTable(Table table) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterTable(table);
-    }
-
-    @Override
-    public List<Table> filterTables(List<Table> tableList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Table>();
-      }
-      return super.filterTables(tableList);
-    }
-
-    @Override
-    public List<Partition> filterPartitions(List<Partition> partitionList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Partition>();
-      }
-      return super.filterPartitions(partitionList);
-    }
-
-    @Override
-    public List<PartitionSpec> filterPartitionSpecs(
-        List<PartitionSpec> partitionSpecList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<PartitionSpec>();
-      }
-      return super.filterPartitionSpecs(partitionSpecList);
-    }
-
-    @Override
-    public Partition filterPartition(Partition partition) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterPartition(partition);
-    }
-
-    @Override
-    public List<String> filterPartitionNames(String dbName, String tblName,
-        List<String> partitionNames) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterPartitionNames(dbName, tblName, partitionNames);
-    }
-
-    @Override
-    public Index filterIndex(Index index) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterIndex(index);
-    }
-
-    @Override
-    public List<String> filterIndexNames(String dbName, String tblName,
-        List<String> indexList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterIndexNames(dbName, tblName, indexList);
-    }
-
-    @Override
-    public List<Index> filterIndexes(List<Index> indexeList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Index>();
-      }
-      return super.filterIndexes(indexeList);
-    }
-  }
-
-  private static final String DBNAME1 = "testdb1";
-  private static final String DBNAME2 = "testdb2";
-  private static final String TAB1 = "tab1";
-  private static final String TAB2 = "tab2";
-  private static final String INDEX1 = "idx1";
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-  private static Driver driver;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = false;
-
-    hiveConf = new HiveConf(TestFilterHooks.class);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.setVar(ConfVars.METASTORE_FILTER_HOOK, DummyMetaStoreFilterHookImpl.class.getName());
-    UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = new Driver(hiveConf);
-
-    driver.run("drop database if exists " + DBNAME1  + " cascade");
-    driver.run("drop database if exists " + DBNAME2  + " cascade");
-    driver.run("create database " + DBNAME1);
-    driver.run("create database " + DBNAME2);
-    driver.run("use " + DBNAME1);
-    driver.run("create table " + DBNAME1 + "." + TAB1 + " (id int, name string)");
-    driver.run("create table " + TAB2 + " (id int) partitioned by (name string)");
-    driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value1')");
-    driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value2')");
-    driver.run("CREATE INDEX " + INDEX1 + " on table " + TAB1 + "(id) AS 'COMPACT' WITH DEFERRED REBUILD");
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = false;
-    driver.run("drop database if exists " + DBNAME1  + " cascade");
-    driver.run("drop database if exists " + DBNAME2  + " cascade");
-    driver.close();
-    driver.destroy();
-    msc.close();
-  }
-
-  @Test
-  public void testDefaultFilter() throws Exception {
-    assertNotNull(msc.getTable(DBNAME1, TAB1));
-    assertEquals(3, msc.getTables(DBNAME1, "*").size());
-    assertEquals(3, msc.getAllTables(DBNAME1).size());
-    assertEquals(1, msc.getTables(DBNAME1, TAB2).size());
-    assertEquals(0, msc.getAllTables(DBNAME2).size());
-
-    assertNotNull(msc.getDatabase(DBNAME1));
-    assertEquals(3, msc.getDatabases("*").size());
-    assertEquals(3, msc.getAllDatabases().size());
-    assertEquals(1, msc.getDatabases(DBNAME1).size());
-
-    assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
-    assertEquals(1, msc.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size());
-
-    assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
-  }
-
-  @Test
-  public void testDummyFilterForTables() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      msc.getTable(DBNAME1, TAB1);
-      fail("getTable() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Excepted
-    }
-    assertEquals(0, msc.getTables(DBNAME1, "*").size());
-    assertEquals(0, msc.getAllTables(DBNAME1).size());
-    assertEquals(0, msc.getTables(DBNAME1, TAB2).size());
-  }
-
-  @Test
-  public void testDummyFilterForDb() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getDatabase(DBNAME1));
-      fail("getDatabase() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-        // Excepted
-    }
-    assertEquals(0, msc.getDatabases("*").size());
-    assertEquals(0, msc.getAllDatabases().size());
-    assertEquals(0, msc.getDatabases(DBNAME1).size());
-  }
-
-  @Test
-  public void testDummyFilterForPartition() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
-      fail("getPartition() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Excepted
-    }
-    assertEquals(0, msc.getPartitionsByNames(DBNAME1, TAB2,
-        Lists.newArrayList("name=value1")).size());
-  }
-
-  @Test
-  public void testDummyFilterForIndex() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
-      fail("getPartition() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Excepted
-    }
-  }
-
-}


[47/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index bf4bd7a..5cddcba 100644
--- a/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -917,6 +917,76 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("create_or_drop_wm_trigger_to_pool_mapping\n");
   }
 
+  void create_ischema(const ISchema& schema) {
+    // Your implementation goes here
+    printf("create_ischema\n");
+  }
+
+  void alter_ischema(const std::string& schemaName, const ISchema& newSchema) {
+    // Your implementation goes here
+    printf("alter_ischema\n");
+  }
+
+  void get_ischema(ISchema& _return, const std::string& schemaName) {
+    // Your implementation goes here
+    printf("get_ischema\n");
+  }
+
+  void drop_ischema(const std::string& schemaName) {
+    // Your implementation goes here
+    printf("drop_ischema\n");
+  }
+
+  void add_schema_version(const SchemaVersion& schemaVersion) {
+    // Your implementation goes here
+    printf("add_schema_version\n");
+  }
+
+  void get_schema_version(SchemaVersion& _return, const std::string& schemaName, const int32_t version) {
+    // Your implementation goes here
+    printf("get_schema_version\n");
+  }
+
+  void get_schema_latest_version(SchemaVersion& _return, const std::string& schemaName) {
+    // Your implementation goes here
+    printf("get_schema_latest_version\n");
+  }
+
+  void get_schema_all_versions(std::vector<SchemaVersion> & _return, const std::string& schemaName) {
+    // Your implementation goes here
+    printf("get_schema_all_versions\n");
+  }
+
+  void drop_schema_version(const std::string& schemaName, const int32_t version) {
+    // Your implementation goes here
+    printf("drop_schema_version\n");
+  }
+
+  void get_schemas_by_cols(FindSchemasByColsResp& _return, const FindSchemasByColsRqst& rqst) {
+    // Your implementation goes here
+    printf("get_schemas_by_cols\n");
+  }
+
+  void map_schema_version_to_serde(const std::string& schemaName, const int32_t version, const std::string& serdeName) {
+    // Your implementation goes here
+    printf("map_schema_version_to_serde\n");
+  }
+
+  void set_schema_version_state(const std::string& schemaName, const int32_t version, const SchemaVersionState::type state) {
+    // Your implementation goes here
+    printf("set_schema_version_state\n");
+  }
+
+  void add_serde(const SerDeInfo& serde) {
+    // Your implementation goes here
+    printf("add_serde\n");
+  }
+
+  void get_serde(SerDeInfo& _return, const std::string& serdeName) {
+    // Your implementation goes here
+    printf("get_serde\n");
+  }
+
 };
 
 int main(int argc, char **argv) {
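
The skeleton above only stubs the new schema-registry operations on the server side. Under the standard Thrift mapping of these signatures into Java (out-parameters become return values), a hypothetical round trip could look like the sketch below; the ThriftHiveMetastore.Client type, the "events" schema name, and the omitted field population are all assumptions, since only the C++ method shapes appear in this commit:

    import org.apache.hadoop.hive.metastore.api.ISchema;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    static void schemaRoundTrip(ThriftHiveMetastore.Client client) throws TException {
      ISchema schema = new ISchema();                 // field population omitted here
      client.create_ischema(schema);                  // register the schema object
      SchemaVersion latest = client.get_schema_latest_version("events");
      client.drop_schema_version("events", latest.getVersion());
      client.drop_ischema("events");
    }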


[12/50] [abbrv] hive git commit: HIVE-18054: Make Lineage work with concurrent queries on a Session (Andrew Sherman, reviewed by Sahil Takiar)

Posted by ga...@apache.org.
HIVE-18054: Make Lineage work with concurrent queries on a Session (Andrew Sherman, reviewed by Sahil Takiar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/646ccce8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/646ccce8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/646ccce8

Branch: refs/heads/standalone-metastore
Commit: 646ccce8ea3e8c944be164f86dbd5d3428bdbc44
Parents: f52e8b4
Author: Andrew Sherman <as...@cloudera.com>
Authored: Sat Dec 16 15:14:54 2017 -0600
Committer: Sahil Takiar <st...@cloudera.com>
Committed: Sat Dec 16 15:24:11 2017 -0600

----------------------------------------------------------------------
 .../java/org/apache/hive/jdbc/ReadableHook.java |  52 +++++++++
 .../apache/hive/jdbc/TestJdbcWithMiniHS2.java   | 114 +++++++++++++++++++
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  46 ++++++--
 .../org/apache/hadoop/hive/ql/QueryState.java   |  35 +++++-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   6 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   9 +-
 .../org/apache/hadoop/hive/ql/exec/Task.java    |   4 +
 .../bootstrap/load/table/LoadPartitions.java    |   3 +-
 .../repl/bootstrap/load/table/LoadTable.java    |   3 +-
 .../hadoop/hive/ql/hooks/HookContext.java       |   9 +-
 .../hadoop/hive/ql/hooks/LineageLogger.java     |  56 +++++++--
 .../hive/ql/index/AggregateIndexHandler.java    |   7 +-
 .../hadoop/hive/ql/index/HiveIndexHandler.java  |   6 +-
 .../hive/ql/index/TableBasedIndexHandler.java   |  18 ++-
 .../ql/index/bitmap/BitmapIndexHandler.java     |   8 +-
 .../ql/index/compact/CompactIndexHandler.java   |   8 +-
 .../hive/ql/optimizer/GenMRFileSink1.java       |   2 +-
 .../hive/ql/optimizer/GenMapRedUtils.java       |  27 +++--
 .../hadoop/hive/ql/optimizer/IndexUtils.java    |   6 +-
 .../hive/ql/optimizer/lineage/Generator.java    |   8 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  15 ++-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |   2 +-
 .../hadoop/hive/ql/parse/GenTezUtils.java       |   3 +-
 .../hive/ql/parse/ImportSemanticAnalyzer.java   |   5 +-
 .../hadoop/hive/ql/parse/IndexUpdater.java      |   9 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java     |   2 +-
 .../ql/parse/ReplicationSemanticAnalyzer.java   |   4 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   8 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |  11 +-
 .../hive/ql/parse/spark/GenSparkUtils.java      |   2 +-
 .../apache/hadoop/hive/ql/plan/MoveWork.java    |  25 +---
 .../hadoop/hive/ql/session/LineageState.java    |   2 +-
 .../hadoop/hive/ql/session/SessionState.java    |  15 ---
 ...TestGenMapRedUtilsCreateConditionalTask.java |  18 ++-
 34 files changed, 403 insertions(+), 145 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/itests/hive-unit/src/test/java/org/apache/hive/jdbc/ReadableHook.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/ReadableHook.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/ReadableHook.java
new file mode 100644
index 0000000..2dd283f
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/ReadableHook.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.jdbc;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
+import org.apache.hadoop.hive.ql.hooks.HookContext;
+
+/**
+ * An ExecuteWithHookContext that stores HookContexts in memory and makes them available for reading.
+ */
+public class ReadableHook implements ExecuteWithHookContext {
+
+  private static List<HookContext> hookList = Collections.synchronizedList(new ArrayList<>());
+
+  @Override
+  public void run(HookContext hookContext) throws Exception {
+    hookList.add(hookContext);
+  }
+
+  /**
+   * @return the stored HookContexts.
+   */
+  public static List<HookContext> getHookList() {
+    return hookList;
+  }
+
+  /**
+   * Clear the stored HookContexts.
+   */
+  public static void clear() {
+    hookList.clear();
+  }
+}
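
To capture contexts, the hook is appended to the post-execution hook list and drained after the queries run; a minimal sketch mirroring the wiring used in TestJdbcWithMiniHS2 below:

    HiveConf conf = new HiveConf();
    conf.setVar(HiveConf.ConfVars.POSTEXECHOOKS,
        ReadableHook.class.getName() + "," + LineageLogger.class.getName());
    // ... run queries through a session or MiniHS2 built on this conf ...
    List<HookContext> captured = ReadableHook.getHookList();
    ReadableHook.clear();    // reset between test cases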

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 70bd29c..ffeee69 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -40,6 +40,7 @@ import java.sql.Statement;
 import java.sql.Types;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -47,6 +48,7 @@ import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.SynchronousQueue;
@@ -64,8 +66,12 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.ObjectStore;
+import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.hooks.HookContext;
+import org.apache.hadoop.hive.ql.hooks.LineageLogger;
+import org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx;
 import org.apache.hive.common.util.ReflectionUtil;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import org.apache.hive.service.cli.HiveSQLException;
@@ -205,6 +211,9 @@ public class TestJdbcWithMiniHS2 {
     conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
     conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false);
     conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false);
+    // store post-exec hooks calls so we can look at them later
+    conf.setVar(ConfVars.POSTEXECHOOKS, ReadableHook.class.getName() + "," +
+        LineageLogger.class.getName());
     MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false);
     if (httpMode) {
       builder = builder.withHTTPTransport();
@@ -1503,4 +1512,109 @@ public class TestJdbcWithMiniHS2 {
     stmt.close();
     fsConn.close();
   }
+
+  /**
+   * A test that checks that Lineage is correct when multiple concurrent
+   * requests are made on a connection.
+   */
+  @Test
+  public void testConcurrentLineage() throws Exception {
+    // setup to run concurrent operations
+    Statement stmt = conTestDb.createStatement();
+    setSerializeInTasksInConf(stmt);
+    stmt.execute("drop table if exists testConcurrentLineage1");
+    stmt.execute("drop table if exists testConcurrentLineage2");
+    stmt.execute("create table testConcurrentLineage1 (col1 int)");
+    stmt.execute("create table testConcurrentLineage2 (col2 int)");
+
+    // clear vertices list
+    ReadableHook.clear();
+
+    // run 5 sql inserts concurrently
+    int numThreads = 5;        // set to 1 for single threading
+    int concurrentCalls = 5;
+    ExecutorService pool = Executors.newFixedThreadPool(numThreads);
+    try {
+      List<InsertCallable> tasks = new ArrayList<>();
+      for (int i = 0; i < concurrentCalls; i++) {
+        InsertCallable runner = new InsertCallable(conTestDb);
+        tasks.add(runner);
+      }
+      List<Future<Void>> futures = pool.invokeAll(tasks);
+      for (Future<Void> future : futures) {
+        future.get(20, TimeUnit.SECONDS);
+      }
+      // check to see that the vertices are correct
+      checkVertices();
+    } finally {
+      // clean up
+      stmt.execute("drop table testConcurrentLineage1");
+      stmt.execute("drop table testConcurrentLineage2");
+      stmt.close();
+      pool.shutdownNow();
+    }
+  }
+
+  /**
+   * A Callable that performs two inserts.
+   */
+  private class InsertCallable implements Callable<Void> {
+    private Connection connection;
+
+    InsertCallable(Connection conn) {
+      this.connection = conn;
+    }
+
+    @Override public Void call() throws Exception {
+      doLineageInserts(connection);
+      return null;
+    }
+
+    private void doLineageInserts(Connection connection) throws SQLException {
+      Statement stmt = connection.createStatement();
+      stmt.execute("insert into testConcurrentLineage1 values (1)");
+      stmt.execute("insert into testConcurrentLineage2 values (2)");
+    }
+  }
+  /**
+   * Check that the vertices derived from the HookContexts are correct.
+   */
+  private void checkVertices() {
+    List<Set<LineageLogger.Vertex>> verticesLists = getVerticesFromHooks();
+
+    assertEquals("5 runs of 2 inserts makes 10", 10, verticesLists.size());
+    for (Set<LineageLogger.Vertex> vertices : verticesLists) {
+      assertFalse("Each insert affects a column so should be some vertices",
+          vertices.isEmpty());
+      assertEquals("Each insert affects one column so should be one vertex",
+          1, vertices.size());
+      Iterator<LineageLogger.Vertex> iterator = vertices.iterator();
+      assertTrue(iterator.hasNext());
+      LineageLogger.Vertex vertex = iterator.next();
+      assertEquals(0, vertex.getId());
+      assertEquals(LineageLogger.Vertex.Type.COLUMN, vertex.getType());
+      String label = vertex.getLabel();
+      System.out.println("vertex.getLabel() = " + label);
+      assertTrue("did not see one of the 2 expected column names",
+          label.equals("testjdbcminihs2.testconcurrentlineage1.col1") ||
+              label.equals("testjdbcminihs2.testconcurrentlineage2.col2"));
+    }
+  }
+
+  /**
+   * Use the logic in LineageLogger to get vertices from HookContexts.
+   */
+  private List<Set<LineageLogger.Vertex>> getVerticesFromHooks() {
+    List<Set<LineageLogger.Vertex>> verticesLists = new ArrayList<>();
+    List<HookContext> hookList = ReadableHook.getHookList();
+    for (HookContext hookContext : hookList) {
+      QueryPlan plan = hookContext.getQueryPlan();
+      LineageCtx.Index index = hookContext.getIndex();
+      assertNotNull(index);
+      List<LineageLogger.Edge> edges = LineageLogger.getEdges(plan, index);
+      Set<LineageLogger.Vertex> vertices = LineageLogger.getVertices(edges);
+      verticesLists.add(vertices);
+    }
+    return verticesLists;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index d3df015..b168906 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -112,6 +112,7 @@ import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivObjectActionType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.ql.wm.WmContext;
@@ -374,12 +375,20 @@ public class Driver implements CommandProcessor {
     this(getNewQueryState(conf), null);
   }
 
+  // Pass lineageState when a driver instantiates another Driver to run
+  // or compile another query
+  public Driver(HiveConf conf, LineageState lineageState) {
+    this(getNewQueryState(conf, lineageState), null);
+  }
+
   public Driver(HiveConf conf, HiveTxnManager txnMgr) {
     this(getNewQueryState(conf), null, null, txnMgr);
   }
 
-  public Driver(HiveConf conf, Context ctx) {
-    this(getNewQueryState(conf), null, null);
+  // Pass lineageState when a driver instantiates another Driver to run
+  // or compile another query
+  public Driver(HiveConf conf, Context ctx, LineageState lineageState) {
+    this(getNewQueryState(conf, lineageState), null, null);
     this.ctx = ctx;
   }
 
@@ -387,6 +396,12 @@ public class Driver implements CommandProcessor {
     this(getNewQueryState(conf), userName, null);
   }
 
+  // Pass lineageState when a driver instantiates another Driver to run
+  // or compile another query
+  public Driver(HiveConf conf, String userName, LineageState lineageState) {
+    this(getNewQueryState(conf, lineageState), userName, null);
+  }
+
   public Driver(QueryState queryState, String userName) {
     this(queryState, userName, new HooksLoader(queryState.getConf()), null, null);
   }
@@ -425,6 +440,20 @@ public class Driver implements CommandProcessor {
   }
 
   /**
+   * Generates a new QueryState object, making sure that a new queryId is generated.
+   * @param conf The HiveConf which should be used
+   * @param lineageState a LineageState to be set in the new QueryState object
+   * @return The new QueryState object
+   */
+  private static QueryState getNewQueryState(HiveConf conf, LineageState lineageState) {
+    return new QueryState.Builder()
+        .withGenerateNewQueryId(true)
+        .withHiveConf(conf)
+        .withLineageState(lineageState)
+        .build();
+  }
+
+  /**
    * Compile a new query. Any currently-planned query associated with this Driver is discarded.
    * Do not reset id for inner queries(index, etc). Task ids are used for task identity check.
    *
@@ -1336,9 +1365,6 @@ public class Driver implements CommandProcessor {
   private void releaseResources() {
     releasePlan();
     releaseDriverContext();
-    if (SessionState.get() != null) {
-      SessionState.get().getLineageState().clear();
-    }
   }
 
   @Override
@@ -2404,9 +2430,6 @@ public class Driver implements CommandProcessor {
     releaseFetchTask();
     releaseResStream();
     releaseContext();
-    if (SessionState.get() != null) {
-      SessionState.get().getLineageState().clear();
-    }
     if(destroyed) {
       if (!hiveLocks.isEmpty()) {
         try {
@@ -2440,9 +2463,6 @@ public class Driver implements CommandProcessor {
       lDrvState.stateLock.unlock();
       LockedDriverState.removeLockedDriverState();
     }
-    if (SessionState.get() != null) {
-      SessionState.get().getLineageState().clear();
-    }
     return 0;
   }
 
@@ -2504,4 +2524,8 @@ public class Driver implements CommandProcessor {
     releaseResources();
     this.queryState = getNewQueryState(queryState.getConf());
   }
+
+  public QueryState getQueryState() {
+    return queryState;
+  }
 }
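
The net effect of these constructors is that a parent query can hand its per-query LineageState to any child Driver it spawns, rather than funneling lineage through the shared SessionState as before. A minimal sketch, assuming a parent QueryState is already in hand:

    // share the parent's lineage tracking with a child Driver
    LineageState sharedLineage = parentQueryState.getLineageState();
    Driver childDriver = new Driver(conf, sharedLineage);

    // equivalently, build a QueryState for the child directly
    QueryState childState = new QueryState.Builder()
        .withGenerateNewQueryId(true)
        .withHiveConf(conf)
        .withLineageState(sharedLineage)
        .build();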

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
index f3a46db..4f0c165 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * The class to store query level info such as queryId. Multiple queries can run
@@ -40,12 +41,17 @@ public class QueryState {
   private HiveOperation commandType;
 
   /**
+   * Per-query Lineage state to track what happens in the query
+   */
+  private LineageState lineageState = new LineageState();
+
+  /**
    * transaction manager used in the query.
    */
   private HiveTxnManager txnManager;
 
   /**
-   * Private constructor, use QueryState.Builder instead
+   * Private constructor, use QueryState.Builder instead.
    * @param conf The query specific configuration object
    */
   private QueryState(HiveConf conf) {
@@ -79,6 +85,14 @@ public class QueryState {
     return queryConf;
   }
 
+  public LineageState getLineageState() {
+    return lineageState;
+  }
+
+  public void setLineageState(LineageState lineageState) {
+    this.lineageState = lineageState;
+  }
+
   public HiveTxnManager getTxnManager() {
     return txnManager;
   }
@@ -95,9 +109,10 @@ public class QueryState {
     private boolean runAsync = false;
     private boolean generateNewQueryId = false;
     private HiveConf hiveConf = null;
+    private LineageState lineageState = null;
 
     /**
-     * Default constructor - use this builder to create a QueryState object
+     * Default constructor - use this builder to create a QueryState object.
      */
     public Builder() {
     }
@@ -149,6 +164,16 @@ public class QueryState {
     }
 
     /**
+     * Add a LineageState that will be set in the built QueryState.
+     * @param lineageState the source lineageState
+     * @return the builder
+     */
+    public Builder withLineageState(LineageState lineageState) {
+      this.lineageState = lineageState;
+      return this;
+    }
+
+    /**
      * Creates the QueryState object. The default values are:
      * - runAsync false
      * - confOverlay null
@@ -184,7 +209,11 @@ public class QueryState {
         queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, QueryPlan.makeQueryId());
       }
 
-      return new QueryState(queryConf);
+      QueryState queryState = new QueryState(queryConf);
+      if (lineageState != null) {
+        queryState.setLineageState(lineageState);
+      }
+      return queryState;
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 55ef8de..05041cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4478,7 +4478,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
     }
     // Don't set inputs and outputs - the locks have already been taken so it's pointless.
-    MoveWork mw = new MoveWork(null, null, null, null, false, SessionState.get().getLineageState());
+    MoveWork mw = new MoveWork(null, null, null, null, false);
     mw.setMultiFilesDesc(new LoadMultiFilesDesc(srcs, tgts, true, null, null));
     ImportCommitWork icw = new ImportCommitWork(tbl.getDbName(), tbl.getTableName(), mmWriteId, stmtId);
     Task<?> mv = TaskFactory.get(mw, conf), ic = TaskFactory.get(icw, conf);
@@ -4909,7 +4909,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
         if (crtTbl.isCTAS()) {
           DataContainer dc = new DataContainer(createdTable.getTTable());
-          SessionState.get().getLineageState().setLineage(
+          queryState.getLineageState().setLineage(
                   createdTable.getPath(), dc, createdTable.getCols()
           );
         }
@@ -5137,7 +5137,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
       //set lineage info
       DataContainer dc = new DataContainer(tbl.getTTable());
-      SessionState.get().getLineageState().setLineage(new Path(crtView.getViewName()), dc, tbl.getCols());
+      queryState.getLineageState().setLineage(new Path(crtView.getViewName()), dc, tbl.getCols());
     }
     return 0;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index f5a5e71..8387208 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -398,7 +398,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
             dc = handleStaticParts(db, table, tbd, ti);
           }
         }
-        if (work.getLineagState() != null && dc != null) {
+        if (dc != null) {
           // If we are doing an update or a delete the number of columns in the table will not
           // match the number of columns in the file sink.  For update there will be one too many
           // (because of the ROW__ID), and in the case of the delete there will be just the
@@ -416,7 +416,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
               tableCols = table.getCols();
               break;
           }
-          work.getLineagState().setLineage(tbd.getSourcePath(), dc, tableCols);
+          queryState.getLineageState().setLineage(tbd.getSourcePath(), dc, tableCols);
         }
         releaseLocks(tbd);
       }
@@ -552,10 +552,9 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
       dc = new DataContainer(table.getTTable(), partn.getTPartition());
 
       // Don't set lineage on delete as we don't have all the columns
-      if (work.getLineagState() != null &&
-          work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE &&
+      if (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE &&
           work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) {
-        work.getLineagState().setLineage(tbd.getSourcePath(), dc,
+        queryState.getLineageState().setLineage(tbd.getSourcePath(), dc,
             table.getCols());
       }
       LOG.info("Loading partition " + entry.getKey());

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index 1f0487f..d75fcf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -649,4 +649,8 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
     return true;
   }
 
+  public QueryState getQueryState() {
+    return queryState;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
index 262225f..1a542e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java
@@ -245,8 +245,7 @@ public class LoadPartitions {
         SessionState.get().getTxnMgr().getCurrentTxnId()
     );
     loadTableWork.setInheritTableSpecs(false);
-    MoveWork work = new MoveWork(new HashSet<>(), new HashSet<>(), loadTableWork, null, false,
-        context.sessionStateLineageState);
+    MoveWork work = new MoveWork(new HashSet<>(), new HashSet<>(), loadTableWork, null, false);
     return TaskFactory.get(work, context.hiveConf);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
index 545b7a8..f5125a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java
@@ -233,8 +233,7 @@ public class LoadTable {
         SessionState.get().getTxnMgr().getCurrentTxnId()
     );
     MoveWork moveWork =
-        new MoveWork(new HashSet<>(), new HashSet<>(), loadTableWork, null, false,
-            context.sessionStateLineageState);
+        new MoveWork(new HashSet<>(), new HashSet<>(), loadTableWork, null, false);
     Task<?> loadTableTask = TaskFactory.get(moveWork, context.hiveConf);
     copyTask.addDependentTask(loadTableTask);
     return copyTask;

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
index 7b61730..93f1da7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.exec.TaskRunner;
 import org.apache.hadoop.hive.ql.history.HiveHistory;
 import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx.Index;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -88,12 +87,8 @@ public class HookContext {
     inputs = queryPlan.getInputs();
     outputs = queryPlan.getOutputs();
     ugi = Utils.getUGI();
-    linfo= null;
-    depMap = null;
-    if(SessionState.get() != null){
-      linfo = SessionState.get().getLineageState().getLineageInfo();
-      depMap = SessionState.get().getLineageState().getIndex();
-    }
+    linfo = queryState.getLineageState().getLineageInfo();
+    depMap = queryState.getLineageState().getIndex();
     this.userName = userName;
     this.ipAddress = ipAddress;
     this.hiveInstanceAddress = hiveInstanceAddress;

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 2f764f8..06eb9c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.hooks;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.hash.Hasher;
 import com.google.common.hash.Hashing;
@@ -74,7 +75,15 @@ public class LineageLogger implements ExecuteWithHookContext {
 
   private static final String FORMAT_VERSION = "1.0";
 
-  final static class Edge {
+  /**
+   * An edge in lineage.
+   */
+  @VisibleForTesting
+  public static final class Edge {
+
+    /**
+     * The types of Edge.
+     */
     public static enum Type {
       PROJECTION, PREDICATE
     }
@@ -92,7 +101,15 @@ public class LineageLogger implements ExecuteWithHookContext {
     }
   }
 
-  final static class Vertex {
+  /**
+   * A vertex in lineage.
+   */
+  @VisibleForTesting
+  public static final class Vertex {
+
+    /**
+     * A type in lineage.
+     */
     public static enum Type {
       COLUMN, TABLE
     }
@@ -125,6 +142,21 @@ public class LineageLogger implements ExecuteWithHookContext {
       Vertex vertex = (Vertex) obj;
       return label.equals(vertex.label) && type == vertex.type;
     }
+
+    @VisibleForTesting
+    public Type getType() {
+      return type;
+    }
+
+    @VisibleForTesting
+    public String getLabel() {
+      return label;
+    }
+
+    @VisibleForTesting
+    public int getId() {
+      return id;
+    }
   }
 
   @Override
@@ -203,7 +235,7 @@ public class LineageLogger implements ExecuteWithHookContext {
   /**
    * Logger an error to console if available.
    */
-  private void log(String error) {
+  private static void log(String error) {
     LogHelper console = SessionState.getConsole();
     if (console != null) {
       console.printError(error);
@@ -214,7 +246,8 @@ public class LineageLogger implements ExecuteWithHookContext {
    * Based on the final select operator, find out all the target columns.
    * For each target column, find out its sources based on the dependency index.
    */
-  private List<Edge> getEdges(QueryPlan plan, Index index) {
+  @VisibleForTesting
+  public static List<Edge> getEdges(QueryPlan plan, Index index) {
     LinkedHashMap<String, ObjectPair<SelectOperator,
       org.apache.hadoop.hive.ql.metadata.Table>> finalSelOps = index.getFinalSelectOps();
     Map<String, Vertex> vertexCache = new LinkedHashMap<String, Vertex>();
@@ -292,7 +325,7 @@ public class LineageLogger implements ExecuteWithHookContext {
     return edges;
   }
 
-  private void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
+  private static void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
       Set<BaseColumnInfo> srcCols, Vertex target, String expr, Edge.Type type) {
     Set<Vertex> targets = new LinkedHashSet<Vertex>();
     targets.add(target);
@@ -304,7 +337,7 @@ public class LineageLogger implements ExecuteWithHookContext {
    * If found, add the more targets to this edge's target vertex list.
    * Otherwise, create a new edge and add to edge list.
    */
-  private void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
+  private static void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
       Set<BaseColumnInfo> srcCols, Set<Vertex> targets, String expr, Edge.Type type) {
     Set<Vertex> sources = createSourceVertices(vertexCache, srcCols);
     Edge edge = findSimilarEdgeBySources(edges, sources, expr, type);
@@ -319,7 +352,7 @@ public class LineageLogger implements ExecuteWithHookContext {
    * Convert a list of columns to a set of vertices.
    * Use cached vertices if possible.
    */
-  private Set<Vertex> createSourceVertices(
+  private static Set<Vertex> createSourceVertices(
       Map<String, Vertex> vertexCache, Collection<BaseColumnInfo> baseCols) {
     Set<Vertex> sources = new LinkedHashSet<Vertex>();
     if (baseCols != null && !baseCols.isEmpty()) {
@@ -346,7 +379,7 @@ public class LineageLogger implements ExecuteWithHookContext {
   /**
    * Find a vertex in the cache, or create one if absent.
    */
-  private Vertex getOrCreateVertex(
+  private static Vertex getOrCreateVertex(
       Map<String, Vertex> vertices, String label, Vertex.Type type) {
     Vertex vertex = vertices.get(label);
     if (vertex == null) {
@@ -359,7 +392,7 @@ public class LineageLogger implements ExecuteWithHookContext {
   /**
    * Find an edge that has the same type, expression, and sources.
    */
-  private Edge findSimilarEdgeBySources(
+  private static Edge findSimilarEdgeBySources(
       List<Edge> edges, Set<Vertex> sources, String expr, Edge.Type type) {
     for (Edge edge: edges) {
       if (edge.type == type && StringUtils.equals(edge.expr, expr)
@@ -373,7 +406,7 @@ public class LineageLogger implements ExecuteWithHookContext {
   /**
    * Generate normalized name for a given target column.
    */
-  private String getTargetFieldName(int fieldIndex,
+  private static String getTargetFieldName(int fieldIndex,
       String destTableName, List<String> colNames, List<FieldSchema> fieldSchemas) {
     String fieldName = fieldSchemas.get(fieldIndex).getName();
     String[] parts = fieldName.split("\\.");
@@ -394,7 +427,8 @@ public class LineageLogger implements ExecuteWithHookContext {
    * Get all the vertices of all edges: targets first,
    * then sources. Assign an id to each vertex.
    */
-  private Set<Vertex> getVertices(List<Edge> edges) {
+  @VisibleForTesting
+  public static Set<Vertex> getVertices(List<Edge> edges) {
     Set<Vertex> vertices = new LinkedHashSet<Vertex>();
     for (Edge edge: edges) {
       vertices.addAll(edge.targets);

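A side effect of widening Edge, Vertex, getEdges() and getVertices() is that lineage output can now be inspected outside the hook itself. A minimal sketch of that pattern, assuming a compiled QueryPlan and a populated lineage Index are already in hand (the wrapper class and method names below are illustrative, not part of the patch):

    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.hive.ql.QueryPlan;
    import org.apache.hadoop.hive.ql.hooks.LineageLogger;
    import org.apache.hadoop.hive.ql.hooks.LineageLogger.Edge;
    import org.apache.hadoop.hive.ql.hooks.LineageLogger.Vertex;
    import org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx.Index;

    public final class LineageDump {
      // Collect the edges for the plan, then print every vertex with its assigned id.
      static void dump(QueryPlan plan, Index index) {
        List<Edge> edges = LineageLogger.getEdges(plan, index);   // public as of this patch
        Set<Vertex> vertices = LineageLogger.getVertices(edges);  // assigns ids, targets first
        for (Vertex v : vertices) {
          System.out.println(v.getId() + "\t" + v.getType() + "\t" + v.getLabel());
        }
      }
    }
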
http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
index 68709b4..bf06723 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/AggregateIndexHandler.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveUtils;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
-
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * Index handler for indexes that have aggregate functions on indexed columns.
@@ -90,7 +90,8 @@ public class AggregateIndexHandler extends CompactIndexHandler {
         Set<WriteEntity> outputs,
         Index index, boolean partitioned,
         PartitionDesc indexTblPartDesc, String indexTableName,
-        PartitionDesc baseTablePartDesc, String baseTableName, String dbName) {
+        PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
+        LineageState lineageState) {
 
       List<FieldSchema> indexField = index.getSd().getCols();
       String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
@@ -152,7 +153,7 @@ public class AggregateIndexHandler extends CompactIndexHandler {
       builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false);
       builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
       Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
-          command, (LinkedHashMap<String, String>) partSpec, indexTableName, dbName);
+          command, (LinkedHashMap<String, String>) partSpec, indexTableName, dbName, lineageState);
       return rootTask;
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
index 1e577da..b6c0252 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/HiveIndexHandler.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * HiveIndexHandler defines a pluggable interface for adding new index handlers
@@ -99,6 +100,9 @@ public interface HiveIndexHandler extends Configurable {
    *          outputs for hooks, supplemental outputs going
    *          along with the return value
    *
+   * @param lineageState
+   *          tracks Lineage for the query
+   *
    * @return list of tasks to be executed in parallel for building the index
    *
    * @throws HiveException if plan generation fails
@@ -108,7 +112,7 @@ public interface HiveIndexHandler extends Configurable {
       org.apache.hadoop.hive.metastore.api.Index index,
       List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
       org.apache.hadoop.hive.ql.metadata.Table indexTbl,
-      Set<ReadEntity> inputs, Set<WriteEntity> outputs)
+      Set<ReadEntity> inputs, Set<WriteEntity> outputs, LineageState lineageState)
       throws HiveException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
index 29886ae..744ac29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/TableBasedIndexHandler.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * Index handler for indexes that use tables to store indexes.
@@ -51,7 +52,8 @@ public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
       org.apache.hadoop.hive.metastore.api.Index index,
       List<Partition> indexTblPartitions, List<Partition> baseTblPartitions,
       org.apache.hadoop.hive.ql.metadata.Table indexTbl,
-      Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws HiveException {
+      Set<ReadEntity> inputs, Set<WriteEntity> outputs,
+      LineageState lineageState) throws HiveException {
     try {
 
       TableDesc desc = Utilities.getTableDesc(indexTbl);
@@ -66,7 +68,7 @@ public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
         Task<?> indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, false,
             new PartitionDesc(desc, null), indexTbl.getTableName(),
             new PartitionDesc(Utilities.getTableDesc(baseTbl), null),
-            baseTbl.getTableName(), indexTbl.getDbName());
+            baseTbl.getTableName(), indexTbl.getDbName(), lineageState);
         indexBuilderTasks.add(indexBuilder);
       } else {
 
@@ -89,7 +91,8 @@ public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
           // for each partition, spawn a map reduce task.
           Task<?> indexBuilder = getIndexBuilderMapRedTask(inputs, outputs, index, true,
               new PartitionDesc(indexPart), indexTbl.getTableName(),
-              new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName());
+              new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName(),
+              lineageState);
           indexBuilderTasks.add(indexBuilder);
         }
       }
@@ -102,15 +105,18 @@ public abstract class TableBasedIndexHandler extends AbstractIndexHandler {
   protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
       Index index, boolean partitioned,
       PartitionDesc indexTblPartDesc, String indexTableName,
-      PartitionDesc baseTablePartDesc, String baseTableName, String dbName) throws HiveException {
+      PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
+      LineageState lineageState) throws HiveException {
     return getIndexBuilderMapRedTask(inputs, outputs, index.getSd().getCols(),
-        partitioned, indexTblPartDesc, indexTableName, baseTablePartDesc, baseTableName, dbName);
+        partitioned, indexTblPartDesc, indexTableName, baseTablePartDesc, baseTableName, dbName,
+        lineageState);
   }
 
   protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
       List<FieldSchema> indexField, boolean partitioned,
       PartitionDesc indexTblPartDesc, String indexTableName,
-      PartitionDesc baseTablePartDesc, String baseTableName, String dbName) throws HiveException {
+      PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
+      LineageState lineageState) throws HiveException {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
index 7b067a0..9117159 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/bitmap/BitmapIndexHandler.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
@@ -115,7 +116,7 @@ public class BitmapIndexHandler extends TableBasedIndexHandler {
     LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString());
     HiveConf queryConf = new HiveConf(pctx.getConf(), BitmapIndexHandler.class);
     HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false);
-    Driver driver = new Driver(queryConf);
+    Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState());
     driver.compile(qlCommand.toString(), false);
 
     queryContext.setIndexIntermediateFile(tmpFile);
@@ -222,7 +223,8 @@ public class BitmapIndexHandler extends TableBasedIndexHandler {
   protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
       List<FieldSchema> indexField, boolean partitioned,
       PartitionDesc indexTblPartDesc, String indexTableName,
-      PartitionDesc baseTablePartDesc, String baseTableName, String dbName) throws HiveException {
+      PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
+      LineageState lineageState) throws HiveException {
 
     HiveConf builderConf = new HiveConf(getConf(), BitmapIndexHandler.class);
     HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEROWOFFSET, true);
@@ -290,7 +292,7 @@ public class BitmapIndexHandler extends TableBasedIndexHandler {
     }
 
     Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
-        command, partSpec, indexTableName, dbName);
+        command, partSpec, indexTableName, dbName, lineageState);
     return rootTask;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
index 504b062..73278cd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/compact/CompactIndexHandler.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.PartitionDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
@@ -94,7 +95,8 @@ public class CompactIndexHandler extends TableBasedIndexHandler {
   protected Task<?> getIndexBuilderMapRedTask(Set<ReadEntity> inputs, Set<WriteEntity> outputs,
       List<FieldSchema> indexField, boolean partitioned,
       PartitionDesc indexTblPartDesc, String indexTableName,
-      PartitionDesc baseTablePartDesc, String baseTableName, String dbName) throws HiveException {
+      PartitionDesc baseTablePartDesc, String baseTableName, String dbName,
+      LineageState lineageState) throws HiveException {
 
     String indexCols = HiveUtils.getUnparsedColumnNamesFromFieldSchema(indexField);
 
@@ -150,7 +152,7 @@ public class CompactIndexHandler extends TableBasedIndexHandler {
     builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false);
     builderConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false);
     Task<?> rootTask = IndexUtils.createRootTask(builderConf, inputs, outputs,
-        command, partSpec, indexTableName, dbName);
+        command, partSpec, indexTableName, dbName, lineageState);
     return rootTask;
   }
 
@@ -189,7 +191,7 @@ public class CompactIndexHandler extends TableBasedIndexHandler {
     LOG.info("Generating tasks for re-entrant QL query: " + qlCommand.toString());
     HiveConf queryConf = new HiveConf(pctx.getConf(), CompactIndexHandler.class);
     HiveConf.setBoolVar(queryConf, HiveConf.ConfVars.COMPRESSRESULT, false);
-    Driver driver = new Driver(queryConf);
+    Driver driver = new Driver(queryConf, pctx.getQueryState().getLineageState());
     driver.compile(qlCommand.toString(), false);
 
     if (pctx.getConf().getBoolVar(ConfVars.HIVE_INDEX_COMPACT_BINARY_SEARCH) && useSorted) {

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
index d7a83f7..bb42dde 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java
@@ -112,7 +112,7 @@ public class GenMRFileSink1 implements NodeProcessor {
       LOG.info("using CombineHiveInputformat for the merge job");
       GenMapRedUtils.createMRWorkForMergingFiles(fsOp, finalName,
           ctx.getDependencyTaskForMultiInsert(), ctx.getMvTask(),
-          hconf, currTask);
+          hconf, currTask, parseCtx.getQueryState().getLineageState());
     }
 
     FileSinkDesc fileSinkDesc = fsOp.getConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index bdaf105..a0b2678 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1229,6 +1229,7 @@ public final class GenMapRedUtils {
    * @param mvTasks
    * @param conf
    * @param currTask
+   * @param lineageState
    * @throws SemanticException
 
    * create a Map-only merge job using CombineHiveInputFormat for all partitions with
@@ -1257,10 +1258,11 @@ public final class GenMapRedUtils {
    *          directories.
    *
    */
-  public static void createMRWorkForMergingFiles (FileSinkOperator fsInput,
-   Path finalName, DependencyCollectionTask dependencyTask,
-   List<Task<MoveWork>> mvTasks, HiveConf conf,
-   Task<? extends Serializable> currTask) throws SemanticException {
+  public static void createMRWorkForMergingFiles(FileSinkOperator fsInput,
+      Path finalName, DependencyCollectionTask dependencyTask,
+      List<Task<MoveWork>> mvTasks, HiveConf conf,
+      Task<? extends Serializable> currTask, LineageState lineageState)
+      throws SemanticException {
 
     //
     // 1. create the operator tree
@@ -1370,8 +1372,7 @@ public final class GenMapRedUtils {
     if (srcMmWriteId == null) {
       // Only create the movework for non-MM table. No action needed for a MM table.
       dummyMv = new MoveWork(null, null, null,
-          new LoadFileDesc(inputDirName, finalName, true, null, null, false), false,
-          SessionState.get().getLineageState());
+          new LoadFileDesc(inputDirName, finalName, true, null, null, false), false);
     }
     // Use the original fsOp path here in case of MM - while the new FSOP merges files inside the
     // MM directory, the original MoveTask still commits based on the parent. Note that this path
@@ -1382,7 +1383,7 @@ public final class GenMapRedUtils {
     Task<MoveWork> mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(
         mvTasks, fsopPath, fsInputDesc.isMmTable());
     ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work,
-        fsInputDesc.getMergeInputDirName(), finalName, mvTask, dependencyTask);
+        fsInputDesc.getMergeInputDirName(), finalName, mvTask, dependencyTask, lineageState);
 
     // keep the dynamic partition context in conditional task resolver context
     ConditionalResolverMergeFilesCtx mrCtx =
@@ -1730,15 +1731,16 @@ public final class GenMapRedUtils {
    *
    * @param condInputPath A path that the ConditionalTask uses as input for its sub-tasks.
    * @param linkedMoveWork A MoveWork that the ConditionalTask uses to link to its sub-tasks.
+   * @param lineageState A LineageState used to track lineage as paths are merged.
    * @return A new MoveWork that has the Conditional input path as source and the linkedMoveWork as target.
    */
   @VisibleForTesting
-  protected static MoveWork mergeMovePaths(Path condInputPath, MoveWork linkedMoveWork) {
+  protected static MoveWork mergeMovePaths(Path condInputPath, MoveWork linkedMoveWork,
+      LineageState lineageState) {
     MoveWork newWork = new MoveWork(linkedMoveWork);
     LoadFileDesc fileDesc = null;
     LoadTableDesc tableDesc = null;
 
-    LineageState lineageState = SessionState.get().getLineageState();
     if (linkedMoveWork.getLoadFileWork() != null) {
       fileDesc = new LoadFileDesc(linkedMoveWork.getLoadFileWork());
       fileDesc.setSourcePath(condInputPath);
@@ -1776,13 +1778,15 @@ public final class GenMapRedUtils {
    *          a MoveTask that may be linked to the conditional sub-tasks
    * @param dependencyTask
    *          a dependency task that may be linked to the conditional sub-tasks
+   * @param lineageState
+   *          the LineageState used to track lineage for the query
    * @return The conditional task
    */
   @SuppressWarnings("unchecked")
   private static ConditionalTask createCondTask(HiveConf conf,
       Task<? extends Serializable> currTask, MoveWork mvWork, Serializable mergeWork,
       Path condInputPath, Path condOutputPath, Task<MoveWork> moveTaskToLink,
-      DependencyCollectionTask dependencyTask) {
+      DependencyCollectionTask dependencyTask, LineageState lineageState) {
     if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
       Utilities.FILE_OP_LOGGER.trace("Creating conditional merge task for " + condInputPath);
     }
@@ -1795,7 +1799,8 @@ public final class GenMapRedUtils {
 
     Serializable workForMoveOnlyTask = moveWork;
     if (shouldMergeMovePaths) {
-      workForMoveOnlyTask = mergeMovePaths(condInputPath, moveTaskToLink.getWork());
+      workForMoveOnlyTask = mergeMovePaths(condInputPath, moveTaskToLink.getWork(),
+          lineageState);
     }
 
     // There are 3 options for this ConditionalTask:

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
index 338c185..f69c9a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * Utility class for index support.
@@ -221,10 +222,11 @@ public final class IndexUtils {
       StringBuilder command,
       LinkedHashMap<String, String> partSpec,
       String indexTableName,
-      String dbName){
+      String dbName,
+      LineageState lineageState){
     // Don't try to index optimize the query to build the index
     HiveConf.setBoolVar(builderConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
-    Driver driver = new Driver(builderConf, SessionState.get().getUserName());
+    Driver driver = new Driver(builderConf, SessionState.get().getUserName(), lineageState);
     driver.compile(command.toString(), false);
 
     Task<?> rootTask = driver.getPlan().getRootTasks().get(0);

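The index handlers now thread the caller's LineageState into the nested Driver instead of reaching for SessionState. A minimal sketch of the re-entrant compile shape shared by BitmapIndexHandler, CompactIndexHandler and IndexUtils.createRootTask (reduced and renamed for illustration; assume conf and command are prepared as in the patch):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.exec.Task;
    import org.apache.hadoop.hive.ql.session.LineageState;

    final class ReentrantCompile {
      // Compile a generated statement with the parent query's lineage state.
      static Task<?> compileRoot(HiveConf conf, String command, LineageState lineageState) {
        Driver driver = new Driver(conf, lineageState);
        driver.compile(command, false);
        return driver.getPlan().getRootTasks().get(0);
      }
    }
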
http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java
index e6c0771..0d72a1e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/Generator.java
@@ -47,7 +47,6 @@ import org.apache.hadoop.hive.ql.optimizer.Transform;
 import org.apache.hadoop.hive.ql.optimizer.lineage.LineageCtx.Index;
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.session.SessionState;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,9 +84,10 @@ public class Generator extends Transform {
         return pctx;
       }
     }
-
-    Index index = SessionState.get() != null ?
-      SessionState.get().getLineageState().getIndex() : new Index();
+    Index index = pctx.getQueryState().getLineageState().getIndex();
+    if (index == null) {
+      index = new Index();
+    }
 
     long sTime = System.currentTimeMillis();
     // Create the lineage context

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index a09b796..971a061 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -158,6 +158,7 @@ import org.apache.hadoop.hive.ql.plan.UnlockDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.UnlockTableDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrAlterWMPoolDesc;
 import org.apache.hadoop.hive.ql.plan.CreateOrDropTriggerToPoolMappingDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -1485,8 +1486,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
             partSpec == null ? new HashMap<>() : partSpec, null);
         ltd.setLbCtx(lbCtx);
         @SuppressWarnings("unchecked")
-        Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(
-          null, null, ltd, null, false, SessionState.get().getLineageState()), conf);
+        Task<MoveWork> moveTsk =
+            TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf);
         truncateTask.addDependentTask(moveTsk);
 
         // Recalculate the HDFS stats if auto gather stats is set
@@ -1703,8 +1704,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
             indexTbl, db, indexTblPartitions);
       }
 
+      LineageState lineageState = queryState.getLineageState();
       List<Task<?>> ret = handler.generateIndexBuildTaskList(baseTbl,
-          index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs());
+          index, indexTblPartitions, baseTblPartitions, indexTbl, getInputs(), getOutputs(),
+          lineageState);
       return ret;
     } catch (Exception e) {
       throw new SemanticException(e);
@@ -2146,8 +2149,8 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
           partSpec == null ? new HashMap<>() : partSpec, null);
       ltd.setLbCtx(lbCtx);
-      Task<MoveWork> moveTsk = TaskFactory.get(
-        new MoveWork(null, null, ltd, null, false, SessionState.get().getLineageState()), conf);
+      Task<MoveWork> moveTsk =
+          TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf);
       mergeTask.addDependentTask(moveTsk);
 
       if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
@@ -3539,7 +3542,7 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       }
       SessionState ss = SessionState.get();
       String uName = (ss == null? null: ss.getUserName());
-      Driver driver = new Driver(conf, uName);
+      Driver driver = new Driver(conf, uName, queryState.getLineageState());
       int rc = driver.compile(cmd.toString(), false);
       if (rc != 0) {
         throw new SemanticException(ErrorMsg.NO_VALID_PARTN.getMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
index 065c7e5..0eacfc0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
@@ -134,7 +134,7 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
         runCtx = new Context(conf);
         // runCtx and ctx share the configuration, but not isExplainPlan()
         runCtx.setExplainConfig(config);
-        Driver driver = new Driver(conf, runCtx);
+        Driver driver = new Driver(conf, runCtx, queryState.getLineageState());
         CommandProcessorResponse ret = driver.run(query);
         if(ret.getResponseCode() == 0) {
           // Note that we need to call getResults for simple fetch optimization.

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
index b6f1139..e6d4cbe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java
@@ -385,7 +385,8 @@ public class GenTezUtils {
           + fileSink.getConf().getDirName() + " to " + finalName);
       GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
           context.dependencyTask, context.moveTask,
-          hconf, context.currentTask);
+          hconf, context.currentTask,
+          parseContext.getQueryState().getLineageState());
     }
 
     FetchTask fetchTask = parseContext.getFetchTask();

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 83d53bc..c79df56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -391,7 +391,8 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
         Utilities.getTableDesc(table), new TreeMap<>(),
         replace ? LoadFileType.REPLACE_ALL : LoadFileType.OVERWRITE_EXISTING, txnId);
     loadTableWork.setStmtId(stmtId);
-    MoveWork mv = new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork, null, false, SessionState.get().getLineageState());
+    MoveWork mv = new MoveWork(x.getInputs(), x.getOutputs(), loadTableWork,
+        null, false);
     Task<?> loadTableTask = TaskFactory.get(mv, x.getConf());
     copyTask.addDependentTask(loadTableTask);
     x.getTasks().add(copyTask);
@@ -495,7 +496,7 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
       loadTableWork.setStmtId(stmtId);
       loadTableWork.setInheritTableSpecs(false);
       Task<?> loadPartTask = TaskFactory.get(new MoveWork(
-          x.getInputs(), x.getOutputs(), loadTableWork, null, false, SessionState.get().getLineageState()), x.getConf());
+          x.getInputs(), x.getOutputs(), loadTableWork, null, false), x.getConf());
       copyTask.addDependentTask(loadPartTask);
       addPartTask.addDependentTask(loadPartTask);
       x.getTasks().add(copyTask);

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
index f31775e..ccf1e66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IndexUpdater.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.IndexUtils;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 
 import java.io.Serializable;
 import java.util.LinkedList;
@@ -47,12 +48,14 @@ public class IndexUpdater {
   private Hive hive;
   private List<Task<? extends Serializable>> tasks;
   private Set<ReadEntity> inputs;
+  private LineageState lineageState;
 
-
-  public IndexUpdater(List<LoadTableDesc> loadTableWork, Set<ReadEntity> inputs, Configuration conf) {
+  public IndexUpdater(List<LoadTableDesc> loadTableWork, Set<ReadEntity> inputs, Configuration conf,
+      LineageState lineageState) {
     this.loadTableWork = loadTableWork;
     this.inputs = inputs;
     this.conf = new HiveConf(conf, IndexUpdater.class);
+    this.lineageState = lineageState;
     this.tasks = new LinkedList<Task<? extends Serializable>>();
   }
 
@@ -133,7 +136,7 @@ public class IndexUpdater {
   }
 
   private void compileRebuild(String query) {
-    Driver driver = new Driver(this.conf);
+    Driver driver = new Driver(this.conf, lineageState);
     driver.compile(query, false);
     tasks.addAll(driver.getPlan().getRootTasks());
     inputs.addAll(driver.getPlan().getInputs());

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
index cc956da..e600f7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java
@@ -302,7 +302,7 @@ public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer {
 
     Task<? extends Serializable> childTask = TaskFactory.get(
         new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true,
-            isLocal, SessionState.get().getLineageState()), conf
+            isLocal), conf
     );
     if (rTask != null) {
       rTask.addDependentTask(childTask);

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index 498b674..80556ae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -313,7 +313,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
       if ((!evDump) && (tblNameOrPattern != null) && !(tblNameOrPattern.isEmpty())) {
         ReplLoadWork replLoadWork =
             new ReplLoadWork(conf, loadPath.toString(), dbNameOrPattern, tblNameOrPattern,
-                SessionState.get().getLineageState(), SessionState.get().getTxnMgr().getCurrentTxnId());
+                queryState.getLineageState(), SessionState.get().getTxnMgr().getCurrentTxnId());
         rootTasks.add(TaskFactory.get(replLoadWork, conf, true));
         return;
       }
@@ -344,7 +344,7 @@ public class ReplicationSemanticAnalyzer extends BaseSemanticAnalyzer {
         }
 
         ReplLoadWork replLoadWork = new ReplLoadWork(conf, loadPath.toString(), dbNameOrPattern,
-            SessionState.get().getLineageState(), SessionState.get().getTxnMgr().getCurrentTxnId());
+            queryState.getLineageState(), SessionState.get().getTxnMgr().getCurrentTxnId());
         rootTasks.add(TaskFactory.get(replLoadWork, conf, true));
         //
         //        for (FileStatus dir : dirsInLoadPath) {

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 28e3621..dcda8b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7336,8 +7336,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private void handleLineage(LoadTableDesc ltd, Operator output)
       throws SemanticException {
-    if (ltd != null && SessionState.get() != null) {
-      SessionState.get().getLineageState()
+    if (ltd != null) {
+      queryState.getLineageState()
           .mapDirToOp(ltd.getSourcePath(), output);
     } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
 
@@ -7350,7 +7350,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         throw new SemanticException(e);
       }
 
-      SessionState.get().getLineageState()
+      queryState.getLineageState()
               .mapDirToOp(tlocation, output);
     }
   }
@@ -11685,7 +11685,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
             pCtx = t.transform(pCtx);
           }
           // we just use view name as location.
-          SessionState.get().getLineageState()
+          queryState.getLineageState()
               .mapDirToOp(new Path(createVwDesc.getViewName()), sinkOp);
         }
         return;

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 7b29370..24559b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.StatsWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -105,7 +106,8 @@ public abstract class TaskCompiler {
   }
 
   @SuppressWarnings({"nls", "unchecked"})
-  public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks,
+  public void compile(final ParseContext pCtx,
+      final List<Task<? extends Serializable>> rootTasks,
       final HashSet<ReadEntity> inputs, final HashSet<WriteEntity> outputs) throws SemanticException {
 
     Context ctx = pCtx.getContext();
@@ -218,12 +220,13 @@ public abstract class TaskCompiler {
     } else if (!isCStats) {
       for (LoadTableDesc ltd : loadTableWork) {
         Task<MoveWork> tsk = TaskFactory
-            .get(new MoveWork(null, null, ltd, null, false, SessionState.get().getLineageState()),
+            .get(new MoveWork(null, null, ltd, null, false),
                 conf);
         mvTask.add(tsk);
         // Check to see if we are stale'ing any indexes and auto-update them if we want
         if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEINDEXAUTOUPDATE)) {
-          IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf);
+          IndexUpdater indexUpdater = new IndexUpdater(loadTableWork, inputs, conf,
+              queryState.getLineageState());
           try {
             List<Task<? extends Serializable>> indexUpdateTasks = indexUpdater
                 .generateUpdateTasks();
@@ -248,7 +251,7 @@ public abstract class TaskCompiler {
           oneLoadFileForCtas = false;
         }
         mvTask.add(TaskFactory
-            .get(new MoveWork(null, null, null, lfd, false, SessionState.get().getLineageState()),
+            .get(new MoveWork(null, null, null, lfd, false),
                 conf));
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
index 604c8ae..c6c7bf7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
@@ -385,7 +385,7 @@ public class GenSparkUtils {
       LOG.info("using CombineHiveInputformat for the merge job");
       GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName,
           context.dependencyTask, context.moveTask,
-          hconf, context.currentTask);
+          hconf, context.currentTask, parseContext.getQueryState().getLineageState());
     }
 
     FetchTask fetchTask = parseContext.getFetchTask();

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
index 28a3374..49fe540 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
-import org.apache.hadoop.hive.ql.session.LineageState;
 
 /**
  * MoveWork.
@@ -39,13 +38,6 @@ public class MoveWork implements Serializable {
   private LoadTableDesc loadTableWork;
   private LoadFileDesc loadFileWork;
   private LoadMultiFilesDesc loadMultiFilesWork;
-  /*
-  these are sessionState objects that are copied over to work to allow for parallel execution.
-  based on the current use case the methods are selectively synchronized, which might need to be
-  taken care when using other methods.
-   */
-  private final LineageState sessionStateLineageState;
-
   private boolean checkFileFormat;
   private boolean srcLocal;
 
@@ -65,21 +57,18 @@ public class MoveWork implements Serializable {
   private boolean isNoop;
 
   public MoveWork() {
-    sessionStateLineageState = null;
   }
 
 
-  private MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
-      LineageState lineageState) {
+  private MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs) {
     this.inputs = inputs;
     this.outputs = outputs;
-    sessionStateLineageState = lineageState;
   }
 
   public MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork,
-      boolean checkFileFormat, boolean srcLocal, LineageState lineageState) {
-    this(inputs, outputs, lineageState);
+      boolean checkFileFormat, boolean srcLocal) {
+    this(inputs, outputs);
     if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
       Utilities.FILE_OP_LOGGER.trace("Creating MoveWork " + System.identityHashCode(this)
         + " with " + loadTableWork + "; " + loadFileWork);
@@ -92,8 +81,8 @@ public class MoveWork implements Serializable {
 
   public MoveWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
       final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork,
-      boolean checkFileFormat, LineageState lineageState) {
-    this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false, lineageState);
+      boolean checkFileFormat) {
+    this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false);
   }
 
   public MoveWork(final MoveWork o) {
@@ -104,7 +93,6 @@ public class MoveWork implements Serializable {
     srcLocal = o.isSrcLocal();
     inputs = o.getInputs();
     outputs = o.getOutputs();
-    sessionStateLineageState = o.sessionStateLineageState;
   }
 
   @Explain(displayName = "tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
@@ -166,7 +154,4 @@ public class MoveWork implements Serializable {
     this.srcLocal = srcLocal;
   }
   
-  public LineageState getLineagState() {
-    return sessionStateLineageState;
-  }
 }

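With the lineage field gone, MoveWork is built without any session-scoped state; a minimal sketch of the new five-argument call shape used by the callers above (the factory wrapper is illustrative):

    import java.util.HashSet;
    import org.apache.hadoop.hive.ql.hooks.ReadEntity;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
    import org.apache.hadoop.hive.ql.plan.MoveWork;

    final class MoveWorkFactory {
      // Build a table-load MoveWork; lineage is no longer threaded through here.
      static MoveWork forLoadTable(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
          LoadTableDesc ltd) {
        return new MoveWork(inputs, outputs, ltd, null /* no file work */, false);
      }
    }
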
http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/session/LineageState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/LineageState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/LineageState.java
index 056d614..82eeb35 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/LineageState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/LineageState.java
@@ -60,7 +60,7 @@ public class LineageState implements Serializable {
   /**
    * Constructor.
    */
-  LineageState() {
+  public LineageState() {
     dirToFop = new HashMap<>();
     linfo = new LineageInfo();
     index = new Index();

http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index bb6ddc6..d03f5e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -232,11 +232,6 @@ public class SessionState {
    */
   private Map<URI, HadoopShims.HdfsEncryptionShim> hdfsEncryptionShims = Maps.newHashMap();
 
-  /**
-   * Lineage state.
-   */
-  LineageState ls;
-
   private final String userName;
 
   /**
@@ -294,15 +289,6 @@ public class SessionState {
 
   private List<Closeable> cleanupItems = new LinkedList<Closeable>();
 
-  /**
-   * Get the lineage state stored in this session.
-   *
-   * @return LineageState
-   */
-  public LineageState getLineageState() {
-    return ls;
-  }
-
   public HiveConf getConf() {
     return sessionConf;
   }
@@ -387,7 +373,6 @@ public class SessionState {
       LOG.debug("SessionState user: " + userName);
     }
     isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT);
-    ls = new LineageState();
     resourceMaps = new ResourceMaps();
     // Must be deterministic order map for consistent q-test output across Java versions
     overriddenConfigurations = new LinkedHashMap<String, String>();

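The net effect of the SessionState change is that lineage becomes query-scoped: every call site that used to do SessionState.get().getLineageState() now obtains the state from its QueryState. A minimal sketch of the new access pattern, assuming a QueryState is in scope (as it is for semantic analyzers and parse contexts):

    import org.apache.hadoop.hive.ql.QueryState;
    import org.apache.hadoop.hive.ql.session.LineageState;

    final class LineageAccess {
      // Before this patch: SessionState.get().getLineageState(), shared by every
      // query on the session. After: the state travels with the query itself.
      static LineageState forQuery(QueryState queryState) {
        return queryState.getLineageState();
      }
    }
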
http://git-wip-us.apache.org/repos/asf/hive/blob/646ccce8/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
index 3406892..3c007a7 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc;
 import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
 import org.apache.hadoop.hive.ql.plan.MoveWork;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
+import org.apache.hadoop.hive.ql.session.LineageState;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -135,9 +136,10 @@ public class TestGenMapRedUtilsCreateConditionalTask {
   public void testMergePathWithInvalidMoveWorkThrowsException() {
     final Path condInputPath = new Path("s3a://bucket/scratch/-ext-10000");
     final MoveWork mockWork = mock(MoveWork.class);
+    final LineageState lineageState = new LineageState();
 
     when(mockWork.getLoadMultiFilesWork()).thenReturn(new LoadMultiFilesDesc());
-    GenMapRedUtils.mergeMovePaths(condInputPath, mockWork);
+    GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState);
   }
 
   @Test
@@ -146,12 +148,13 @@ public class TestGenMapRedUtilsCreateConditionalTask {
     final Path condOutputPath = new Path("s3a://bucket/scratch/-ext-10002");
     final Path targetMoveWorkPath = new Path("s3a://bucket/scratch/-ext-10003");
     final MoveWork mockWork = mock(MoveWork.class);
+    final LineageState lineageState = new LineageState();
     MoveWork newWork;
 
     // test using loadFileWork
     when(mockWork.getLoadFileWork()).thenReturn(new LoadFileDesc(
         condOutputPath, targetMoveWorkPath, false, "", "", false));
-    newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork);
+    newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState);
     assertNotNull(newWork);
     assertNotEquals(newWork, mockWork);
     assertEquals(condInputPath, newWork.getLoadFileWork().getSourcePath());
@@ -162,7 +165,7 @@ public class TestGenMapRedUtilsCreateConditionalTask {
     reset(mockWork);
     when(mockWork.getLoadTableWork()).thenReturn(new LoadTableDesc(
         condOutputPath, tableDesc, null, null));
-    newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork);
+    newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState);
     assertNotNull(newWork);
     assertNotEquals(newWork, mockWork);
     assertEquals(condInputPath, newWork.getLoadTableWork().getSourcePath());
@@ -181,7 +184,8 @@ public class TestGenMapRedUtilsCreateConditionalTask {
     Task<MoveWork> moveTask = createMoveTask(finalDirName, tableLocation);
     List<Task<MoveWork>> moveTaskList = Collections.singletonList(moveTask);
 
-    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask);
+    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null,
+        moveTaskList, hiveConf, dummyMRTask, new LineageState());
     ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0);
     Task<? extends Serializable> moveOnlyTask = conditionalTask.getListTasks().get(0);
     Task<? extends Serializable> mergeOnlyTask = conditionalTask.getListTasks().get(1);
@@ -221,7 +225,8 @@ public class TestGenMapRedUtilsCreateConditionalTask {
     Task<MoveWork> moveTask = createMoveTask(finalDirName, tableLocation);
     List<Task<MoveWork>> moveTaskList = Collections.singletonList(moveTask);
 
-    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask);
+    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null,
+        moveTaskList, hiveConf, dummyMRTask, new LineageState());
     ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0);
     Task<? extends Serializable> moveOnlyTask = conditionalTask.getListTasks().get(0);
     Task<? extends Serializable> mergeOnlyTask = conditionalTask.getListTasks().get(1);
@@ -255,7 +260,8 @@ public class TestGenMapRedUtilsCreateConditionalTask {
     Task<MoveWork> moveTask = createMoveTask(finalDirName, tableLocation);
     List<Task<MoveWork>> moveTaskList = Collections.singletonList(moveTask);
 
-    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null, moveTaskList, hiveConf, dummyMRTask);
+    GenMapRedUtils.createMRWorkForMergingFiles(fileSinkOperator, finalDirName, null,
+        moveTaskList, hiveConf, dummyMRTask, new LineageState());
     ConditionalTask conditionalTask = (ConditionalTask)dummyMRTask.getChildTasks().get(0);
     Task<? extends Serializable> moveOnlyTask = conditionalTask.getListTasks().get(0);
     Task<? extends Serializable> mergeOnlyTask = conditionalTask.getListTasks().get(1);


[10/50] [abbrv] hive git commit: HIVE-18263: Ptest executions are sometimes multiple times slower due to dying executor slaves (Adam Szita, reviewed by Barna Zsombor Klara)

Posted by ga...@apache.org.
HIVE-18263: Ptest executions are sometimes multiple times slower due to dying executor slaves (Adam Szita, reviewed by Barna Zsombor Klara)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/856d88db
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/856d88db
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/856d88db

Branch: refs/heads/standalone-metastore
Commit: 856d88db9a5c65c902ce6982710839e07d8548a9
Parents: e120bd8
Author: Peter Vary <pv...@cloudera.com>
Authored: Fri Dec 15 10:11:51 2017 +0100
Committer: Peter Vary <pv...@cloudera.com>
Committed: Fri Dec 15 10:11:51 2017 +0100

----------------------------------------------------------------------
 .../execution/context/CloudExecutionContextProvider.java | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/856d88db/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java
index 8b82497..e806563 100644
--- a/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java
+++ b/testutils/ptest2/src/main/java/org/apache/hive/ptest/execution/context/CloudExecutionContextProvider.java
@@ -223,6 +223,16 @@ public class CloudExecutionContextProvider implements ExecutionContextProvider {
       LOG.info("Attempting to create " + numRequired + " nodes");
       try {
         result.addAll(mCloudComputeService.createNodes(Math.min(mMaxHostsPerCreateRequest, numRequired)));
+
+        Set<String> newAddresses = new HashSet<String>();
+        for (NodeMetadata node : result) {
+          newAddresses.addAll(node.getPublicAddresses());
+        }
+        synchronized (mTerminatedHosts) {
+          for (String newAddress : newAddresses) {
+            mTerminatedHosts.remove(newAddress);
+          }
+        }
       } catch (RunNodesException e) {
         error = true;
         LOG.warn("Error creating nodes", e);
@@ -332,6 +342,7 @@ public class CloudExecutionContextProvider implements ExecutionContextProvider {
     synchronized (mTerminatedHosts) {
       terminatedHosts.putAll(mTerminatedHosts);
     }
+    LOG.info("Currently tracked terminated hosts: {}", terminatedHosts.keySet().toString());
     for (NodeMetadata node : getRunningNodes()) {
       String ip = publicIpOrHostname(node);
       if (terminatedHosts.containsKey(ip)) {

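The extra bookkeeping above guards against public-address reuse: a freshly provisioned slave can come back with an IP that an earlier, terminated slave held, and the stale entry in mTerminatedHosts would get the healthy replacement killed again. The idea in isolation, as a minimal sketch (names are illustrative, not the actual ptest classes):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    final class TerminatedHostTracker {
      private final Map<String, Long> terminated = new HashMap<>();

      // Record a host we terminated, keyed by its public address.
      synchronized void markTerminated(String address) {
        terminated.put(address, System.currentTimeMillis());
      }

      // When the cloud provider hands back reused addresses for new nodes,
      // clear them so a new node is not mistaken for the old dead one.
      synchronized void onNodesCreated(Set<String> newAddresses) {
        for (String address : newAddresses) {
          terminated.remove(address);
        }
      }
    }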

[35/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java
new file mode 100644
index 0000000..0ceb84a
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java
@@ -0,0 +1,887 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
+import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRespEntry;
+import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
+import org.apache.hadoop.hive.metastore.api.SchemaType;
+import org.apache.hadoop.hive.metastore.api.SchemaValidation;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.SerdeType;
+import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
+import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
+import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
+import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+// These tests run against a remote metastore, as that surfaces more issues in the Thrift layer
+public class TestHiveMetaStoreSchemaMethods {
+  private static Map<EventMessage.EventType, Integer> events;
+  private static Map<EventMessage.EventType, Integer> transactionalEvents;
+  private static Map<PreEventContext.PreEventType, Integer> preEvents;
+
+  private static IMetaStoreClient client;
+
+  @BeforeClass
+  public static void startMetastore() throws Exception {
+    Configuration conf = MetastoreConf.newMetastoreConf();
+    int port = MetaStoreTestUtils.findFreePort();
+    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
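+    // Register all three listener hooks (synchronous, transactional, and
+    // pre-event) so the tests can assert that each schema operation fires each one.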
+    MetastoreConf.setClass(conf, ConfVars.EVENT_LISTENERS, SchemaEventListener.class,
+        MetaStoreEventListener.class);
+    MetastoreConf.setClass(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS, TransactionalSchemaEventListener.class,
+        MetaStoreEventListener.class);
+    MetastoreConf.setClass(conf, ConfVars.PRE_EVENT_LISTENERS, SchemaPreEventListener.class,
+        MetaStorePreEventListener.class);
+    MetaStoreTestUtils.setConfForStandloneMode(conf);
+    MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+    client = new HiveMetaStoreClient(conf);
+  }
+
+  @Before
+  public void newMaps() {
+    events = new HashMap<>();
+    transactionalEvents = new HashMap<>();
+    preEvents = new HashMap<>();
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void getNonExistentSchema() throws TException {
+    client.getISchema("no.such.schema");
+  }
+
+  @Test
+  public void iSchema() throws TException {
+    String schemaName = uniqueSchemaName();
+    String schemaGroup = "group1";
+    String description = "This is a description";
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setCompatibility(SchemaCompatibility.FORWARD)
+        .setValidationLevel(SchemaValidation.LATEST)
+        .setCanEvolve(false)
+        .setSchemaGroup(schemaGroup)
+        .setDescription(description)
+        .build();
+    client.createISchema(schema);
+
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.CREATE_ISCHEMA));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.CREATE_ISCHEMA));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.CREATE_ISCHEMA));
+
+    schema = client.getISchema(schemaName);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA));
+
+    Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel());
+    Assert.assertFalse(schema.isCanEvolve());
+    Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
+    Assert.assertEquals(description, schema.getDescription());
+
+    schemaGroup = "new group";
+    description = "new description";
+    schema.setCompatibility(SchemaCompatibility.BOTH);
+    schema.setValidationLevel(SchemaValidation.ALL);
+    schema.setCanEvolve(true);
+    schema.setSchemaGroup(schemaGroup);
+    schema.setDescription(description);
+    client.alterISchema(schemaName, schema);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_ISCHEMA));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_ISCHEMA));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_ISCHEMA));
+
+    schema = client.getISchema(schemaName);
+    Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA));
+
+    Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
+    Assert.assertTrue(schema.isCanEvolve());
+    Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
+    Assert.assertEquals(description, schema.getDescription());
+
+    client.dropISchema(schemaName);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_ISCHEMA));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_ISCHEMA));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.DROP_ISCHEMA));
+    try {
+      client.getISchema(schemaName);
+      Assert.fail();
+    } catch (NoSuchObjectException e) {
+      // all good
+    }
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void schemaWithInvalidDatabase() throws TException {
+    ISchema schema = new ISchemaBuilder()
+        .setName("thisSchemaDoesntHaveADb")
+        .setDbName("no.such.database")
+        .setSchemaType(SchemaType.AVRO)
+        .build();
+    client.createISchema(schema);
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void schemaAlreadyExists() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.HIVE)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+
+    schema = client.getISchema(schemaName);
+    Assert.assertNotNull(schema);
+
+    Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.BACKWARD, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
+    Assert.assertTrue(schema.isCanEvolve());
+
+    // This second attempt to create it should throw
+    client.createISchema(schema);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void alterNonExistentSchema() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.HIVE)
+        .setName(schemaName)
+        .setDescription("a new description")
+        .build();
+    client.alterISchema(schemaName, schema);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void dropNonExistentSchema() throws TException {
+    client.dropISchema("no_such_schema");
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void createVersionOfNonExistentSchema() throws TException {
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName("noSchemaOfThisNameExists")
+        .setVersion(1)
+        .addCol("a", ColumnType.STRING_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+  }
+
+  @Test
+  public void addSchemaVersion() throws TException {
+    String schemaName = uniqueSchemaName();
+    int version = 1;
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+
+    String description = "very descriptive";
+    String schemaText = "this should look like json, but oh well";
+    String fingerprint = "this should be an md5 string";
+    String versionName = "why would I name a version?";
+    long creationTime = 10;
+    String serdeName = "serde_for_schema37";
+    String serializer = "org.apache.hadoop.hive.metastore.test.Serializer";
+    String deserializer = "org.apache.hadoop.hive.metastore.test.Deserializer";
+    String serdeDescription = "how do you describe a serde?";
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .setCreatedAt(creationTime)
+        .setState(SchemaVersionState.INITIATED)
+        .setDescription(description)
+        .setSchemaText(schemaText)
+        .setFingerprint(fingerprint)
+        .setName(versionName)
+        .setSerdeName(serdeName)
+        .setSerdeSerializerClass(serializer)
+        .setSerdeDeserializerClass(deserializer)
+        .setSerdeDescription(serdeDescription)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ADD_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION));
+
+    schemaVersion = client.getSchemaVersion(schemaName, version);
+    Assert.assertNotNull(schemaVersion);
+    Assert.assertEquals(schemaName, schemaVersion.getSchemaName());
+    Assert.assertEquals(version, schemaVersion.getVersion());
+    Assert.assertEquals(creationTime, schemaVersion.getCreatedAt());
+    Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState());
+    Assert.assertEquals(description, schemaVersion.getDescription());
+    Assert.assertEquals(schemaText, schemaVersion.getSchemaText());
+    Assert.assertEquals(fingerprint, schemaVersion.getFingerprint());
+    Assert.assertEquals(versionName, schemaVersion.getName());
+    Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName());
+    Assert.assertEquals(serializer, schemaVersion.getSerDe().getSerializerClass());
+    Assert.assertEquals(deserializer, schemaVersion.getSerDe().getDeserializerClass());
+    Assert.assertEquals(serdeDescription, schemaVersion.getSerDe().getDescription());
+    Assert.assertEquals(2, schemaVersion.getColsSize());
+    List<FieldSchema> cols = schemaVersion.getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals(ColumnType.INT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType());
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION));
+
+    client.dropSchemaVersion(schemaName, version);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.DROP_SCHEMA_VERSION));
+    try {
+      client.getSchemaVersion(schemaName, version);
+      Assert.fail();
+    } catch (NoSuchObjectException e) {
+      // all good
+    }
+  }
+
+  // Test that adding multiple versions of the same schema works
+  @Test
+  public void multipleSchemaVersions() throws TException {
+    String schemaName = uniqueSchemaName();
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(1)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(2)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("b", ColumnType.DATE_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(3)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("b", ColumnType.DATE_TYPE_NAME)
+        .addCol("c", ColumnType.TIMESTAMP_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+    Assert.assertEquals(3, (int)preEvents.get(PreEventContext.PreEventType.ADD_SCHEMA_VERSION));
+    Assert.assertEquals(3, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION));
+    Assert.assertEquals(3, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION));
+
+    schemaVersion = client.getSchemaLatestVersion(schemaName);
+    Assert.assertEquals(3, schemaVersion.getVersion());
+    Assert.assertEquals(3, schemaVersion.getColsSize());
+    List<FieldSchema> cols = schemaVersion.getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals("c", cols.get(2).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+    Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType());
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION));
+
+    List<SchemaVersion> versions = client.getSchemaAllVersions(schemaName);
+    Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION));
+    Assert.assertEquals(3, versions.size());
+    versions.sort(Comparator.comparingInt(SchemaVersion::getVersion));
+    Assert.assertEquals(1, versions.get(0).getVersion());
+    Assert.assertEquals(1, versions.get(0).getColsSize());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, versions.get(0).getCols().get(0).getType());
+
+    Assert.assertEquals(2, versions.get(1).getVersion());
+    Assert.assertEquals(2, versions.get(1).getColsSize());
+    cols = versions.get(1).getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+
+    Assert.assertEquals(3, versions.get(2).getVersion());
+    Assert.assertEquals(3, versions.get(2).getColsSize());
+    cols = versions.get(2).getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals("c", cols.get(2).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+    Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void nonExistentSchemaVersion() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+    client.getSchemaVersion(schemaName, 1);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void nonExistentSchemaVersionButOtherVersionsExist() throws TException {
+    String schemaName = uniqueSchemaName();
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(1)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    client.getSchemaVersion(schemaName, 2);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void getLatestSchemaButNoVersions() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+    client.getSchemaLatestVersion(schemaName);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void getLatestSchemaNoSuchSchema() throws TException {
+    client.getSchemaLatestVersion("no.such.schema.with.this.name");
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void getAllSchemaButNoVersions() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+    client.getSchemaAllVersions(schemaName);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void getAllSchemaNoSuchSchema() throws TException {
+    client.getSchemaAllVersions("no.such.schema.with.this.name");
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void addDuplicateSchemaVersion() throws TException {
+    String schemaName = uniqueSchemaName();
+    int version = 1;
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    client.addSchemaVersion(schemaVersion);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void mapSerDeNoSuchSchema() throws TException {
+    SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap());
+    client.mapSchemaVersionToSerde(uniqueSchemaName(), 1, serDeInfo.getName());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void mapSerDeNoSuchSchemaVersion() throws TException {
+    SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap());
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(uniqueSchemaName())
+        .build();
+    client.createISchema(schema);
+    client.mapSchemaVersionToSerde(schema.getName(), 3, serDeInfo.getName());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void mapNonExistentSerdeToSchemaVersion() throws TException {
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(uniqueSchemaName())
+        .build();
+    client.createISchema(schema);
+
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schema.getName())
+        .setVersion(1)
+        .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+    client.mapSchemaVersionToSerde(schema.getName(), schemaVersion.getVersion(), uniqueSerdeName());
+  }
+
+  @Test
+  public void mapSerdeToSchemaVersion() throws TException {
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(uniqueSchemaName())
+        .build();
+    client.createISchema(schema);
+
+    // Create a schema version with no serde, then map one to it
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schema.getName())
+        .setVersion(1)
+        .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap());
+    client.addSerDe(serDeInfo);
+
+    client.mapSchemaVersionToSerde(schema.getName(), schemaVersion.getVersion(), serDeInfo.getName());
+    schemaVersion = client.getSchemaVersion(schema.getName(), schemaVersion.getVersion());
+    Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName());
+
+    // Create a schema version with a serde, then remap it to a new one
+    String serDeName = uniqueSerdeName();
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schema.getName())
+        .setVersion(2)
+        .addCol("x", ColumnType.BOOLEAN_TYPE_NAME)
+        .setSerdeName(serDeName)
+        .setSerdeLib("x")
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    schemaVersion = client.getSchemaVersion(schema.getName(), 2);
+    Assert.assertEquals(serDeName, schemaVersion.getSerDe().getName());
+
+    serDeInfo = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap());
+    client.addSerDe(serDeInfo);
+    client.mapSchemaVersionToSerde(schema.getName(), 2, serDeInfo.getName());
+    schemaVersion = client.getSchemaVersion(schema.getName(), 2);
+    Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName());
+  }
+
+  @Test
+  public void addSerde() throws TException {
+    String serdeName = uniqueSerdeName();
+    SerDeInfo serDeInfo = new SerDeInfo(serdeName, "serdeLib", Collections.singletonMap("a", "b"));
+    serDeInfo.setSerializerClass("serializer");
+    serDeInfo.setDeserializerClass("deserializer");
+    serDeInfo.setDescription("description");
+    serDeInfo.setSerdeType(SerdeType.SCHEMA_REGISTRY);
+    client.addSerDe(serDeInfo);
+
+    serDeInfo = client.getSerDe(serdeName);
+    Assert.assertEquals(serdeName, serDeInfo.getName());
+    Assert.assertEquals("serdeLib", serDeInfo.getSerializationLib());
+    Assert.assertEquals(1, serDeInfo.getParametersSize());
+    Assert.assertEquals("b", serDeInfo.getParameters().get("a"));
+    Assert.assertEquals("serializer", serDeInfo.getSerializerClass());
+    Assert.assertEquals("deserializer", serDeInfo.getDeserializerClass());
+    Assert.assertEquals("description", serDeInfo.getDescription());
+    Assert.assertEquals(SerdeType.SCHEMA_REGISTRY, serDeInfo.getSerdeType());
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void duplicateSerde() throws TException {
+    String serdeName = uniqueSerdeName();
+    SerDeInfo serDeInfo = new SerDeInfo(serdeName, "x", Collections.emptyMap());
+    client.addSerDe(serDeInfo);
+    client.addSerDe(serDeInfo);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void noSuchSerDe() throws TException {
+    client.getSerDe(uniqueSerdeName());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void setVersionStateNoSuchSchema() throws TException {
+    client.setSchemaVersionState("no.such.schema", 1, SchemaVersionState.INITIATED);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void setVersionStateNoSuchVersion() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+    client.setSchemaVersionState(schemaName, 1, SchemaVersionState.INITIATED);
+  }
+
+  @Test
+  public void setVersionState() throws TException {
+    String schemaName = uniqueSchemaName();
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .build();
+    client.createISchema(schema);
+
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(1)
+        .addCol("a", ColumnType.BINARY_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion);
+
+    schemaVersion = client.getSchemaVersion(schemaName, 1);
+    Assert.assertNull(schemaVersion.getState());
+
+    client.setSchemaVersionState(schemaName, 1, SchemaVersionState.INITIATED);
+    Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
+    Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
+    schemaVersion = client.getSchemaVersion(schemaName, 1);
+    Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState());
+
+    client.setSchemaVersionState(schemaName, 1, SchemaVersionState.REVIEWED);
+    Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION));
+    Assert.assertEquals(2, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
+    Assert.assertEquals(2, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION));
+    schemaVersion = client.getSchemaVersion(schemaName, 1);
+    Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void dropNonExistentSchemaVersion() throws TException {
+    client.dropSchemaVersion("ther is no schema named this", 23);
+  }
+
+  @Test
+  public void schemaQuery() throws TException {
+    String schemaName1 = uniqueSchemaName();
+    ISchema schema1 = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName1)
+        .build();
+    client.createISchema(schema1);
+
+    String schemaName2 = uniqueSchemaName();
+    ISchema schema2 = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName2)
+        .build();
+    client.createISchema(schema2);
+
+    SchemaVersion schemaVersion1_1 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName1)
+        .setVersion(1)
+        .addCol("alpha", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("beta", ColumnType.DATE_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion1_1);
+
+    SchemaVersion schemaVersion1_2 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName1)
+        .setVersion(2)
+        .addCol("alpha", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("beta", ColumnType.DATE_TYPE_NAME)
+        .addCol("gamma", ColumnType.BIGINT_TYPE_NAME, "namespace=x")
+        .build();
+    client.addSchemaVersion(schemaVersion1_2);
+
+    SchemaVersion schemaVersion2_1 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName2)
+        .setVersion(1)
+        .addCol("ALPHA", ColumnType.SMALLINT_TYPE_NAME)
+        .addCol("delta", ColumnType.DOUBLE_TYPE_NAME)
+        .build();
+    client.addSchemaVersion(schemaVersion2_1);
+
+    SchemaVersion schemaVersion2_2 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName2)
+        .setVersion(2)
+        .addCol("ALPHA", ColumnType.SMALLINT_TYPE_NAME)
+        .addCol("delta", ColumnType.DOUBLE_TYPE_NAME)
+        .addCol("epsilon", ColumnType.STRING_TYPE_NAME, "namespace=x")
+        .build();
+    client.addSchemaVersion(schemaVersion2_2);
+
+    // Query that should return nothing
+    FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
+    rqst.setColName("x");
+    rqst.setColNamespace("y");
+    rqst.setType("z");
+    FindSchemasByColsResp rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(0, rsp.getSchemaVersionsSize());
+
+    // Query that should fetch one column
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColName("gamma");
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(1, rsp.getSchemaVersionsSize());
+    Assert.assertEquals(schemaName1, rsp.getSchemaVersions().get(0).getSchemaName());
+    Assert.assertEquals(2, rsp.getSchemaVersions().get(0).getVersion());
+
+    // fetch 2 in same schema
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColName("beta");
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(2, rsp.getSchemaVersionsSize());
+    List<FindSchemasByColsRespEntry> results = new ArrayList<>(rsp.getSchemaVersions());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName1, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // fetch across schemas
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColName("alpha");
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(4, rsp.getSchemaVersionsSize());
+    results = new ArrayList<>(rsp.getSchemaVersions());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName1, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+    Assert.assertEquals(schemaName2, results.get(2).getSchemaName());
+    Assert.assertEquals(1, results.get(2).getVersion());
+    Assert.assertEquals(schemaName2, results.get(3).getSchemaName());
+    Assert.assertEquals(2, results.get(3).getVersion());
+
+    // fetch by namespace
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColNamespace("namespace=x");
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(2, rsp.getSchemaVersionsSize());
+    results = new ArrayList<>(rsp.getSchemaVersions());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(2, results.get(0).getVersion());
+    Assert.assertEquals(schemaName2, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // fetch by name and type
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColName("alpha");
+    rqst.setType(ColumnType.SMALLINT_TYPE_NAME);
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(2, rsp.getSchemaVersionsSize());
+    results = new ArrayList<>(rsp.getSchemaVersions());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName2, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName2, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // Make sure matching name but wrong type doesn't return
+    rqst = new FindSchemasByColsRqst();
+    rqst.setColName("alpha");
+    rqst.setType(ColumnType.STRING_TYPE_NAME);
+    rsp = client.getSchemaByCols(rqst);
+    Assert.assertEquals(0, rsp.getSchemaVersionsSize());
+  }
+
+  @Test(expected = MetaException.class)
+  public void schemaVersionQueryNoNameOrNamespace() throws TException {
+    FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
+    rqst.setType(ColumnType.STRING_TYPE_NAME);
+    client.getSchemaByCols(rqst);
+  }
+
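+  // One shared counter keeps schema and serde names unique across every test in this class.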
+  private static int nextSchemaNum = 1;
+
+  private String uniqueSchemaName() {
+    return "uniqueschema" + nextSchemaNum++;
+  }
+
+  private String uniqueSerdeName() {
+    return "uniqueSerde" + nextSchemaNum++;
+  }
+
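+  // Each listener below simply counts invocations per event type; the test
+  // methods assert those counts to prove every hook fired exactly as expected.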
+  public static class SchemaEventListener extends MetaStoreEventListener {
+
+    public SchemaEventListener(Configuration config) {
+      super(config);
+    }
+
+    @Override
+    public void onCreateISchema(CreateISchemaEvent createISchemaEvent) throws MetaException {
+      Integer cnt = events.get(EventMessage.EventType.CREATE_ISCHEMA);
+      events.put(EventMessage.EventType.CREATE_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAlterISchema(AlterISchemaEvent alterISchemaEvent) throws MetaException {
+      Integer cnt = events.get(EventMessage.EventType.ALTER_ISCHEMA);
+      events.put(EventMessage.EventType.ALTER_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onDropISchema(DropISchemaEvent dropISchemaEvent) throws MetaException {
+      Integer cnt = events.get(EventMessage.EventType.DROP_ISCHEMA);
+      events.put(EventMessage.EventType.DROP_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAddSchemaVersion(AddSchemaVersionEvent addSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = events.get(EventMessage.EventType.ADD_SCHEMA_VERSION);
+      events.put(EventMessage.EventType.ADD_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAlterSchemaVersion(AlterSchemaVersionEvent alterSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION);
+      events.put(EventMessage.EventType.ALTER_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onDropSchemaVersion(DropSchemaVersionEvent dropSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = events.get(EventMessage.EventType.DROP_SCHEMA_VERSION);
+      events.put(EventMessage.EventType.DROP_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+  }
+
+  public static class TransactionalSchemaEventListener extends MetaStoreEventListener {
+
+    public TransactionalSchemaEventListener(Configuration config) {
+      super(config);
+    }
+
+    @Override
+    public void onCreateISchema(CreateISchemaEvent createISchemaEvent) throws MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.CREATE_ISCHEMA);
+      transactionalEvents.put(EventMessage.EventType.CREATE_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAlterISchema(AlterISchemaEvent alterISchemaEvent) throws MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.ALTER_ISCHEMA);
+      transactionalEvents.put(EventMessage.EventType.ALTER_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onDropISchema(DropISchemaEvent dropISchemaEvent) throws MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.DROP_ISCHEMA);
+      transactionalEvents.put(EventMessage.EventType.DROP_ISCHEMA, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAddSchemaVersion(AddSchemaVersionEvent addSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION);
+      transactionalEvents.put(EventMessage.EventType.ADD_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onAlterSchemaVersion(AlterSchemaVersionEvent alterSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION);
+      transactionalEvents.put(EventMessage.EventType.ALTER_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+
+    @Override
+    public void onDropSchemaVersion(DropSchemaVersionEvent dropSchemaVersionEvent) throws
+        MetaException {
+      Integer cnt = transactionalEvents.get(EventMessage.EventType.DROP_SCHEMA_VERSION);
+      transactionalEvents.put(EventMessage.EventType.DROP_SCHEMA_VERSION, cnt == null ? 1 : cnt + 1);
+    }
+  }
+
+  public static class SchemaPreEventListener extends MetaStorePreEventListener {
+
+    public SchemaPreEventListener(Configuration config) {
+      super(config);
+    }
+
+    @Override
+    public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectException,
+        InvalidOperationException {
+      Integer cnt = preEvents.get(context.getEventType());
+      preEvents.put(context.getEventType(), cnt == null ? 1 : cnt + 1);
+    }
+  }
+
+}
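
For readers skimming this patch, the client-side flow the tests above exercise
reduces to a handful of IMetaStoreClient calls. The sketch below is
illustrative only: it reuses the builders and client methods shown in the diff,
while the schema and column names are invented for the example.

    // Create a schema, then attach a first version to it.
    ISchema schema = new ISchemaBuilder()
        .setSchemaType(SchemaType.AVRO)
        .setName("example_schema")                      // hypothetical name
        .build();
    client.createISchema(schema);

    SchemaVersion v1 = new SchemaVersionBuilder()
        .setSchemaName("example_schema")
        .setVersion(1)
        .addCol("id", ColumnType.BIGINT_TYPE_NAME)      // hypothetical column
        .build();
    client.addSchemaVersion(v1);

    // Read the latest version back, then clean up.
    SchemaVersion latest = client.getSchemaLatestVersion("example_schema");
    client.dropSchemaVersion("example_schema", latest.getVersion());
    client.dropISchema("example_schema");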

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 372dee6..0b7f06d 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics;
 import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
@@ -96,36 +94,6 @@ public class TestObjectStore {
     }
   }
 
-  public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
-    @Override
-    public String convertExprToFilter(byte[] expr) throws MetaException {
-      return null;
-    }
-
-    @Override
-    public boolean filterPartitionsByExpr(List<FieldSchema> partColumns,
-                                          byte[] expr, String defaultPartitionName,
-                                          List<String> partitionNames)
-        throws MetaException {
-      return false;
-    }
-
-    @Override
-    public FileMetadataExprType getMetadataType(String inputFormat) {
-      return null;
-    }
-
-    @Override
-    public SearchArgument createSarg(byte[] expr) {
-      return null;
-    }
-
-    @Override
-    public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
-      return null;
-    }
-  }
-
   @Before
   public void setUp() throws Exception {
     Configuration conf = MetastoreConf.newMetastoreConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
new file mode 100644
index 0000000..ffe0e54
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java
@@ -0,0 +1,550 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.ISchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
+import org.apache.hadoop.hive.metastore.api.SchemaType;
+import org.apache.hadoop.hive.metastore.api.SchemaValidation;
+import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+public class TestObjectStoreSchemaMethods {
+  private ObjectStore objectStore;
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = MetastoreConf.newMetastoreConf();
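+    // Use the real DefaultPartitionExpressionProxy here; this patch drops the
+    // hand-rolled mock proxy that TestObjectStore previously carried.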
+    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
+        DefaultPartitionExpressionProxy.class.getName());
+
+    objectStore = new ObjectStore();
+    objectStore.setConf(conf);
+  }
+
+  @Test
+  public void iSchema() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    ISchema schema = objectStore.getISchema("no.such.schema");
+    Assert.assertNull(schema);
+
+    String schemaName = "schema1";
+    String schemaGroup = "group1";
+    String description = "This is a description";
+    schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .setCompatibility(SchemaCompatibility.FORWARD)
+        .setValidationLevel(SchemaValidation.LATEST)
+        .setCanEvolve(false)
+        .setSchemaGroup(schemaGroup)
+        .setDescription(description)
+        .build();
+    objectStore.createISchema(schema);
+
+    schema = objectStore.getISchema(schemaName);
+    Assert.assertNotNull(schema);
+
+    Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel());
+    Assert.assertFalse(schema.isCanEvolve());
+    Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
+    Assert.assertEquals(description, schema.getDescription());
+
+    schemaGroup = "new group";
+    description = "new description";
+    schema.setCompatibility(SchemaCompatibility.BOTH);
+    schema.setValidationLevel(SchemaValidation.ALL);
+    schema.setCanEvolve(true);
+    schema.setSchemaGroup(schemaGroup);
+    schema.setDescription(description);
+    objectStore.alterISchema(schemaName, schema);
+
+    schema = objectStore.getISchema(schemaName);
+    Assert.assertNotNull(schema);
+
+    Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
+    Assert.assertTrue(schema.isCanEvolve());
+    Assert.assertEquals(schemaGroup, schema.getSchemaGroup());
+    Assert.assertEquals(description, schema.getDescription());
+
+    objectStore.dropISchema(schemaName);
+    schema = objectStore.getISchema(schemaName);
+    Assert.assertNull(schema);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void schemaWithInvalidDatabase() throws MetaException, AlreadyExistsException,
+      NoSuchObjectException {
+    ISchema schema = new ISchemaBuilder()
+        .setName("thisSchemaDoesntHaveADb")
+        .setDbName("no.such.database")
+        .setSchemaType(SchemaType.AVRO)
+        .build();
+    objectStore.createISchema(schema);
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void schemaAlreadyExists() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName = "schema2";
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.HIVE)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema);
+
+    schema = objectStore.getISchema(schemaName);
+    Assert.assertNotNull(schema);
+
+    Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType());
+    Assert.assertEquals(schemaName, schema.getName());
+    Assert.assertEquals(SchemaCompatibility.BACKWARD, schema.getCompatibility());
+    Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel());
+    Assert.assertTrue(schema.isCanEvolve());
+
+    // This second attempt to create it should throw
+    objectStore.createISchema(schema);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void alterNonExistentSchema() throws MetaException, NoSuchObjectException {
+    String schemaName = "noSuchSchema";
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.HIVE)
+        .setName(schemaName)
+        .setDescription("a new description")
+        .build();
+    objectStore.alterISchema(schemaName, schema);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void dropNonExistentSchema() throws MetaException, NoSuchObjectException {
+    objectStore.dropISchema("no_such_schema");
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void createVersionOfNonExistentSchema() throws MetaException, AlreadyExistsException,
+      NoSuchObjectException {
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName("noSchemaOfThisNameExists")
+        .setVersion(1)
+        .addCol("a", ColumnType.STRING_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+  }
+
+  @Test
+  public void addSchemaVersion() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName = "schema37";
+    int version = 1;
+    SchemaVersion schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNull(schemaVersion);
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema);
+
+    String description = "very descriptive";
+    String schemaText = "this should look like json, but oh well";
+    String fingerprint = "this should be an md5 string";
+    String versionName = "why would I name a version?";
+    long creationTime = 10;
+    String serdeName = "serde_for_schema37";
+    String serializer = "org.apache.hadoop.hive.metastore.test.Serializer";
+    String deserializer = "org.apache.hadoop.hive.metastore.test.Deserializer";
+    String serdeDescription = "how do you describe a serde?";
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .setCreatedAt(creationTime)
+        .setState(SchemaVersionState.INITIATED)
+        .setDescription(description)
+        .setSchemaText(schemaText)
+        .setFingerprint(fingerprint)
+        .setName(versionName)
+        .setSerdeName(serdeName)
+        .setSerdeSerializerClass(serializer)
+        .setSerdeDeserializerClass(deserializer)
+        .setSerdeDescription(serdeDescription)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNotNull(schemaVersion);
+    Assert.assertEquals(schemaName, schemaVersion.getSchemaName());
+    Assert.assertEquals(version, schemaVersion.getVersion());
+    Assert.assertEquals(creationTime, schemaVersion.getCreatedAt());
+    Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState());
+    Assert.assertEquals(description, schemaVersion.getDescription());
+    Assert.assertEquals(schemaText, schemaVersion.getSchemaText());
+    Assert.assertEquals(fingerprint, schemaVersion.getFingerprint());
+    Assert.assertEquals(versionName, schemaVersion.getName());
+    Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName());
+    Assert.assertEquals(serializer, schemaVersion.getSerDe().getSerializerClass());
+    Assert.assertEquals(deserializer, schemaVersion.getSerDe().getDeserializerClass());
+    Assert.assertEquals(serdeDescription, schemaVersion.getSerDe().getDescription());
+    Assert.assertEquals(2, schemaVersion.getColsSize());
+    List<FieldSchema> cols = schemaVersion.getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals(ColumnType.INT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType());
+
+    objectStore.dropSchemaVersion(schemaName, version);
+    schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNull(schemaVersion);
+  }
+
+  // Test that adding multiple versions of the same schema works
+  @Test
+  public void multipleSchemaVersions() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName = "schema195";
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema);
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(1)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(2)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("b", ColumnType.DATE_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(3)
+        .addCol("a", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("b", ColumnType.DATE_TYPE_NAME)
+        .addCol("c", ColumnType.TIMESTAMP_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    schemaVersion = objectStore.getLatestSchemaVersion(schemaName);
+    Assert.assertEquals(3, schemaVersion.getVersion());
+    Assert.assertEquals(3, schemaVersion.getColsSize());
+    List<FieldSchema> cols = schemaVersion.getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals("c", cols.get(2).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+    Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType());
+
+    schemaVersion = objectStore.getLatestSchemaVersion("no.such.schema.with.this.name");
+    Assert.assertNull(schemaVersion);
+
+    List<SchemaVersion> versions =
+        objectStore.getAllSchemaVersion("there.really.isnt.a.schema.named.this");
+    Assert.assertNull(versions);
+
+    versions = objectStore.getAllSchemaVersion(schemaName);
+    Assert.assertEquals(3, versions.size());
+    versions.sort(Comparator.comparingInt(SchemaVersion::getVersion));
+    Assert.assertEquals(1, versions.get(0).getVersion());
+    Assert.assertEquals(1, versions.get(0).getColsSize());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, versions.get(0).getCols().get(0).getType());
+
+    Assert.assertEquals(2, versions.get(1).getVersion());
+    Assert.assertEquals(2, versions.get(1).getColsSize());
+    cols = versions.get(1).getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+
+    Assert.assertEquals(3, versions.get(2).getVersion());
+    Assert.assertEquals(3, versions.get(2).getColsSize());
+    cols = versions.get(2).getCols();
+    Collections.sort(cols);
+    Assert.assertEquals("a", cols.get(0).getName());
+    Assert.assertEquals("b", cols.get(1).getName());
+    Assert.assertEquals("c", cols.get(2).getName());
+    Assert.assertEquals(ColumnType.BIGINT_TYPE_NAME, cols.get(0).getType());
+    Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType());
+    Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType());
+  }
+
+  @Test(expected = AlreadyExistsException.class)
+  public void addDuplicateSchemaVersion() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName = "schema1234";
+    int version = 1;
+    SchemaVersion schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNull(schemaVersion);
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    objectStore.addSchemaVersion(schemaVersion);
+  }
+
+  @Test
+  public void alterSchemaVersion() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName = "schema371234";
+    int version = 1;
+    SchemaVersion schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNull(schemaVersion);
+
+    ISchema schema = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema);
+
+    schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .setState(SchemaVersionState.INITIATED)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion);
+
+    schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNotNull(schemaVersion);
+    Assert.assertEquals(schemaName, schemaVersion.getSchemaName());
+    Assert.assertEquals(version, schemaVersion.getVersion());
+    Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState());
+
+    schemaVersion.setState(SchemaVersionState.REVIEWED);
+    String serdeName = "serde for " + schemaName;
+    SerDeInfo serde = new SerDeInfo(serdeName, "", Collections.emptyMap());
+    String serializer = "org.apache.hadoop.hive.metastore.test.Serializer";
+    String deserializer = "org.apache.hadoop.hive.metastore.test.Deserializer";
+    serde.setSerializerClass(serializer);
+    serde.setDeserializerClass(deserializer);
+    schemaVersion.setSerDe(serde);
+    objectStore.alterSchemaVersion(schemaName, version, schemaVersion);
+
+    schemaVersion = objectStore.getSchemaVersion(schemaName, version);
+    Assert.assertNotNull(schemaVersion);
+    Assert.assertEquals(schemaName, schemaVersion.getSchemaName());
+    Assert.assertEquals(version, schemaVersion.getVersion());
+    Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState());
+    Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName());
+    Assert.assertEquals(serializer, schemaVersion.getSerDe().getSerializerClass());
+    Assert.assertEquals(deserializer, schemaVersion.getSerDe().getDeserializerClass());
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void alterNonExistentSchemaVersion() throws MetaException, AlreadyExistsException,
+      NoSuchObjectException {
+    String schemaName = "schema3723asdflj";
+    int version = 37;
+    SchemaVersion schemaVersion = new SchemaVersionBuilder()
+        .setSchemaName(schemaName)
+        .setVersion(version)
+        .addCol("a", ColumnType.INT_TYPE_NAME)
+        .addCol("b", ColumnType.FLOAT_TYPE_NAME)
+        .setState(SchemaVersionState.INITIATED)
+        .build();
+    objectStore.alterSchemaVersion(schemaName, version, schemaVersion);
+  }
+
+  @Test(expected = NoSuchObjectException.class)
+  public void dropNonExistentSchemaVersion() throws NoSuchObjectException, MetaException {
+    objectStore.dropSchemaVersion("there is no schema named this", 23);
+  }
+
+  @Test
+  public void schemaQuery() throws TException {
+    String dbName = createUniqueDatabaseForTest();
+    String schemaName1 = "a_schema1";
+    ISchema schema1 = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName1)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema1);
+
+    String schemaName2 = "a_schema2";
+    ISchema schema2 = new ISchemaBuilder()
+        .setSchemaType(SchemaType.AVRO)
+        .setName(schemaName2)
+        .setDbName(dbName)
+        .build();
+    objectStore.createISchema(schema2);
+
+    SchemaVersion schemaVersion1_1 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName1)
+        .setVersion(1)
+        .addCol("alpha", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("beta", ColumnType.DATE_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion1_1);
+
+    SchemaVersion schemaVersion1_2 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName1)
+        .setVersion(2)
+        .addCol("alpha", ColumnType.BIGINT_TYPE_NAME)
+        .addCol("beta", ColumnType.DATE_TYPE_NAME)
+        .addCol("gamma", ColumnType.BIGINT_TYPE_NAME, "namespace=x")
+        .build();
+    objectStore.addSchemaVersion(schemaVersion1_2);
+
+    SchemaVersion schemaVersion2_1 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName2)
+        .setVersion(1)
+        .addCol("ALPHA", ColumnType.SMALLINT_TYPE_NAME)
+        .addCol("delta", ColumnType.DOUBLE_TYPE_NAME)
+        .build();
+    objectStore.addSchemaVersion(schemaVersion2_1);
+
+    SchemaVersion schemaVersion2_2 = new SchemaVersionBuilder()
+        .setSchemaName(schemaName2)
+        .setVersion(2)
+        .addCol("ALPHA", ColumnType.SMALLINT_TYPE_NAME)
+        .addCol("delta", ColumnType.DOUBLE_TYPE_NAME)
+        .addCol("epsilon", ColumnType.STRING_TYPE_NAME, "namespace=x")
+        .build();
+    objectStore.addSchemaVersion(schemaVersion2_2);
+
+    // Query that should return nothing
+    List<SchemaVersion> results = objectStore.getSchemaVersionsByColumns("x", "y", "z");
+    Assert.assertEquals(0, results.size());
+
+    // Query matching a column that appears in only one schema version
+    results = objectStore.getSchemaVersionsByColumns("gamma", null, null);
+    Assert.assertEquals(1, results.size());
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(2, results.get(0).getVersion());
+
+    // Fetch two versions of the same schema
+    results = objectStore.getSchemaVersionsByColumns("beta", null, null);
+    Assert.assertEquals(2, results.size());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName1, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // fetch across schemas
+    results = objectStore.getSchemaVersionsByColumns("alpha", null, null);
+    Assert.assertEquals(4, results.size());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName1, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+    Assert.assertEquals(schemaName2, results.get(2).getSchemaName());
+    Assert.assertEquals(1, results.get(2).getVersion());
+    Assert.assertEquals(schemaName2, results.get(3).getSchemaName());
+    Assert.assertEquals(2, results.get(3).getVersion());
+
+    // fetch by namespace
+    results = objectStore.getSchemaVersionsByColumns(null, "namespace=x", null);
+    Assert.assertEquals(2, results.size());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName1, results.get(0).getSchemaName());
+    Assert.assertEquals(2, results.get(0).getVersion());
+    Assert.assertEquals(schemaName2, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // fetch by name and type
+    results = objectStore.getSchemaVersionsByColumns("alpha", null, ColumnType.SMALLINT_TYPE_NAME);
+    Assert.assertEquals(2, results.size());
+    Collections.sort(results);
+    Assert.assertEquals(schemaName2, results.get(0).getSchemaName());
+    Assert.assertEquals(1, results.get(0).getVersion());
+    Assert.assertEquals(schemaName2, results.get(1).getSchemaName());
+    Assert.assertEquals(2, results.get(1).getVersion());
+
+    // A matching name with the wrong type should return nothing
+    results = objectStore.getSchemaVersionsByColumns("alpha", null, ColumnType.STRING_TYPE_NAME);
+    Assert.assertEquals(0, results.size());
+  }
+
+  @Test(expected = MetaException.class)
+  public void schemaVersionQueryNoNameOrNamespace() throws MetaException {
+    objectStore.getSchemaVersionsByColumns(null, null, ColumnType.STRING_TYPE_NAME);
+  }
+
+  private static int dbNum = 1;
+  private String createUniqueDatabaseForTest() throws MetaException, InvalidObjectException {
+    String dbName = "uniquedbfortest" + dbNum++;
+    Database db = new DatabaseBuilder()
+        .setName(dbName)
+        .setLocation("somewhere")
+        .setDescription("descriptive")
+        .build();
+    objectStore.createDatabase(db);
+    return dbName;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index d95fcfa..49dce94 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
 import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.TableType;
-import org.apache.hadoop.hive.metastore.TestObjectStore.MockPartitionExpressionProxy;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;


[41/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 6ca56cb..3ca6f9a 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1381,6 +1381,106 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    * @throws \metastore\MetaException
    */
   public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request);
+  /**
+   * @param \metastore\ISchema $schema
+   * @throws \metastore\AlreadyExistsException
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function create_ischema(\metastore\ISchema $schema);
+  /**
+   * @param string $schemaName
+   * @param \metastore\ISchema $newSchema
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function alter_ischema($schemaName, \metastore\ISchema $newSchema);
+  /**
+   * @param string $schemaName
+   * @return \metastore\ISchema
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function get_ischema($schemaName);
+  /**
+   * @param string $schemaName
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\InvalidOperationException
+   * @throws \metastore\MetaException
+   */
+  public function drop_ischema($schemaName);
+  /**
+   * @param \metastore\SchemaVersion $schemaVersion
+   * @throws \metastore\AlreadyExistsException
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function add_schema_version(\metastore\SchemaVersion $schemaVersion);
+  /**
+   * @param string $schemaName
+   * @param int $version
+   * @return \metastore\SchemaVersion
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function get_schema_version($schemaName, $version);
+  /**
+   * @param string $schemaName
+   * @return \metastore\SchemaVersion
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function get_schema_latest_version($schemaName);
+  /**
+   * @param string $schemaName
+   * @return \metastore\SchemaVersion[]
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function get_schema_all_versions($schemaName);
+  /**
+   * @param string $schemaName
+   * @param int $version
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function drop_schema_version($schemaName, $version);
+  /**
+   * @param \metastore\FindSchemasByColsRqst $rqst
+   * @return \metastore\FindSchemasByColsResp
+   * @throws \metastore\MetaException
+   */
+  public function get_schemas_by_cols(\metastore\FindSchemasByColsRqst $rqst);
+  /**
+   * @param string $schemaName
+   * @param int $version
+   * @param string $serdeName
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function map_schema_version_to_serde($schemaName, $version, $serdeName);
+  /**
+   * @param string $schemaName
+   * @param int $version
+   * @param int $state
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\InvalidOperationException
+   * @throws \metastore\MetaException
+   */
+  public function set_schema_version_state($schemaName, $version, $state);
+  /**
+   * @param \metastore\SerDeInfo $serde
+   * @throws \metastore\AlreadyExistsException
+   * @throws \metastore\MetaException
+   */
+  public function add_serde(\metastore\SerDeInfo $serde);
+  /**
+   * @param string $serdeName
+   * @return \metastore\SerDeInfo
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function get_serde($serdeName);
 }
 
 class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
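
For orientation, a minimal sketch of calling the new schema-registry RPCs from PHP follows. Only the method names and signatures come from the interface docblocks above; the Thrift runtime setup (TSocket/TBufferedTransport/TBinaryProtocol), the host and port, and the ISchema field names are illustrative assumptions, not part of this commit.

use Thrift\Transport\TSocket;
use Thrift\Transport\TBufferedTransport;
use Thrift\Protocol\TBinaryProtocol;

// Illustrative connection to a metastore; host and port are assumptions.
$socket    = new TSocket('metastore-host', 9083);
$transport = new TBufferedTransport($socket);
$protocol  = new TBinaryProtocol($transport);
$client    = new \metastore\ThriftHiveMetastoreClient($protocol);

$transport->open();

// Field names ('schemaType', 'name', 'dbName') are assumed from the Thrift IDL.
$schema = new \metastore\ISchema(array(
  'schemaType' => \metastore\SchemaType::AVRO,
  'name'       => 'events_schema',
  'dbName'     => 'default',
));
$client->create_ischema($schema);   // declared to throw AlreadyExistsException on a duplicate

$fetched = $client->get_ischema('events_schema');
// Declared to throw NoSuchObjectException while the schema has no versions.
$latest = $client->get_schema_latest_version('events_schema');

$transport->close();
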
@@ -11583,327 +11683,4322 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("create_or_drop_wm_trigger_to_pool_mapping failed: unknown result");
   }
 
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
+  public function create_ischema(\metastore\ISchema $schema)
+  {
+    $this->send_create_ischema($schema);
+    $this->recv_create_ischema();
+  }
 
-class ThriftHiveMetastore_getMetaConf_args {
-  static $_TSPEC;
+  public function send_create_ischema(\metastore\ISchema $schema)
+  {
+    $args = new \metastore\ThriftHiveMetastore_create_ischema_args();
+    $args->schema = $schema;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'create_ischema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('create_ischema', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
 
-  /**
-   * @var string
-   */
-  public $key = null;
+  public function recv_create_ischema()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_ischema_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
 
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'key',
-          'type' => TType::STRING,
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['key'])) {
-        $this->key = $vals['key'];
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
       }
+      $result = new \metastore\ThriftHiveMetastore_create_ischema_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
     }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    if ($result->o3 !== null) {
+      throw $result->o3;
+    }
+    return;
   }
 
-  public function getName() {
-    return 'ThriftHiveMetastore_getMetaConf_args';
+  public function alter_ischema($schemaName, \metastore\ISchema $newSchema)
+  {
+    $this->send_alter_ischema($schemaName, $newSchema);
+    $this->recv_alter_ischema();
   }
 
-  public function read($input)
+  public function send_alter_ischema($schemaName, \metastore\ISchema $newSchema)
   {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
+    $args = new \metastore\ThriftHiveMetastore_alter_ischema_args();
+    $args->schemaName = $schemaName;
+    $args->newSchema = $newSchema;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
     {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->key);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
+      thrift_protocol_write_binary($this->output_, 'alter_ischema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
     }
-    $xfer += $input->readStructEnd();
-    return $xfer;
-  }
-
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args');
-    if ($this->key !== null) {
-      $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
-      $xfer += $output->writeString($this->key);
-      $xfer += $output->writeFieldEnd();
+    else
+    {
+      $this->output_->writeMessageBegin('alter_ischema', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
     }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
   }
 
-}
-
-class ThriftHiveMetastore_getMetaConf_result {
-  static $_TSPEC;
-
-  /**
-   * @var string
-   */
-  public $success = null;
-  /**
-   * @var \metastore\MetaException
-   */
-  public $o1 = null;
+  public function recv_alter_ischema()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_ischema_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
 
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        0 => array(
-          'var' => 'success',
-          'type' => TType::STRING,
-          ),
-        1 => array(
-          'var' => 'o1',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\MetaException',
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['success'])) {
-        $this->success = $vals['success'];
-      }
-      if (isset($vals['o1'])) {
-        $this->o1 = $vals['o1'];
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
       }
+      $result = new \metastore\ThriftHiveMetastore_alter_ischema_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
     }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    return;
   }
 
-  public function getName() {
-    return 'ThriftHiveMetastore_getMetaConf_result';
+  public function get_ischema($schemaName)
+  {
+    $this->send_get_ischema($schemaName);
+    return $this->recv_get_ischema();
   }
 
-  public function read($input)
+  public function send_get_ischema($schemaName)
   {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
+    $args = new \metastore\ThriftHiveMetastore_get_ischema_args();
+    $args->schemaName = $schemaName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
     {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 0:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->success);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 1:
-          if ($ftype == TType::STRUCT) {
-            $this->o1 = new \metastore\MetaException();
-            $xfer += $this->o1->read($input);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
+      thrift_protocol_write_binary($this->output_, 'get_ischema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_ischema', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
     }
-    $xfer += $input->readStructEnd();
-    return $xfer;
   }
 
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result');
-    if ($this->success !== null) {
-      $xfer += $output->writeFieldBegin('success', TType::STRING, 0);
-      $xfer += $output->writeString($this->success);
-      $xfer += $output->writeFieldEnd();
+  public function recv_get_ischema()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_ischema_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_ischema_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
     }
-    if ($this->o1 !== null) {
-      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
-      $xfer += $this->o1->write($output);
-      $xfer += $output->writeFieldEnd();
+    if ($result->success !== null) {
+      return $result->success;
     }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_ischema failed: unknown result");
   }
 
-}
+  public function drop_ischema($schemaName)
+  {
+    $this->send_drop_ischema($schemaName);
+    $this->recv_drop_ischema();
+  }
 
-class ThriftHiveMetastore_setMetaConf_args {
-  static $_TSPEC;
+  public function send_drop_ischema($schemaName)
+  {
+    $args = new \metastore\ThriftHiveMetastore_drop_ischema_args();
+    $args->schemaName = $schemaName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'drop_ischema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('drop_ischema', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
 
-  /**
-   * @var string
-   */
-  public $key = null;
-  /**
-   * @var string
-   */
-  public $value = null;
+  public function recv_drop_ischema()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_ischema_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
 
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'key',
-          'type' => TType::STRING,
-          ),
-        2 => array(
-          'var' => 'value',
-          'type' => TType::STRING,
-          ),
-        );
-    }
-    if (is_array($vals)) {
-      if (isset($vals['key'])) {
-        $this->key = $vals['key'];
-      }
-      if (isset($vals['value'])) {
-        $this->value = $vals['value'];
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
       }
+      $result = new \metastore\ThriftHiveMetastore_drop_ischema_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    if ($result->o3 !== null) {
+      throw $result->o3;
     }
+    return;
   }
 
-  public function getName() {
-    return 'ThriftHiveMetastore_setMetaConf_args';
+  public function add_schema_version(\metastore\SchemaVersion $schemaVersion)
+  {
+    $this->send_add_schema_version($schemaVersion);
+    $this->recv_add_schema_version();
   }
 
-  public function read($input)
+  public function send_add_schema_version(\metastore\SchemaVersion $schemaVersion)
   {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
+    $args = new \metastore\ThriftHiveMetastore_add_schema_version_args();
+    $args->schemaVersion = $schemaVersion;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
     {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
-      switch ($fid)
-      {
-        case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->key);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->value);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        default:
-          $xfer += $input->skip($ftype);
-          break;
-      }
-      $xfer += $input->readFieldEnd();
+      thrift_protocol_write_binary($this->output_, 'add_schema_version', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('add_schema_version', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
     }
-    $xfer += $input->readStructEnd();
-    return $xfer;
   }
 
-  public function write($output) {
-    $xfer = 0;
-    $xfer += $output->writeStructBegin('ThriftHiveMetastore_setMetaConf_args');
-    if ($this->key !== null) {
-      $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
-      $xfer += $output->writeString($this->key);
-      $xfer += $output->writeFieldEnd();
+  public function recv_add_schema_version()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_schema_version_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_add_schema_version_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
     }
-    if ($this->value !== null) {
-      $xfer += $output->writeFieldBegin('value', TType::STRING, 2);
-      $xfer += $output->writeString($this->value);
-      $xfer += $output->writeFieldEnd();
+    if ($result->o1 !== null) {
+      throw $result->o1;
     }
-    $xfer += $output->writeFieldStop();
-    $xfer += $output->writeStructEnd();
-    return $xfer;
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    if ($result->o3 !== null) {
+      throw $result->o3;
+    }
+    return;
   }
 
-}
-
-class ThriftHiveMetastore_setMetaConf_result {
-  static $_TSPEC;
-
-  /**
-   * @var \metastore\MetaException
-   */
-  public $o1 = null;
+  public function get_schema_version($schemaName, $version)
+  {
+    $this->send_get_schema_version($schemaName, $version);
+    return $this->recv_get_schema_version();
+  }
 
-  public function __construct($vals=null) {
-    if (!isset(self::$_TSPEC)) {
-      self::$_TSPEC = array(
-        1 => array(
-          'var' => 'o1',
-          'type' => TType::STRUCT,
-          'class' => '\metastore\MetaException',
-          ),
-        );
+  public function send_get_schema_version($schemaName, $version)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_schema_version_args();
+    $args->schemaName = $schemaName;
+    $args->version = $version;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_schema_version', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
     }
-    if (is_array($vals)) {
-      if (isset($vals['o1'])) {
-        $this->o1 = $vals['o1'];
-      }
+    else
+    {
+      $this->output_->writeMessageBegin('get_schema_version', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
     }
   }
 
-  public function getName() {
-    return 'ThriftHiveMetastore_setMetaConf_result';
-  }
-
-  public function read($input)
+  public function recv_get_schema_version()
   {
-    $xfer = 0;
-    $fname = null;
-    $ftype = 0;
-    $fid = 0;
-    $xfer += $input->readStructBegin($fname);
-    while (true)
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_schema_version_result', $this->input_->isStrictRead());
+    else
     {
-      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
-      if ($ftype == TType::STOP) {
-        break;
-      }
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_schema_version_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_schema_version failed: unknown result");
+  }
+
+  public function get_schema_latest_version($schemaName)
+  {
+    $this->send_get_schema_latest_version($schemaName);
+    return $this->recv_get_schema_latest_version();
+  }
+
+  public function send_get_schema_latest_version($schemaName)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_schema_latest_version_args();
+    $args->schemaName = $schemaName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_schema_latest_version', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_schema_latest_version', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_schema_latest_version()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_schema_latest_version_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_schema_latest_version_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_schema_latest_version failed: unknown result");
+  }
+
+  public function get_schema_all_versions($schemaName)
+  {
+    $this->send_get_schema_all_versions($schemaName);
+    return $this->recv_get_schema_all_versions();
+  }
+
+  public function send_get_schema_all_versions($schemaName)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_schema_all_versions_args();
+    $args->schemaName = $schemaName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_schema_all_versions', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_schema_all_versions', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_schema_all_versions()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_schema_all_versions_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_schema_all_versions_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_schema_all_versions failed: unknown result");
+  }
+
+  public function drop_schema_version($schemaName, $version)
+  {
+    $this->send_drop_schema_version($schemaName, $version);
+    $this->recv_drop_schema_version();
+  }
+
+  public function send_drop_schema_version($schemaName, $version)
+  {
+    $args = new \metastore\ThriftHiveMetastore_drop_schema_version_args();
+    $args->schemaName = $schemaName;
+    $args->version = $version;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'drop_schema_version', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('drop_schema_version', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_drop_schema_version()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_schema_version_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_drop_schema_version_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    return;
+  }
+
+  public function get_schemas_by_cols(\metastore\FindSchemasByColsRqst $rqst)
+  {
+    $this->send_get_schemas_by_cols($rqst);
+    return $this->recv_get_schemas_by_cols();
+  }
+
+  public function send_get_schemas_by_cols(\metastore\FindSchemasByColsRqst $rqst)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_schemas_by_cols_args();
+    $args->rqst = $rqst;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_schemas_by_cols', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_schemas_by_cols', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_schemas_by_cols()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_schemas_by_cols_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_schemas_by_cols_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    throw new \Exception("get_schemas_by_cols failed: unknown result");
+  }
+
+  public function map_schema_version_to_serde($schemaName, $version, $serdeName)
+  {
+    $this->send_map_schema_version_to_serde($schemaName, $version, $serdeName);
+    $this->recv_map_schema_version_to_serde();
+  }
+
+  public function send_map_schema_version_to_serde($schemaName, $version, $serdeName)
+  {
+    $args = new \metastore\ThriftHiveMetastore_map_schema_version_to_serde_args();
+    $args->schemaName = $schemaName;
+    $args->version = $version;
+    $args->serdeName = $serdeName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'map_schema_version_to_serde', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('map_schema_version_to_serde', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_map_schema_version_to_serde()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_map_schema_version_to_serde_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_map_schema_version_to_serde_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    return;
+  }
+
+  public function set_schema_version_state($schemaName, $version, $state)
+  {
+    $this->send_set_schema_version_state($schemaName, $version, $state);
+    $this->recv_set_schema_version_state();
+  }
+
+  public function send_set_schema_version_state($schemaName, $version, $state)
+  {
+    $args = new \metastore\ThriftHiveMetastore_set_schema_version_state_args();
+    $args->schemaName = $schemaName;
+    $args->version = $version;
+    $args->state = $state;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'set_schema_version_state', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('set_schema_version_state', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_set_schema_version_state()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_set_schema_version_state_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_set_schema_version_state_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    if ($result->o3 !== null) {
+      throw $result->o3;
+    }
+    return;
+  }
+
+  public function add_serde(\metastore\SerDeInfo $serde)
+  {
+    $this->send_add_serde($serde);
+    $this->recv_add_serde();
+  }
+
+  public function send_add_serde(\metastore\SerDeInfo $serde)
+  {
+    $args = new \metastore\ThriftHiveMetastore_add_serde_args();
+    $args->serde = $serde;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'add_serde', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('add_serde', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_add_serde()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_serde_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_add_serde_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    return;
+  }
+
+  public function get_serde($serdeName)
+  {
+    $this->send_get_serde($serdeName);
+    return $this->recv_get_serde();
+  }
+
+  public function send_get_serde($serdeName)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_serde_args();
+    $args->serdeName = $serdeName;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_serde', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_serde', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_serde()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_serde_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_serde_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    throw new \Exception("get_serde failed: unknown result");
+  }
+
+}
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+class ThriftHiveMetastore_getMetaConf_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $key = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'key',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['key'])) {
+        $this->key = $vals['key'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_getMetaConf_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->key);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args');
+    if ($this->key !== null) {
+      $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
+      $xfer += $output->writeString($this->key);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_getMetaConf_result {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::STRING,
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_getMetaConf_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::STRING, 0);
+      $xfer += $output->writeString($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_setMetaConf_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $key = null;
+  /**
+   * @var string
+   */
+  public $value = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'key',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'value',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['key'])) {
+        $this->key = $vals['key'];
+      }
+      if (isset($vals['value'])) {
+        $this->value = $vals['value'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_setMetaConf_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->key);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->value);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_setMetaConf_args');
+    if ($this->key !== null) {
+      $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
+      $xfer += $output->writeString($this->key);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->value !== null) {
+      $xfer += $output->writeFieldBegin('value', TType::STRING, 2);
+      $xfer += $output->writeString($this->value);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_setMetaConf_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_setMetaConf_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_setMetaConf_result');
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_create_database_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\Database
+   */
+  public $database = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'database',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Database',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['database'])) {
+        $this->database = $vals['database'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_create_database_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->database = new \metastore\Database();
+            $xfer += $this->database->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args');
+    if ($this->database !== null) {
+      if (!is_object($this->database)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1);
+      $xfer += $this->database->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_create_database_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\AlreadyExistsException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\InvalidObjectException
+   */
+  public $o2 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o3 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\AlreadyExistsException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidObjectException',
+          ),
+        3 => array(
+          'var' => 'o3',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+      if (isset($vals['o3'])) {
+        $this->o3 = $vals['o3'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_create_database_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\AlreadyExistsException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\InvalidObjectException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRUCT) {
+            $this->o3 = new \metastore\MetaException();
+            $xfer += $this->o3->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result');
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o3 !== null) {
+      $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+      $xfer += $this->o3->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_database_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $name = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'name',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['name'])) {
+        $this->name = $vals['name'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_database_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_args');
+    if ($this->name !== null) {
+      $xfer += $output->writeFieldBegin('name', TType::STRING, 1);
+      $xfer += $output->writeString($this->name);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_database_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\Database
+   */
+  public $success = null;
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o2 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Database',
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_database_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::STRUCT) {
+            $this->success = new \metastore\Database();
+            $xfer += $this->success->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\MetaException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_result');
+    if ($this->success !== null) {
+      if (!is_object($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+      $xfer += $this->success->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_drop_database_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $name = null;
+  /**
+   * @var bool
+   */
+  public $deleteData = null;
+  /**
+   * @var bool
+   */
+  public $cascade = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'name',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'deleteData',
+          'type' => TType::BOOL,
+          ),
+        3 => array(
+          'var' => 'cascade',
+          'type' => TType::BOOL,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['name'])) {
+        $this->name = $vals['name'];
+      }
+      if (isset($vals['deleteData'])) {
+        $this->deleteData = $vals['deleteData'];
+      }
+      if (isset($vals['cascade'])) {
+        $this->cascade = $vals['cascade'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_drop_database_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->deleteData);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->cascade);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_args');
+    if ($this->name !== null) {
+      $xfer += $output->writeFieldBegin('name', TType::STRING, 1);
+      $xfer += $output->writeString($this->name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->deleteData !== null) {
+      $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2);
+      $xfer += $output->writeBool($this->deleteData);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->cascade !== null) {
+      $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 3);
+      $xfer += $output->writeBool($this->cascade);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_drop_database_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\InvalidOperationException
+   */
+  public $o2 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o3 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidOperationException',
+          ),
+        3 => array(
+          'var' => 'o3',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+      if (isset($vals['o3'])) {
+        $this->o3 = $vals['o3'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_drop_database_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\InvalidOperationException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRUCT) {
+            $this->o3 = new \metastore\MetaException();
+            $xfer += $this->o3->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_result');
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o3 !== null) {
+      $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3);
+      $xfer += $this->o3->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_databases_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $pattern = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'pattern',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['pattern'])) {
+        $this->pattern = $vals['pattern'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_databases_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->pattern);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_args');
+    if ($this->pattern !== null) {
+      $xfer += $output->writeFieldBegin('pattern', TType::STRING, 1);
+      $xfer += $output->writeString($this->pattern);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_databases_result {
+  static $_TSPEC;
+
+  /**
+   * @var string[]
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_databases_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size715 = 0;
+            $_etype718 = 0;
+            $xfer += $input->readListBegin($_etype718, $_size715);
+            for ($_i719 = 0; $_i719 < $_size715; ++$_i719)
+            {
+              $elem720 = null;
+              $xfer += $input->readString($elem720);
+              $this->success []= $elem720;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_databases_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRING, count($this->success));
+        {
+          foreach ($this->success as $iter721)
+          {
+            $xfer += $output->writeString($iter721);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_all_databases_args {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_all_databases_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_args');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_all_databases_result {
+  static $_TSPEC;
+
+  /**
+   * @var string[]
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_all_databases_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size722 = 0;
+            $_etype725 = 0;
+            $xfer += $input->readListBegin($_etype725, $_size722);
+            for ($_i726 = 0; $_i726 < $_size722; ++$_i726)
+            {
+              $elem727 = null;
+              $xfer += $input->readString($elem727);
+              $this->success []= $elem727;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_databases_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRING, count($this->success));
+        {
+          foreach ($this->success as $iter728)
+          {
+            $xfer += $output->writeString($iter728);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_alter_database_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $dbname = null;
+  /**
+   * @var \metastore\Database
+   */
+  public $db = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'dbname',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'db',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Database',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['dbname'])) {
+        $this->dbname = $vals['dbname'];
+      }
+      if (isset($vals['db'])) {
+        $this->db = $vals['db'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_alter_database_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dbname);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->db = new \metastore\Database();
+            $xfer += $this->db->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_database_args');
+    if ($this->dbname !== null) {
+      $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1);
+      $xfer += $output->writeString($this->dbname);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->db !== null) {
+      if (!is_object($this->db)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('db', TType::STRUCT, 2);
+      $xfer += $this->db->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_alter_database_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o2 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_alter_database_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_alter_database_result');
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_type_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $name = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'name',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['name'])) {
+        $this->name = $vals['name'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_type_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_type_args');
+    if ($this->name !== null) {
+      $xfer += $output->writeFieldBegin('name', TType::STRING, 1);
+      $xfer += $output->writeString($this->name);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_type_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\Type
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o2 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Type',
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_type_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::STRUCT) {
+            $this->success = new \metastore\Type();
+            $xfer += $this->success->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_type_result');
+    if ($this->success !== null) {
+      if (!is_object($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+      $xfer += $this->success->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_create_type_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\Type
+   */
+  public $type = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'type',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Type',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['type'])) {
+        $this->type = $vals['type'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_create_type_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->type = new \metastore\Type();
+            $xfer += $this->type->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_type_args');
+    if ($this->type !== null) {
+      if (!is_object($this->type)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('type', TType::STRUCT, 1);
+      $xfer += $this->type->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_create_type_result {
+  static $_TSPEC;
+
+  /**
+   * @var bool
+   */
+  public $success = null;
+  /**
+   * @var \metastore\AlreadyExistsException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\InvalidObjectException
+   */
+  public $o2 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o3 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::BOOL,
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\AlreadyExistsException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\InvalidObjectException',
+          ),
+        3 => array(
+          'var' => 'o3',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+      if (isset($vals['o3'])) {
+        $this->o3 = $vals['o3'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_create_type_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+  

<TRUNCATED>
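
Although the archive cuts the diff off above, every *_args / *_result class in this file follows the same mechanical pattern, and none of them is called directly: the generated \metastore\ThriftHiveMetastoreClient packs each method's arguments into its _args struct, serializes it with write(), and deserializes the server's reply into the matching _result struct, whose read() is what surfaces the mapped exceptions. A minimal PHP sketch of driving two of the wrapped calls, assuming the stock Thrift PHP runtime and a metastore Thrift endpoint on localhost:9083 (endpoint and database name are illustrative assumptions, not taken from this commit):

<?php
use Thrift\Protocol\TBinaryProtocol;
use Thrift\Transport\TBufferedTransport;
use Thrift\Transport\TSocket;

// Assumed endpoint; 9083 is the conventional metastore Thrift port.
$socket    = new TSocket('localhost', 9083);
$transport = new TBufferedTransport($socket);
$protocol  = new TBinaryProtocol($transport);

// Generated client; its get_database() builds a
// ThriftHiveMetastore_get_database_args, writes it, then reads a
// ThriftHiveMetastore_get_database_result and returns its success field.
$client = new \metastore\ThriftHiveMetastoreClient($protocol, $protocol);

$transport->open();
$db    = $client->get_database('default');   // Database struct, or NoSuchObjectException
$names = $client->get_all_databases();       // string[] from get_all_databases_result
$transport->close();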

[42/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index fc57141..192d0db 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -400,6 +400,34 @@ import org.slf4j.LoggerFactory;
 
     public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
 
+    public void create_ischema(ISchema schema) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public void alter_ischema(String schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public ISchema get_ischema(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public void drop_ischema(String schemaName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+    public void add_schema_version(SchemaVersion schemaVersion) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public SchemaVersion get_schema_version(String schemaName, int version) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public SchemaVersion get_schema_latest_version(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public List<SchemaVersion> get_schema_all_versions(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public void drop_schema_version(String schemaName, int version) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst) throws MetaException, org.apache.thrift.TException;
+
+    public void map_schema_version_to_serde(String schemaName, int version, String serdeName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public void set_schema_version_state(String schemaName, int version, SchemaVersionState state) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+    public void add_serde(SerDeInfo serde) throws AlreadyExistsException, MetaException, org.apache.thrift.TException;
+
+    public SerDeInfo get_serde(String serdeName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface {
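
The new Iface methods above give the schema registry a small CRUD surface: create/alter/get/drop for schemas, add/get/drop for schema versions, plus serde registration and lookup of schemas by columns. A minimal sketch of a caller driving a few of them through the generated synchronous client; the endpoint, schema name, and unpopulated ISchema are illustrative assumptions, not part of this change:

import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaVersion;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class SchemaRegistrySketch {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; 9083 is the conventional metastore Thrift port.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    ISchema schema = new ISchema();   // field values elided; see the ISchema thrift struct
    client.create_ischema(schema);    // AlreadyExistsException if the name is taken

    SchemaVersion latest = client.get_schema_latest_version("example_schema");
    client.drop_schema_version("example_schema", latest.getVersion());
    client.drop_ischema("example_schema");

    transport.close();
  }
}

Each such call maps onto a send_*/recv_* pair like those added to the Client in the hunk below: send_* wraps the arguments in an _args bean and ships it with sendBase(), while recv_* unpacks the _result bean and rethrows whichever declared exception it carries.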
@@ -762,6 +790,34 @@ import org.slf4j.LoggerFactory;
 
     public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void create_ischema(ISchema schema, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void alter_ischema(String schemaName, ISchema newSchema, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_ischema(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void drop_ischema(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void add_schema_version(SchemaVersion schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_schema_version(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_schema_latest_version(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_schema_all_versions(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void drop_schema_version(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_schemas_by_cols(FindSchemasByColsRqst rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void map_schema_version_to_serde(String schemaName, int version, String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void set_schema_version_state(String schemaName, int version, SchemaVersionState state, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void add_serde(SerDeInfo serde, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_serde(String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
   }
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
@@ -5967,6 +6023,404 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "create_or_drop_wm_trigger_to_pool_mapping failed: unknown result");
     }
 
+    public void create_ischema(ISchema schema) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_create_ischema(schema);
+      recv_create_ischema();
+    }
+
+    public void send_create_ischema(ISchema schema) throws org.apache.thrift.TException
+    {
+      create_ischema_args args = new create_ischema_args();
+      args.setSchema(schema);
+      sendBase("create_ischema", args);
+    }
+
+    public void recv_create_ischema() throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      create_ischema_result result = new create_ischema_result();
+      receiveBase(result, "create_ischema");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
+    public void alter_ischema(String schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_alter_ischema(schemaName, newSchema);
+      recv_alter_ischema();
+    }
+
+    public void send_alter_ischema(String schemaName, ISchema newSchema) throws org.apache.thrift.TException
+    {
+      alter_ischema_args args = new alter_ischema_args();
+      args.setSchemaName(schemaName);
+      args.setNewSchema(newSchema);
+      sendBase("alter_ischema", args);
+    }
+
+    public void recv_alter_ischema() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      alter_ischema_result result = new alter_ischema_result();
+      receiveBase(result, "alter_ischema");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public ISchema get_ischema(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_ischema(schemaName);
+      return recv_get_ischema();
+    }
+
+    public void send_get_ischema(String schemaName) throws org.apache.thrift.TException
+    {
+      get_ischema_args args = new get_ischema_args();
+      args.setSchemaName(schemaName);
+      sendBase("get_ischema", args);
+    }
+
+    public ISchema recv_get_ischema() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_ischema_result result = new get_ischema_result();
+      receiveBase(result, "get_ischema");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_ischema failed: unknown result");
+    }
+
+    public void drop_ischema(String schemaName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_drop_ischema(schemaName);
+      recv_drop_ischema();
+    }
+
+    public void send_drop_ischema(String schemaName) throws org.apache.thrift.TException
+    {
+      drop_ischema_args args = new drop_ischema_args();
+      args.setSchemaName(schemaName);
+      sendBase("drop_ischema", args);
+    }
+
+    public void recv_drop_ischema() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      drop_ischema_result result = new drop_ischema_result();
+      receiveBase(result, "drop_ischema");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
+    public void add_schema_version(SchemaVersion schemaVersion) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_add_schema_version(schemaVersion);
+      recv_add_schema_version();
+    }
+
+    public void send_add_schema_version(SchemaVersion schemaVersion) throws org.apache.thrift.TException
+    {
+      add_schema_version_args args = new add_schema_version_args();
+      args.setSchemaVersion(schemaVersion);
+      sendBase("add_schema_version", args);
+    }
+
+    public void recv_add_schema_version() throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      add_schema_version_result result = new add_schema_version_result();
+      receiveBase(result, "add_schema_version");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
+    public SchemaVersion get_schema_version(String schemaName, int version) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_schema_version(schemaName, version);
+      return recv_get_schema_version();
+    }
+
+    public void send_get_schema_version(String schemaName, int version) throws org.apache.thrift.TException
+    {
+      get_schema_version_args args = new get_schema_version_args();
+      args.setSchemaName(schemaName);
+      args.setVersion(version);
+      sendBase("get_schema_version", args);
+    }
+
+    public SchemaVersion recv_get_schema_version() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_schema_version_result result = new get_schema_version_result();
+      receiveBase(result, "get_schema_version");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema_version failed: unknown result");
+    }
+
+    public SchemaVersion get_schema_latest_version(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_schema_latest_version(schemaName);
+      return recv_get_schema_latest_version();
+    }
+
+    public void send_get_schema_latest_version(String schemaName) throws org.apache.thrift.TException
+    {
+      get_schema_latest_version_args args = new get_schema_latest_version_args();
+      args.setSchemaName(schemaName);
+      sendBase("get_schema_latest_version", args);
+    }
+
+    public SchemaVersion recv_get_schema_latest_version() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_schema_latest_version_result result = new get_schema_latest_version_result();
+      receiveBase(result, "get_schema_latest_version");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema_latest_version failed: unknown result");
+    }
+
+    public List<SchemaVersion> get_schema_all_versions(String schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_schema_all_versions(schemaName);
+      return recv_get_schema_all_versions();
+    }
+
+    public void send_get_schema_all_versions(String schemaName) throws org.apache.thrift.TException
+    {
+      get_schema_all_versions_args args = new get_schema_all_versions_args();
+      args.setSchemaName(schemaName);
+      sendBase("get_schema_all_versions", args);
+    }
+
+    public List<SchemaVersion> recv_get_schema_all_versions() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_schema_all_versions_result result = new get_schema_all_versions_result();
+      receiveBase(result, "get_schema_all_versions");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schema_all_versions failed: unknown result");
+    }
+
+    public void drop_schema_version(String schemaName, int version) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_drop_schema_version(schemaName, version);
+      recv_drop_schema_version();
+    }
+
+    public void send_drop_schema_version(String schemaName, int version) throws org.apache.thrift.TException
+    {
+      drop_schema_version_args args = new drop_schema_version_args();
+      args.setSchemaName(schemaName);
+      args.setVersion(version);
+      sendBase("drop_schema_version", args);
+    }
+
+    public void recv_drop_schema_version() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      drop_schema_version_result result = new drop_schema_version_result();
+      receiveBase(result, "drop_schema_version");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst) throws MetaException, org.apache.thrift.TException
+    {
+      send_get_schemas_by_cols(rqst);
+      return recv_get_schemas_by_cols();
+    }
+
+    public void send_get_schemas_by_cols(FindSchemasByColsRqst rqst) throws org.apache.thrift.TException
+    {
+      get_schemas_by_cols_args args = new get_schemas_by_cols_args();
+      args.setRqst(rqst);
+      sendBase("get_schemas_by_cols", args);
+    }
+
+    public FindSchemasByColsResp recv_get_schemas_by_cols() throws MetaException, org.apache.thrift.TException
+    {
+      get_schemas_by_cols_result result = new get_schemas_by_cols_result();
+      receiveBase(result, "get_schemas_by_cols");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_schemas_by_cols failed: unknown result");
+    }
+
+    public void map_schema_version_to_serde(String schemaName, int version, String serdeName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_map_schema_version_to_serde(schemaName, version, serdeName);
+      recv_map_schema_version_to_serde();
+    }
+
+    public void send_map_schema_version_to_serde(String schemaName, int version, String serdeName) throws org.apache.thrift.TException
+    {
+      map_schema_version_to_serde_args args = new map_schema_version_to_serde_args();
+      args.setSchemaName(schemaName);
+      args.setVersion(version);
+      args.setSerdeName(serdeName);
+      sendBase("map_schema_version_to_serde", args);
+    }
+
+    public void recv_map_schema_version_to_serde() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      map_schema_version_to_serde_result result = new map_schema_version_to_serde_result();
+      receiveBase(result, "map_schema_version_to_serde");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public void set_schema_version_state(String schemaName, int version, SchemaVersionState state) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_set_schema_version_state(schemaName, version, state);
+      recv_set_schema_version_state();
+    }
+
+    public void send_set_schema_version_state(String schemaName, int version, SchemaVersionState state) throws org.apache.thrift.TException
+    {
+      set_schema_version_state_args args = new set_schema_version_state_args();
+      args.setSchemaName(schemaName);
+      args.setVersion(version);
+      args.setState(state);
+      sendBase("set_schema_version_state", args);
+    }
+
+    public void recv_set_schema_version_state() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      set_schema_version_state_result result = new set_schema_version_state_result();
+      receiveBase(result, "set_schema_version_state");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
+    public void add_serde(SerDeInfo serde) throws AlreadyExistsException, MetaException, org.apache.thrift.TException
+    {
+      send_add_serde(serde);
+      recv_add_serde();
+    }
+
+    public void send_add_serde(SerDeInfo serde) throws org.apache.thrift.TException
+    {
+      add_serde_args args = new add_serde_args();
+      args.setSerde(serde);
+      sendBase("add_serde", args);
+    }
+
+    public void recv_add_serde() throws AlreadyExistsException, MetaException, org.apache.thrift.TException
+    {
+      add_serde_result result = new add_serde_result();
+      receiveBase(result, "add_serde");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public SerDeInfo get_serde(String serdeName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_get_serde(serdeName);
+      return recv_get_serde();
+    }
+
+    public void send_get_serde(String serdeName) throws org.apache.thrift.TException
+    {
+      get_serde_args args = new get_serde_args();
+      args.setSerdeName(serdeName);
+      sendBase("get_serde", args);
+    }
+
+    public SerDeInfo recv_get_serde() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      get_serde_result result = new get_serde_result();
+      receiveBase(result, "get_serde");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_serde failed: unknown result");
+    }
+
   }
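
For readers skimming the generated API above: a minimal sketch of how a caller might exercise the new synchronous schema methods. This is not part of the patch; the endpoint (an unsecured metastore on localhost:9083) and the schema name "events" are illustrative assumptions.

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class SchemaClientSketch {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);  // illustrative endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        try {
          // Fetch the newest version, then the full history, of a hypothetical schema.
          SchemaVersion latest = client.get_schema_latest_version("events");
          List<SchemaVersion> all = client.get_schema_all_versions("events");
          System.out.println("latest=" + latest.getVersion() + ", total=" + all.size());
          // Retire an old version; NoSuchObjectException/MetaException propagate to the caller.
          client.drop_schema_version("events", 1);
        } finally {
          transport.close();
        }
      }
    }

Each call follows the send_*/recv_* pairing shown in the diff: arguments are serialized, the framed reply is decoded, and declared exceptions (o1, o2, ...) are re-thrown before the success value is returned.
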
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface {
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -12211,6 +12665,475 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void create_ischema(ISchema schema, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      create_ischema_call method_call = new create_ischema_call(schema, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_ischema_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private ISchema schema;
+      public create_ischema_call(ISchema schema, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schema = schema;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("create_ischema", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        create_ischema_args args = new create_ischema_args();
+        args.setSchema(schema);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_create_ischema();
+      }
+    }
+
+    public void alter_ischema(String schemaName, ISchema newSchema, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      alter_ischema_call method_call = new alter_ischema_call(schemaName, newSchema, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_ischema_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      private ISchema newSchema;
+      public alter_ischema_call(String schemaName, ISchema newSchema, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+        this.newSchema = newSchema;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_ischema", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_ischema_args args = new alter_ischema_args();
+        args.setSchemaName(schemaName);
+        args.setNewSchema(newSchema);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_alter_ischema();
+      }
+    }
+
+    public void get_ischema(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_ischema_call method_call = new get_ischema_call(schemaName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_ischema_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      public get_ischema_call(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_ischema", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_ischema_args args = new get_ischema_args();
+        args.setSchemaName(schemaName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public ISchema getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_ischema();
+      }
+    }
+
+    public void drop_ischema(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      drop_ischema_call method_call = new drop_ischema_call(schemaName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_ischema_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      public drop_ischema_call(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_ischema", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        drop_ischema_args args = new drop_ischema_args();
+        args.setSchemaName(schemaName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_drop_ischema();
+      }
+    }
+
+    public void add_schema_version(SchemaVersion schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      add_schema_version_call method_call = new add_schema_version_call(schemaVersion, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_schema_version_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private SchemaVersion schemaVersion;
+      public add_schema_version_call(SchemaVersion schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaVersion = schemaVersion;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_schema_version", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        add_schema_version_args args = new add_schema_version_args();
+        args.setSchemaVersion(schemaVersion);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_add_schema_version();
+      }
+    }
+
+    public void get_schema_version(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_schema_version_call method_call = new get_schema_version_call(schemaName, version, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_version_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      private int version;
+      public get_schema_version_call(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+        this.version = version;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_schema_version", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_schema_version_args args = new get_schema_version_args();
+        args.setSchemaName(schemaName);
+        args.setVersion(version);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SchemaVersion getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_schema_version();
+      }
+    }
+
+    public void get_schema_latest_version(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_schema_latest_version_call method_call = new get_schema_latest_version_call(schemaName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_latest_version_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      public get_schema_latest_version_call(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_schema_latest_version", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_schema_latest_version_args args = new get_schema_latest_version_args();
+        args.setSchemaName(schemaName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SchemaVersion getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_schema_latest_version();
+      }
+    }
+
+    public void get_schema_all_versions(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_schema_all_versions_call method_call = new get_schema_all_versions_call(schemaName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_all_versions_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      public get_schema_all_versions_call(String schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_schema_all_versions", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_schema_all_versions_args args = new get_schema_all_versions_args();
+        args.setSchemaName(schemaName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<SchemaVersion> getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_schema_all_versions();
+      }
+    }
+
+    public void drop_schema_version(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      drop_schema_version_call method_call = new drop_schema_version_call(schemaName, version, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_schema_version_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      private int version;
+      public drop_schema_version_call(String schemaName, int version, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+        this.version = version;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_schema_version", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        drop_schema_version_args args = new drop_schema_version_args();
+        args.setSchemaName(schemaName);
+        args.setVersion(version);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_drop_schema_version();
+      }
+    }
+
+    public void get_schemas_by_cols(FindSchemasByColsRqst rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_schemas_by_cols_call method_call = new get_schemas_by_cols_call(rqst, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schemas_by_cols_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private FindSchemasByColsRqst rqst;
+      public get_schemas_by_cols_call(FindSchemasByColsRqst rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.rqst = rqst;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_schemas_by_cols", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_schemas_by_cols_args args = new get_schemas_by_cols_args();
+        args.setRqst(rqst);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public FindSchemasByColsResp getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_schemas_by_cols();
+      }
+    }
+
+    public void map_schema_version_to_serde(String schemaName, int version, String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      map_schema_version_to_serde_call method_call = new map_schema_version_to_serde_call(schemaName, version, serdeName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class map_schema_version_to_serde_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      private int version;
+      private String serdeName;
+      public map_schema_version_to_serde_call(String schemaName, int version, String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+        this.version = version;
+        this.serdeName = serdeName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("map_schema_version_to_serde", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        map_schema_version_to_serde_args args = new map_schema_version_to_serde_args();
+        args.setSchemaName(schemaName);
+        args.setVersion(version);
+        args.setSerdeName(serdeName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_map_schema_version_to_serde();
+      }
+    }
+
+    public void set_schema_version_state(String schemaName, int version, SchemaVersionState state, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      set_schema_version_state_call method_call = new set_schema_version_state_call(schemaName, version, state, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_schema_version_state_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String schemaName;
+      private int version;
+      private SchemaVersionState state;
+      public set_schema_version_state_call(String schemaName, int version, SchemaVersionState state, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.schemaName = schemaName;
+        this.version = version;
+        this.state = state;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("set_schema_version_state", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        set_schema_version_state_args args = new set_schema_version_state_args();
+        args.setSchemaName(schemaName);
+        args.setVersion(version);
+        args.setState(state);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_set_schema_version_state();
+      }
+    }
+
+    public void add_serde(SerDeInfo serde, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      add_serde_call method_call = new add_serde_call(serde, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_serde_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private SerDeInfo serde;
+      public add_serde_call(SerDeInfo serde, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.serde = serde;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_serde", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        add_serde_args args = new add_serde_args();
+        args.setSerde(serde);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws AlreadyExistsException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_add_serde();
+      }
+    }
+
+    public void get_serde(String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_serde_call method_call = new get_serde_call(serdeName, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_serde_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String serdeName;
+      public get_serde_call(String serdeName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.serdeName = serdeName;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_serde", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_serde_args args = new get_serde_args();
+        args.setSerdeName(serdeName);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SerDeInfo getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_serde();
+      }
+    }
+
   }
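
The asynchronous client mirrors the same surface. A sketch of one non-blocking call, assuming libthrift's standard async plumbing; with the raw AsyncMethodCallback signature generated here, the callback conventionally receives the *_call object and unpacks it via getResult(). Endpoint and schema name are again illustrative, not part of this patch.

    import java.util.concurrent.CountDownLatch;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    public class AsyncSchemaSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        TAsyncClientManager manager = new TAsyncClientManager();   // selector thread
        TNonblockingSocket socket = new TNonblockingSocket("localhost", 9083);  // illustrative
        ThriftHiveMetastore.AsyncClient client =
            new ThriftHiveMetastore.AsyncClient.Factory(manager, new TBinaryProtocol.Factory())
                .getAsyncClient(socket);

        client.get_schema_latest_version("events",
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.get_schema_latest_version_call>() {
              public void onComplete(ThriftHiveMetastore.AsyncClient.get_schema_latest_version_call response) {
                try {
                  SchemaVersion latest = response.getResult();    // re-throws declared exceptions
                  System.out.println("latest=" + latest.getVersion());
                } catch (Exception e) {
                  e.printStackTrace();
                } finally {
                  latch.countDown();
                }
              }
              public void onError(Exception e) {
                e.printStackTrace();
                latch.countDown();
              }
            });
        latch.await();  // keep the JVM alive until the callback fires
      }
    }
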
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -12403,6 +13326,20 @@ import org.slf4j.LoggerFactory;
       processMap.put("create_or_update_wm_mapping", new create_or_update_wm_mapping());
       processMap.put("drop_wm_mapping", new drop_wm_mapping());
       processMap.put("create_or_drop_wm_trigger_to_pool_mapping", new create_or_drop_wm_trigger_to_pool_mapping());
+      processMap.put("create_ischema", new create_ischema());
+      processMap.put("alter_ischema", new alter_ischema());
+      processMap.put("get_ischema", new get_ischema());
+      processMap.put("drop_ischema", new drop_ischema());
+      processMap.put("add_schema_version", new add_schema_version());
+      processMap.put("get_schema_version", new get_schema_version());
+      processMap.put("get_schema_latest_version", new get_schema_latest_version());
+      processMap.put("get_schema_all_versions", new get_schema_all_versions());
+      processMap.put("drop_schema_version", new drop_schema_version());
+      processMap.put("get_schemas_by_cols", new get_schemas_by_cols());
+      processMap.put("map_schema_version_to_serde", new map_schema_version_to_serde());
+      processMap.put("set_schema_version_state", new set_schema_version_state());
+      processMap.put("add_serde", new add_serde());
+      processMap.put("get_serde", new get_serde());
       return processMap;
     }
 
@@ -16998,6 +17935,376 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_ischema<I extends Iface> extends org.apache.thrift.ProcessFunction<I, create_ischema_args> {
+      public create_ischema() {
+        super("create_ischema");
+      }
+
+      public create_ischema_args getEmptyArgsInstance() {
+        return new create_ischema_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public create_ischema_result getResult(I iface, create_ischema_args args) throws org.apache.thrift.TException {
+        create_ischema_result result = new create_ischema_result();
+        try {
+          iface.create_ischema(args.schema);
+        } catch (AlreadyExistsException o1) {
+          result.o1 = o1;
+        } catch (NoSuchObjectException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_ischema<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_ischema_args> {
+      public alter_ischema() {
+        super("alter_ischema");
+      }
+
+      public alter_ischema_args getEmptyArgsInstance() {
+        return new alter_ischema_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public alter_ischema_result getResult(I iface, alter_ischema_args args) throws org.apache.thrift.TException {
+        alter_ischema_result result = new alter_ischema_result();
+        try {
+          iface.alter_ischema(args.schemaName, args.newSchema);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_ischema<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_ischema_args> {
+      public get_ischema() {
+        super("get_ischema");
+      }
+
+      public get_ischema_args getEmptyArgsInstance() {
+        return new get_ischema_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_ischema_result getResult(I iface, get_ischema_args args) throws org.apache.thrift.TException {
+        get_ischema_result result = new get_ischema_result();
+        try {
+          result.success = iface.get_ischema(args.schemaName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_ischema<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_ischema_args> {
+      public drop_ischema() {
+        super("drop_ischema");
+      }
+
+      public drop_ischema_args getEmptyArgsInstance() {
+        return new drop_ischema_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public drop_ischema_result getResult(I iface, drop_ischema_args args) throws org.apache.thrift.TException {
+        drop_ischema_result result = new drop_ischema_result();
+        try {
+          iface.drop_ischema(args.schemaName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (InvalidOperationException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_schema_version<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_schema_version_args> {
+      public add_schema_version() {
+        super("add_schema_version");
+      }
+
+      public add_schema_version_args getEmptyArgsInstance() {
+        return new add_schema_version_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public add_schema_version_result getResult(I iface, add_schema_version_args args) throws org.apache.thrift.TException {
+        add_schema_version_result result = new add_schema_version_result();
+        try {
+          iface.add_schema_version(args.schemaVersion);
+        } catch (AlreadyExistsException o1) {
+          result.o1 = o1;
+        } catch (NoSuchObjectException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_version<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_schema_version_args> {
+      public get_schema_version() {
+        super("get_schema_version");
+      }
+
+      public get_schema_version_args getEmptyArgsInstance() {
+        return new get_schema_version_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_schema_version_result getResult(I iface, get_schema_version_args args) throws org.apache.thrift.TException {
+        get_schema_version_result result = new get_schema_version_result();
+        try {
+          result.success = iface.get_schema_version(args.schemaName, args.version);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_latest_version<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_schema_latest_version_args> {
+      public get_schema_latest_version() {
+        super("get_schema_latest_version");
+      }
+
+      public get_schema_latest_version_args getEmptyArgsInstance() {
+        return new get_schema_latest_version_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_schema_latest_version_result getResult(I iface, get_schema_latest_version_args args) throws org.apache.thrift.TException {
+        get_schema_latest_version_result result = new get_schema_latest_version_result();
+        try {
+          result.success = iface.get_schema_latest_version(args.schemaName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_all_versions<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_schema_all_versions_args> {
+      public get_schema_all_versions() {
+        super("get_schema_all_versions");
+      }
+
+      public get_schema_all_versions_args getEmptyArgsInstance() {
+        return new get_schema_all_versions_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_schema_all_versions_result getResult(I iface, get_schema_all_versions_args args) throws org.apache.thrift.TException {
+        get_schema_all_versions_result result = new get_schema_all_versions_result();
+        try {
+          result.success = iface.get_schema_all_versions(args.schemaName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_schema_version<I extends Iface> extends org.apache.thrift.ProcessFunction<I, drop_schema_version_args> {
+      public drop_schema_version() {
+        super("drop_schema_version");
+      }
+
+      public drop_schema_version_args getEmptyArgsInstance() {
+        return new drop_schema_version_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public drop_schema_version_result getResult(I iface, drop_schema_version_args args) throws org.apache.thrift.TException {
+        drop_schema_version_result result = new drop_schema_version_result();
+        try {
+          iface.drop_schema_version(args.schemaName, args.version);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schemas_by_cols<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_schemas_by_cols_args> {
+      public get_schemas_by_cols() {
+        super("get_schemas_by_cols");
+      }
+
+      public get_schemas_by_cols_args getEmptyArgsInstance() {
+        return new get_schemas_by_cols_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_schemas_by_cols_result getResult(I iface, get_schemas_by_cols_args args) throws org.apache.thrift.TException {
+        get_schemas_by_cols_result result = new get_schemas_by_cols_result();
+        try {
+          result.success = iface.get_schemas_by_cols(args.rqst);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class map_schema_version_to_serde<I extends Iface> extends org.apache.thrift.ProcessFunction<I, map_schema_version_to_serde_args> {
+      public map_schema_version_to_serde() {
+        super("map_schema_version_to_serde");
+      }
+
+      public map_schema_version_to_serde_args getEmptyArgsInstance() {
+        return new map_schema_version_to_serde_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public map_schema_version_to_serde_result getResult(I iface, map_schema_version_to_serde_args args) throws org.apache.thrift.TException {
+        map_schema_version_to_serde_result result = new map_schema_version_to_serde_result();
+        try {
+          iface.map_schema_version_to_serde(args.schemaName, args.version, args.serdeName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_schema_version_state<I extends Iface> extends org.apache.thrift.ProcessFunction<I, set_schema_version_state_args> {
+      public set_schema_version_state() {
+        super("set_schema_version_state");
+      }
+
+      public set_schema_version_state_args getEmptyArgsInstance() {
+        return new set_schema_version_state_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public set_schema_version_state_result getResult(I iface, set_schema_version_state_args args) throws org.apache.thrift.TException {
+        set_schema_version_state_result result = new set_schema_version_state_result();
+        try {
+          iface.set_schema_version_state(args.schemaName, args.version, args.state);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (InvalidOperationException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_serde<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_serde_args> {
+      public add_serde() {
+        super("add_serde");
+      }
+
+      public add_serde_args getEmptyArgsInstance() {
+        return new add_serde_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public add_serde_result getResult(I iface, add_serde_args args) throws org.apache.thrift.TException {
+        add_serde_result result = new add_serde_result();
+        try {
+          iface.add_serde(args.serde);
+        } catch (AlreadyExistsException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_serde<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_serde_args> {
+      public get_serde() {
+        super("get_serde");
+      }
+
+      public get_serde_args getEmptyArgsInstance() {
+        return new get_serde_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_serde_result getResult(I iface, get_serde_args args) throws org.apache.thrift.TException {
+        get_serde_result result = new get_serde_result();
+        try {
+          result.success = iface.get_serde(args.serdeName);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
   }
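
On the server side, each entry added to the process map above is resolved by method name from the incoming TMessage and dispatched to the handler through the matching ProcessFunction. A minimal sketch of serving the extended interface, assuming handler is an existing ThriftHiveMetastore.Iface implementation (in Hive, HiveMetaStore's HMSHandler plays this role); the port is illustrative.

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.server.TServer;
    import org.apache.thrift.server.TThreadPoolServer;
    import org.apache.thrift.transport.TServerSocket;

    public class MetastoreServerSketch {
      public static void serve(ThriftHiveMetastore.Iface handler) throws Exception {
        // The generated Processor looks up "create_ischema", "get_serde", etc. in its
        // process map and invokes the matching handler method for each request.
        ThriftHiveMetastore.Processor<ThriftHiveMetastore.Iface> processor =
            new ThriftHiveMetastore.Processor<>(handler);
        TServerSocket serverTransport = new TServerSocket(9083);  // illustrative port
        TServer server = new TThreadPoolServer(
            new TThreadPoolServer.Args(serverTransport).processor(processor));
        server.serve();  // blocks, handling one connection per pooled thread
      }
    }
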
 
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> {
@@ -17190,6 +18497,20 @@ import org.slf4j.LoggerFactory;
       processMap.put("create_or_update_wm_mapping", new create_or_update_wm_mapping());
       processMap.put("drop_wm_mapping", new drop_wm_mapping());
       processMap.put("create_or_drop_wm_trigger_to_pool_mapping", new create_or_drop_wm_trigger_to_pool_mapping());
+      processMap.put("create_ischema", new create_ischema());
+      processMap.put("alter_ischema", new alter_ischema());
+      processMap.put("get_ischema", new get_ischema());
+      processMap.put("drop_ischema", new drop_ischema());
+      processMap.put("add_schema_version", new add_schema_version());
+      processMap.put("get_schema_version", new get_schema_version());
+      processMap.put("get_schema_latest_version", new get_schema_latest_version());
+      processMap.put("get_schema_all_versions", new get_schema_all_versions());
+      processMap.put("drop_schema_version", new drop_schema_version());
+      processMap.put("get_schemas_by_cols", new get_schemas_by_cols());
+      processMap.put("map_schema_version_to_serde", new map_schema_version_to_serde());
+      processMap.put("set_schema_version_state", new set_schema_version_state());
+      processMap.put("add_serde", new add_serde());
+      processMap.put("get_serde", new get_serde());
       return processMap;
     }
 
@@ -28156,6 +29477,881 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_ischema<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, create_ischema_args, Void> {
+      public create_ischema() {
+        super("create_ischema");
+      }
+
+      public create_ischema_args getEmptyArgsInstance() {
+        return new create_ischema_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            create_ischema_result result = new create_ischema_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            create_ischema_result result = new create_ischema_result();
+            if (e instanceof AlreadyExistsException) {
+              result.o1 = (AlreadyExistsException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof NoSuchObjectException) {
+              result.o2 = (NoSuchObjectException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o3 = (MetaException) e;
+              result.setO3IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, create_ischema_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.create_ischema(args.schema,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_ischema<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_ischema_args, Void> {
+      public alter_ischema() {
+        super("alter_ischema");
+      }
+
+      public alter_ischema_args getEmptyArgsInstance() {
+        return new alter_ischema_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_ischema_result result = new alter_ischema_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_ischema_result result = new alter_ischema_result();
+            if (e instanceof NoSuchObjectException) {
+              result.o1 = (NoSuchObjectException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o2 = (MetaException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_ischema_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_ischema(args.schemaName, args.newSchema,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_ischema<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_ischema_args, ISchema> {
+      public get_ischema() {
+        super("get_ischema");
+      }
+
+      public get_ischema_args getEmptyArgsInstance() {
+        return new get_ischema_args();
+      }
+
+      public AsyncMethodCallback<ISchema> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<ISchema>() { 
+          public void onComplete(ISchema o) {
+            get_ischema_result result = new get_ischema_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_ischema_result result = new get_ischema_result();
+            if (e instanceof NoSuchObjectException) {
+              result.o1 = (NoSuchObjectException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o2 = (MetaException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_ischema_args args, org.apache.thrift.async.AsyncMethodCallback<ISchema> resultHandler) throws TException {
+        iface.get_ischema(args.schemaName,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_ischema<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_ischema_args, Void> {
+      public drop_ischema() {
+        super("drop_ischema");
+      }
+
+      public drop_ischema_args getEmptyArgsInstance() {
+        return new drop_ischema_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            drop_ischema_result result = new drop_ischema_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            drop_ischema_result result = new drop_ischema_result();
+            if (e instanceof NoSuchObjectException) {
+              result.o1 = (NoSuchObjectException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof InvalidOperationException) {
+              result.o2 = (InvalidOperationException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o3 = (MetaException) e;
+              result.setO3IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, drop_ischema_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.drop_ischema(args.schemaName,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_schema_version<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_schema_version_args, Void> {
+      public add_schema_version() {
+        super("add_schema_version");
+      }
+
+      public add_schema_version_args getEmptyArgsInstance() {
+        return new add_schema_version_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            add_schema_version_result result = new add_schema_version_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            add_schema_version_result result = new add_schema_version_result();
+            if (e instanceof AlreadyExistsException) {
+              result.o1 = (AlreadyExistsException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof NoSuchObjectException) {
+              result.o2 = (NoSuchObjectException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o3 = (MetaException) e;
+              result.setO3IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, add_schema_version_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.add_schema_version(args.schemaVersion,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_version<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_schema_version_args, SchemaVersion> {
+      public get_schema_version() {
+        super("get_schema_version");
+      }
+
+      public get_schema_version_args getEmptyArgsInstance() {
+        return new get_schema_version_args();
+      }
+
+      public AsyncMethodCallback<SchemaVersion> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<SchemaVersion>() { 
+          public void onComplete(SchemaVersion o) {
+            get_schema_version_result result = new get_schema_version_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_schema_version_result result = new get_schema_version_result();
+            if (e instanceof NoSuchObjectException) {
+              result.o1 = (NoSuchObjectException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o2 = (MetaException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_schema_version_args args, org.apache.thrift.async.AsyncMethodCallback<SchemaVersion> resultHandler) throws TException {
+        iface.get_schema_version(args.schemaName, args.version,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_schema_latest_version<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_schema_latest_version_args, SchemaVersion> {
+      public get_schema_latest_version() {
+        super("get_schema_latest_version");
+      }
+
+      public get_schema_latest_version_args getEmptyArgsInstance() {
+        return new get_schema_latest_version_args();
+      }
+
+      public AsyncMethodCallback<SchemaVersion> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<SchemaVersion>() { 
+          public void onComplete(SchemaVersion o) {
+            get_schema_latest_version_result result = new get_schema_latest_version_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_schema_latest_version_result result = new get_schema_latest_version_result();
+            if (e instanceof NoSuchObjectException) {
+              result.o1 = (NoSuchObjectException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof MetaException) {
+              result.o2 = (MetaException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else {


<TRUNCATED>
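
For readers skimming the generated handlers above: each Thrift service method is registered in the process map and compiled into an AsyncProcessFunction whose result callback has the same shape every time. onComplete() serializes the result struct as a REPLY, while onError() copies any exception declared in the IDL into the corresponding result field and downgrades anything undeclared to a TApplicationException(INTERNAL_ERROR). A minimal sketch of that dispatch shape follows; it uses hypothetical names (EchoException, Callback, AsyncDispatchSketch), has no libthrift dependency, and is an illustration of the pattern rather than Hive code:

import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch of the callback shape emitted for each service method above.
// EchoException stands in for an exception declared in the Thrift IDL (such as
// NoSuchObjectException); anything undeclared is downgraded to a generic
// internal error, mirroring the TApplicationException fallback.
public class AsyncDispatchSketch {

  static class EchoException extends Exception {
    EchoException(String m) { super(m); }
  }

  interface Callback<T> {
    void onComplete(T result);
    void onError(Exception e);
  }

  static Callback<String> resultHandler(final AtomicReference<String> wire) {
    return new Callback<String>() {
      @Override public void onComplete(String result) {
        wire.set("REPLY: " + result);                          // success reply
      }
      @Override public void onError(Exception e) {
        if (e instanceof EchoException) {
          wire.set("REPLY (declared error): " + e.getMessage());
        } else {
          wire.set("EXCEPTION (internal error): " + e.getMessage());
        }
      }
    };
  }

  public static void main(String[] args) {
    AtomicReference<String> wire = new AtomicReference<String>();
    resultHandler(wire).onComplete("ok");
    System.out.println(wire.get());                            // REPLY: ok
    resultHandler(wire).onError(new EchoException("no such schema"));
    System.out.println(wire.get());                            // REPLY (declared error): ...
    resultHandler(wire).onError(new IllegalStateException("boom"));
    System.out.println(wire.get());                            // EXCEPTION (internal error): boom
  }
}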

[15/50] [abbrv] hive git commit: HIVE-18111: Fix temp path for Spark DPP sink (Rui reviewed by Sahil)

Posted by ga...@apache.org.
HIVE-18111: Fix temp path for Spark DPP sink (Rui reviewed by Sahil)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8ced3bc7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8ced3bc7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8ced3bc7

Branch: refs/heads/standalone-metastore
Commit: 8ced3bc7c595be7700088a4487363b6151e6c3d2
Parents: 11227eb
Author: Rui Li <li...@apache.org>
Authored: Mon Dec 18 11:20:46 2017 +0800
Committer: Rui Li <li...@apache.org>
Committed: Mon Dec 18 11:20:46 2017 +0800

----------------------------------------------------------------------
 .../SparkDynamicPartitionPruningResolver.java   |   3 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   5 +
 .../spark/CombineEquivalentWorkResolver.java    |  18 +-
 .../hive/ql/parse/spark/GenSparkUtils.java      |  28 +-
 .../SparkPartitionPruningSinkOperator.java      |  14 +
 .../spark_dynamic_partition_pruning_4.q         |  21 +-
 .../spark_dynamic_partition_pruning_4.q.out     | 329 +++++++++++++++++++
 7 files changed, 384 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java
index bcd3825..278e8a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkDynamicPartitionPruningResolver.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.OperatorUtils;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.spark.SparkTask;
-import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.lib.TaskGraphWalker;
@@ -117,7 +116,7 @@ public class SparkDynamicPartitionPruningResolver implements PhysicalPlanResolve
     OperatorUtils.removeBranch(pruningSinkOp);
 
     // Remove all event source info from the target MapWork
-    String sourceWorkId = SparkUtilities.getWorkId(sourceWork);
+    String sourceWorkId = pruningSinkOp.getUniqueId();
     SparkPartitionPruningSinkDesc pruningSinkDesc = pruningSinkOp.getConf();
     targetMapWork.getEventSourceTableDescMap().get(sourceWorkId).remove(pruningSinkDesc.getTable());
     targetMapWork.getEventSourceColumnNameMap().get(sourceWorkId).remove(pruningSinkDesc.getTargetColumnName());
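
The change above swaps the event-source key from the enclosing work's id to the pruning sink's own unique id, so unregistering one sink removes only that sink's entries from the target MapWork's lookup maps. A small self-contained sketch of that keyed cleanup, with illustrative names standing in for the Hive map accessors:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative stand-in for MapWork's event-source bookkeeping: metadata is
// grouped per source id, so dropping one DPP sink removes only the entries
// filed under that sink's unique id, leaving other sinks untouched.
public class EventSourceCleanupSketch {
  public static void main(String[] args) {
    Map<String, List<String>> eventSourceColumnNames = new HashMap<String, List<String>>();
    eventSourceColumnNames.computeIfAbsent("sink_0", k -> new ArrayList<String>()).add("p");
    eventSourceColumnNames.computeIfAbsent("sink_1", k -> new ArrayList<String>()).add("p");
    eventSourceColumnNames.computeIfAbsent("sink_1", k -> new ArrayList<String>()).add("q");

    String removedSinkId = "sink_0";                        // cf. pruningSinkOp.getUniqueId()
    eventSourceColumnNames.get(removedSinkId).remove("p");  // only this sink's column is removed

    System.out.println(eventSourceColumnNames);             // e.g. {sink_0=[], sink_1=[p, q]}
  }
}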

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 8ce2c33..6f28970 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -41,6 +41,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.calcite.util.Pair;
 import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hive.ql.parse.spark.SparkPartitionPruningSinkOperator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
@@ -4640,6 +4641,10 @@ public class Vectorizer implements PhysicalPlanResolver {
             vectorOp = OperatorFactory.getVectorOperator(
                 op.getCompilationOpContext(), sparkPartitionPruningSinkDesc,
                 vContext, vectorSparkPartitionPruningSinkDesc);
+            // need to maintain the unique ID so that target map works can
+            // read the output
+            ((SparkPartitionPruningSinkOperator) vectorOp).setUniqueId(
+                ((SparkPartitionPruningSinkOperator) op).getUniqueId());
             isNative = true;
           }
           break;

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/CombineEquivalentWorkResolver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/CombineEquivalentWorkResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/CombineEquivalentWorkResolver.java
index 988579e..6e502eb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/CombineEquivalentWorkResolver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/spark/CombineEquivalentWorkResolver.java
@@ -85,7 +85,7 @@ public class CombineEquivalentWorkResolver implements PhysicalPlanResolver {
     };
 
     // maps from a work to the DPPs it contains
-    private Map<BaseWork, List<SparkPartitionPruningSinkDesc>> workToDpps = new HashMap<>();
+    private Map<BaseWork, List<SparkPartitionPruningSinkOperator>> workToDpps = new HashMap<>();
 
     @Override
     public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
@@ -215,16 +215,15 @@ public class CombineEquivalentWorkResolver implements PhysicalPlanResolver {
         if (workSet.size() > 1) {
           Iterator<BaseWork> iterator = workSet.iterator();
           BaseWork first = iterator.next();
-          List<SparkPartitionPruningSinkDesc> dppList1 = workToDpps.get(first);
-          String firstId = SparkUtilities.getWorkId(first);
+          List<SparkPartitionPruningSinkOperator> dppList1 = workToDpps.get(first);
           while (iterator.hasNext()) {
             BaseWork next = iterator.next();
             if (dppList1 != null) {
-              List<SparkPartitionPruningSinkDesc> dppList2 = workToDpps.get(next);
+              List<SparkPartitionPruningSinkOperator> dppList2 = workToDpps.get(next);
               // equivalent works must have dpp lists of same size
               for (int i = 0; i < dppList1.size(); i++) {
-                combineEquivalentDPPSinks(dppList1.get(i), dppList2.get(i),
-                    firstId, SparkUtilities.getWorkId(next));
+                combineEquivalentDPPSinks(dppList1.get(i).getConf(), dppList2.get(i).getConf(),
+                    dppList1.get(i).getUniqueId(), dppList2.get(i).getUniqueId());
               }
             }
             replaceWork(next, first, sparkWork);
@@ -391,10 +390,11 @@ public class CombineEquivalentWorkResolver implements PhysicalPlanResolver {
       }
 
       if (firstOperator instanceof SparkPartitionPruningSinkOperator) {
-        List<SparkPartitionPruningSinkDesc> dpps = workToDpps.computeIfAbsent(first, k -> new ArrayList<>());
-        dpps.add(((SparkPartitionPruningSinkOperator) firstOperator).getConf());
+        List<SparkPartitionPruningSinkOperator> dpps = workToDpps.computeIfAbsent(
+            first, k -> new ArrayList<>());
+        dpps.add(((SparkPartitionPruningSinkOperator) firstOperator));
         dpps = workToDpps.computeIfAbsent(second, k -> new ArrayList<>());
-        dpps.add(((SparkPartitionPruningSinkOperator) secondOperator).getConf());
+        dpps.add(((SparkPartitionPruningSinkOperator) secondOperator));
       }
       return true;
     }
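
Storing the operators, rather than just their descs, in workToDpps keeps both the desc and the lazily assigned unique id reachable when equivalent works are merged. The computeIfAbsent idiom used above is the standard Java 8 way to build such a multimap; a self-contained sketch with illustrative names:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch of the computeIfAbsent multimap idiom from the resolver above.
// Sink is an illustrative stand-in for SparkPartitionPruningSinkOperator.
public class MultimapSketch {
  static class Sink {
    final String uniqueId;
    Sink(String id) { this.uniqueId = id; }
  }

  public static void main(String[] args) {
    Map<String, List<Sink>> workToDpps = new HashMap<String, List<Sink>>();
    // The list for a work is created on first use, then appended to.
    workToDpps.computeIfAbsent("work-1", k -> new ArrayList<Sink>()).add(new Sink("sink_0"));
    workToDpps.computeIfAbsent("work-1", k -> new ArrayList<Sink>()).add(new Sink("sink_1"));
    System.out.println(workToDpps.get("work-1").size()); // 2
  }
}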

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
index c6c7bf7..232ed45 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
 import org.apache.hadoop.hive.ql.exec.TableScanOperator;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.UnionOperator;
-import org.apache.hadoop.hive.ql.exec.spark.SparkUtilities;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.optimizer.spark.SparkPartitionPruningSinkDesc;
 import org.apache.hadoop.hive.ql.optimizer.spark.SparkSortMergeJoinFactory;
@@ -464,16 +463,12 @@ public class GenSparkUtils {
         targetWork != null,
         "No targetWork found for tablescan " + ts);
 
-    String targetId = SparkUtilities.getWorkId(targetWork);
-
-    BaseWork sourceWork = getEnclosingWork(pruningSink, context);
-    String sourceId = SparkUtilities.getWorkId(sourceWork);
+    String sourceId = pruningSink.getUniqueId();
 
     // set up temporary path to communicate between the small/big table
     Path tmpPath = targetWork.getTmpPathForPartitionPruning();
     if (tmpPath == null) {
-      Path baseTmpPath = context.parseContext.getContext().getMRTmpPath();
-      tmpPath = SparkUtilities.generateTmpPathForPartitionPruning(baseTmpPath, targetId);
+      tmpPath = getDPPOutputPath(context.parseContext.getContext());
       targetWork.setTmpPathForPartitionPruning(tmpPath);
       LOG.info("Setting tmp path between source work and target work:\n" + tmpPath);
     }
@@ -509,6 +504,10 @@ public class GenSparkUtils {
     keys.add(desc.getTargetPartKey());
   }
 
+  private Path getDPPOutputPath(Context context) {
+    return new Path(context.getMRScratchDir(), "_dpp_output_");
+  }
+
   public static SparkEdgeProperty getEdgeProperty(HiveConf conf, ReduceSinkOperator reduceSink,
       ReduceWork reduceWork) throws SemanticException {
     boolean useSparkGroupBy = conf.getBoolVar(HiveConf.ConfVars.SPARK_USE_GROUPBY_SHUFFLE);
@@ -682,19 +681,4 @@ public class GenSparkUtils {
     }
     return false;
   }
-
-  /**
-   * getEnclosingWork finds the BaseWork that a given operator belongs to.
-   */
-  public BaseWork getEnclosingWork(Operator<?> op, GenSparkProcContext procCtx) {
-    List<Operator<?>> ops = new ArrayList<Operator<?>>();
-    OperatorUtils.findRoots(op, ops);
-    for (Operator<?> r : ops) {
-      BaseWork work = procCtx.rootToWorkMap.get(r);
-      if (work != null) {
-        return work;
-      }
-    }
-    return null;
-  }
 }
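
With getEnclosingWork() removed, the temp path no longer encodes a per-work id; instead every DPP sink writes under a single well-known "_dpp_output_" child of the MR scratch directory, as getDPPOutputPath() shows above. A sketch of that path construction, using the real org.apache.hadoop.fs.Path API but an illustrative scratch location and sink id:

import org.apache.hadoop.fs.Path;

// Sketch of getDPPOutputPath(): one shared "_dpp_output_" directory under the
// MR scratch dir replaces the old per-target-work tmp paths. The scratch
// location stands in for context.getMRScratchDir(), and the per-sink child
// directory name is hypothetical.
public class DppOutputPathSketch {
  public static void main(String[] args) {
    Path mrScratchDir = new Path("/tmp/hive/_scratch");  // illustrative scratch dir
    Path dppOutput = new Path(mrScratchDir, "_dpp_output_");
    Path perSinkDir = new Path(dppOutput, "RS_3_0");     // hypothetical sink unique id
    System.out.println(perSinkDir);                      // /tmp/hive/_scratch/_dpp_output_/RS_3_0
  }
}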

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java
index bd9de09..966ffe7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkPartitionPruningSinkOperator.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse.spark;
 import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.io.ObjectOutputStream;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -55,6 +56,9 @@ public class SparkPartitionPruningSinkOperator extends Operator<SparkPartitionPr
   protected transient Serializer serializer;
   protected transient DataOutputBuffer buffer;
   protected static final Logger LOG = LoggerFactory.getLogger(SparkPartitionPruningSinkOperator.class);
+  private static final AtomicLong SEQUENCE_NUM = new AtomicLong(0);
+
+  private transient String uniqueId = null;
 
   /** Kryo ctor. */
   @VisibleForTesting
@@ -202,4 +206,14 @@ public class SparkPartitionPruningSinkOperator extends Operator<SparkPartitionPr
     return "SPARKPRUNINGSINK";
   }
 
+  public synchronized String getUniqueId() {
+    if (uniqueId == null) {
+      uniqueId = getOperatorId() + "_" + SEQUENCE_NUM.getAndIncrement();
+    }
+    return uniqueId;
+  }
+
+  public synchronized void setUniqueId(String uniqueId) {
+    this.uniqueId = uniqueId;
+  }
 }
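
The lazily assigned unique id above is a small but load-bearing piece of the fix: a JVM-wide counter keeps ids distinct even when two sinks would otherwise share an operator id, and the setter lets the vectorized replacement operator (see the Vectorizer hunk earlier) inherit the id of the operator it replaces, so target MapWorks keep finding the same output. A self-contained sketch of the idiom, with an illustrative class name:

import java.util.concurrent.atomic.AtomicLong;

// Sketch of the lazy unique-id idiom added to SparkPartitionPruningSinkOperator:
// the id is built once, on first request, from the operator id plus a static
// counter; setUniqueId lets a clone adopt the original's id instead.
public class UniqueIdSketch {
  private static final AtomicLong SEQUENCE_NUM = new AtomicLong(0);
  private final String operatorId;
  private String uniqueId;

  UniqueIdSketch(String operatorId) { this.operatorId = operatorId; }

  public synchronized String getUniqueId() {
    if (uniqueId == null) {
      uniqueId = operatorId + "_" + SEQUENCE_NUM.getAndIncrement();
    }
    return uniqueId;
  }

  public synchronized void setUniqueId(String uniqueId) { this.uniqueId = uniqueId; }

  public static void main(String[] args) {
    UniqueIdSketch a = new UniqueIdSketch("SPARKPRUNINGSINK_1");
    UniqueIdSketch b = new UniqueIdSketch("SPARKPRUNINGSINK_1");
    System.out.println(a.getUniqueId());  // SPARKPRUNINGSINK_1_0
    System.out.println(b.getUniqueId());  // SPARKPRUNINGSINK_1_1 -- distinct despite same operator id
    UniqueIdSketch clone = new UniqueIdSketch("SPARKPRUNINGSINK_1");
    clone.setUniqueId(a.getUniqueId());   // a vectorized clone keeps the source's id
    System.out.println(clone.getUniqueId().equals(a.getUniqueId())); // true
  }
}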

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q
index 240128f..e5f4874 100644
--- a/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q
+++ b/ql/src/test/queries/clientpositive/spark_dynamic_partition_pruning_4.q
@@ -153,5 +153,24 @@ select * from
 union all
   (select part2.key, part2.value from part2 join top on part2.p=top.key and part2.q=top.value);
 
+-- The following test case makes sure target map works can read from multiple DPP sinks,
+-- when the DPP sinks have different target lists
+-- see HIVE-18111
+
+create table foo(key string);
+insert into table foo values ('1'),('2');
+
+set hive.cbo.enable = false;
+
+explain
+select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo);
+
+select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo);
+
+drop table foo;
 drop table part1;
-drop table part2;
\ No newline at end of file
+drop table part2;

http://git-wip-us.apache.org/repos/asf/hive/blob/8ced3bc7/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
index 20fa5a7..a06c3e3 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_4.q.out
@@ -1873,6 +1873,335 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: create table foo(key string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@foo
+POSTHOOK: query: create table foo(key string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@foo
+PREHOOK: query: insert into table foo values ('1'),('2')
+PREHOOK: type: QUERY
+PREHOOK: Output: default@foo
+POSTHOOK: query: insert into table foo values ('1'),('2')
+POSTHOOK: type: QUERY
+POSTHOOK: Output: default@foo
+POSTHOOK: Lineage: foo.key SIMPLE [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: explain
+select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-2
+    Spark
+      Edges:
+        Reducer 12 <- Map 11 (GROUP, 1)
+        Reducer 16 <- Map 15 (GROUP, 1)
+#### A masked pattern was here ####
+      Vertices:
+        Map 11 
+            Map Operator Tree:
+                TableScan
+                  alias: foo
+                  Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: max(key)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string)
+        Map 15 
+            Map Operator Tree:
+                TableScan
+                  alias: foo
+                  Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: min(key)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string)
+        Reducer 12 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: _col0 is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        Spark Partition Pruning Sink Operator
+                          Target column: [1:p (string), 5:p (string)]
+                          partition key expr: [p, p]
+                          Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                          target works: [Map 1, Map 5]
+        Reducer 16 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: _col0 is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: _col0 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        keys: _col0 (type: string)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                        Spark Partition Pruning Sink Operator
+                          Target column: [5:p (string)]
+                          partition key expr: [p]
+                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                          target works: [Map 5]
+
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 10 <- Map 9 (GROUP, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Reducer 4 (PARTITION-LEVEL SORT, 4)
+        Reducer 4 <- Map 3 (GROUP, 1)
+        Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 4), Reducer 10 (PARTITION-LEVEL SORT, 4), Reducer 4 (PARTITION-LEVEL SORT, 4)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: part2
+                  Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: p (type: string)
+                    sort order: +
+                    Map-reduce partition columns: p (type: string)
+                    Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: foo
+                  Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: max(key)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string)
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: part1
+                  Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: p (type: string)
+                    sort order: +
+                    Map-reduce partition columns: p (type: string)
+                    Statistics: Num rows: 8 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+        Map 9 
+            Map Operator Tree:
+                TableScan
+                  alias: foo
+                  Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: min(key)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string)
+        Reducer 10 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: _col0 is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                outputColumnNames: _col2
+                Statistics: Num rows: 8 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 8 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 16 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: max(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: _col0 is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE
+        Reducer 6 
+            Reduce Operator Tree:
+              Join Operator
+                condition map:
+                     Left Semi Join 0 to 1
+                outputColumnNames: _col2
+                Statistics: Num rows: 8 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col2 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 8 Data size: 26 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 16 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@foo
+PREHOOK: Input: default@part1
+PREHOOK: Input: default@part1@p=1/q=1
+PREHOOK: Input: default@part1@p=1/q=2
+PREHOOK: Input: default@part1@p=2/q=1
+PREHOOK: Input: default@part1@p=2/q=2
+PREHOOK: Input: default@part2
+PREHOOK: Input: default@part2@p=3/q=3
+PREHOOK: Input: default@part2@p=3/q=4
+PREHOOK: Input: default@part2@p=4/q=3
+PREHOOK: Input: default@part2@p=4/q=4
+#### A masked pattern was here ####
+POSTHOOK: query: select p from part2 where p in (select max(key) from foo)
+union all
+select p from part1 where p in (select max(key) from foo union all select min(key) from foo)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@foo
+POSTHOOK: Input: default@part1
+POSTHOOK: Input: default@part1@p=1/q=1
+POSTHOOK: Input: default@part1@p=1/q=2
+POSTHOOK: Input: default@part1@p=2/q=1
+POSTHOOK: Input: default@part1@p=2/q=2
+POSTHOOK: Input: default@part2
+POSTHOOK: Input: default@part2@p=3/q=3
+POSTHOOK: Input: default@part2@p=3/q=4
+POSTHOOK: Input: default@part2@p=4/q=3
+POSTHOOK: Input: default@part2@p=4/q=4
+#### A masked pattern was here ####
+1
+1
+1
+1
+2
+2
+2
+2
+PREHOOK: query: drop table foo
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@foo
+PREHOOK: Output: default@foo
+POSTHOOK: query: drop table foo
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@foo
+POSTHOOK: Output: default@foo
 PREHOOK: query: drop table part1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@part1


[29/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/oracle/hive-schema-2.3.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-2.3.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-2.3.0.oracle.sql
new file mode 100644
index 0000000..bda635f
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-2.3.0.oracle.sql
@@ -0,0 +1,926 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(767) NOT NULL,
+    TYPE_NAME CLOB NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(256) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL,
+    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(256) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
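+
+-- (Note that the foreign keys in this script are created INITIALLY DEFERRED,
+-- so they are checked at commit time rather than per statement; this lets the
+-- ORM layer insert related rows in any order within a transaction.)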
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(256) NOT NULL,
+ COLUMN_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(256) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID NUMBER NOT NULL,
+    EVENT_ID NUMBER NOT NULL,
+    EVENT_TIME NUMBER(10) NOT NULL,
+    EVENT_TYPE VARCHAR2(32) NOT NULL,
+    DB_NAME VARCHAR2(128),
+    TBL_NAME VARCHAR2(256),
+    MESSAGE CLOB NULL,
+    MESSAGE_FORMAT VARCHAR(16) NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID NUMBER NOT NULL,
+    NEXT_EVENT_ID NUMBER NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID NUMBER,
+  CHILD_INTEGER_IDX NUMBER,
+  CHILD_TBL_ID NUMBER,
+  PARENT_CD_ID NUMBER NOT NULL,
+  PARENT_INTEGER_IDX NUMBER NOT NULL,
+  PARENT_TBL_ID NUMBER NOT NULL,
+  POSITION NUMBER NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE NUMBER NOT NULL,
+  UPDATE_RULE NUMBER,
+  DELETE_RULE NUMBER,
+  ENABLE_VALIDATE_RELY NUMBER NOT NULL
+);
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
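+
+-- (KEY_CONSTRAINTS records the PRIMARY KEY / FOREIGN KEY constraints that
+-- users declare on Hive tables; CONSTRAINT_TYPE encodes the constraint kind
+-- and POSITION the column's ordinal within the key. These constraints are
+-- informational metadata rather than RDBMS-enforced.)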
+
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+--@hive-txn-schema-2.3.0.oracle.sql;
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar2(128),
+  TXN_META_INFO varchar2(128),
+  TXN_HEARTBEAT_COUNT number(10)
+) ROWDEPENDENCIES;
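+
+-- (The transaction and lock tables are created with ROWDEPENDENCIES so Oracle
+-- tracks dependencies per row rather than per block, which reduces spurious
+-- serialization failures under concurrent updates.)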
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(256),
+  TC_PARTITION VARCHAR2(767) NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+) ROWDEPENDENCIES;
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT number(10),
+  HL_AGENT_INFO varchar2(128),
+  HL_BLOCKEDBY_EXT_ID number(19),
+  HL_BLOCKEDBY_INT_ID number(19),
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+) ROWDEPENDENCIES;
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID NUMBER(19),
+  CQ_META_INFO BLOB,
+  CQ_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID NUMBER(19) PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START NUMBER(19),
+  CC_END NUMBER(19),
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID NUMBER(19),
+  CC_META_INFO BLOB,
+  CC_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar2(128) NOT NULL,
+  MT_KEY2 number(19) NOT NULL,
+  MT_COMMENT varchar2(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar2(128) NOT NULL,
+  WS_TABLE varchar2(128) NOT NULL,
+  WS_PARTITION varchar2(767),
+  WS_TXNID number(19) NOT NULL,
+  WS_COMMIT_ID number(19) NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
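+
+-- For reference, once this script has run the recorded version can be
+-- checked against the VERSION table defined above, e.g.:
+--
+--   SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;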

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
new file mode 100644
index 0000000..feaf2a6
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql
@@ -0,0 +1,1014 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(767) NOT NULL,
+    TYPE_NAME CLOB NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(767) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE CLOB NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(256) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL,
+    IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(256) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(256) NOT NULL,
+ COLUMN_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ BIT_VECTOR BLOB,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(256) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ BIT_VECTOR BLOB,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID NUMBER NOT NULL,
+    EVENT_ID NUMBER NOT NULL,
+    EVENT_TIME NUMBER(10) NOT NULL,
+    EVENT_TYPE VARCHAR2(32) NOT NULL,
+    DB_NAME VARCHAR2(128),
+    TBL_NAME VARCHAR2(256),
+    MESSAGE CLOB NULL,
+    MESSAGE_FORMAT VARCHAR(16) NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID NUMBER NOT NULL,
+    NEXT_EVENT_ID NUMBER NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
+-- Tables to manage resource plans.
+
+CREATE TABLE WM_RESOURCEPLAN
+(
+    RP_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NOT NULL,
+    QUERY_PARALLELISM NUMBER(10),
+    STATUS VARCHAR2(20) NOT NULL
+);
+
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+
+CREATE TABLE WM_POOL
+(
+    POOL_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    PATH VARCHAR2(1024) NOT NULL,
+    PARENT_POOL_ID NUMBER,
+    ALLOC_FRACTION NUMBER,
+    QUERY_PARALLELISM NUMBER(10)
+);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+
+CREATE TABLE WM_TRIGGER
+(
+    TRIGGER_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NOT NULL,
+    TRIGGER_EXPRESSION VARCHAR2(1024),
+    ACTION_EXPRESSION VARCHAR2(1024)
+);
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+
+CREATE TABLE WM_POOL_TO_TRIGGER
+(
+    POOL_ID NUMBER NOT NULL,
+    TRIGGER_ID NUMBER NOT NULL
+);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+
+CREATE TABLE WM_MAPPING
+(
+    MAPPING_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    ENTITY_TYPE VARCHAR2(10) NOT NULL,
+    ENTITY_NAME VARCHAR2(128) NOT NULL,
+    POOL_ID NUMBER NOT NULL,
+    ORDERING NUMBER(10)
+);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
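+
+-- Taken together, the WM_* tables model workload-management resource plans:
+-- WM_RESOURCEPLAN is the root object, WM_POOL rows form a tree under a plan
+-- via PARENT_POOL_ID, WM_TRIGGER rules belong to a plan and attach to pools
+-- through the WM_POOL_TO_TRIGGER join table, and WM_MAPPING routes a named
+-- entity to a pool.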
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID NUMBER,
+  CHILD_INTEGER_IDX NUMBER,
+  CHILD_TBL_ID NUMBER,
+  PARENT_CD_ID NUMBER NOT NULL,
+  PARENT_INTEGER_IDX NUMBER NOT NULL,
+  PARENT_TBL_ID NUMBER NOT NULL,
+  POSITION NUMBER NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE NUMBER NOT NULL,
+  UPDATE_RULE NUMBER,
+  DELETE_RULE NUMBER,
+  ENABLE_VALIDATE_RELY NUMBER NOT NULL
+);
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+
+CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+
+-- Table for METASTORE_DB_PROPERTIES and its constraints
+CREATE TABLE METASTORE_DB_PROPERTIES
+(
+  PROPERTY_KEY VARCHAR(255) NOT NULL,
+  PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+  DESCRIPTION VARCHAR(1000)
+);
+
+ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
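+
+-- (METASTORE_DB_PROPERTIES stores key/value properties that describe this
+-- metastore database instance itself, as opposed to properties of user
+-- databases or tables.)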
+
+-- Constraints for resource plan tables.
+
+CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+
+CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar2(128),
+  TXN_META_INFO varchar2(128),
+  TXN_HEARTBEAT_COUNT number(10)
+) ROWDEPENDENCIES;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(256),
+  TC_PARTITION VARCHAR2(767) NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+) ROWDEPENDENCIES;
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
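+
+-- NEXT_TXN_ID (like NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID below) is a
+-- single-row "manual sequence". Conceptually the metastore reserves IDs with
+-- a read-then-increment pattern along these lines (an illustrative sketch,
+-- not the exact statements the metastore issues):
+--
+--   SELECT NTXN_NEXT FROM NEXT_TXN_ID;
+--   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;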
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT number(10),
+  HL_AGENT_INFO varchar2(128),
+  HL_BLOCKEDBY_EXT_ID number(19),
+  HL_BLOCKEDBY_INT_ID number(19),
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+) ROWDEPENDENCIES;
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_TBLPROPERTIES varchar(2048),
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID NUMBER(19),
+  CQ_META_INFO BLOB,
+  CQ_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID NUMBER(19) PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_TBLPROPERTIES varchar(2048),
+  CC_WORKER_ID varchar(128),
+  CC_START NUMBER(19),
+  CC_END NUMBER(19),
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID NUMBER(19),
+  CC_META_INFO BLOB,
+  CC_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar2(128) NOT NULL,
+  MT_KEY2 number(19) NOT NULL,
+  MT_COMMENT varchar2(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar2(128) NOT NULL,
+  WS_TABLE varchar2(128) NOT NULL,
+  WS_PARTITION varchar2(767),
+  WS_TXNID number(19) NOT NULL,
+  WS_COMMIT_ID number(19) NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
new file mode 100644
index 0000000..a24948a
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@ -0,0 +1,107 @@
+SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status FROM DUAL;
+
+--@041-HIVE-16556.oracle.sql;
+CREATE TABLE METASTORE_DB_PROPERTIES
+(
+  PROPERTY_KEY VARCHAR2(255) NOT NULL,
+  PROPERTY_VALUE VARCHAR2(1000) NOT NULL,
+  DESCRIPTION VARCHAR2(1000)
+);
+
+ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+
+--@042-HIVE-16575.oracle.sql;
+CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+
+--@043-HIVE-16922.oracle.sql;
+UPDATE SERDE_PARAMS
+SET PARAM_KEY='collection.delim'
+WHERE PARAM_KEY='colelction.delim';
+
+--@044-HIVE-16997.oracle.sql;
+ALTER TABLE PART_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR BLOB NULL;
+
+--@045-HIVE-16886.oracle.sql;
+INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+
+--@046-HIVE-17566.oracle.sql;
+CREATE TABLE WM_RESOURCEPLAN
+(
+    RP_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NOT NULL,
+    QUERY_PARALLELISM NUMBER(10),
+    STATUS VARCHAR2(20) NOT NULL
+);
+
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+
+
+CREATE TABLE WM_POOL
+(
+    POOL_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    PATH VARCHAR2(1024) NOT NULL,
+    PARENT_POOL_ID NUMBER,
+    ALLOC_FRACTION NUMBER,
+    QUERY_PARALLELISM NUMBER(10)
+);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+
+CREATE TABLE WM_TRIGGER
+(
+    TRIGGER_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NOT NULL,
+    TRIGGER_EXPRESSION VARCHAR2(1024),
+    ACTION_EXPRESSION VARCHAR2(1024)
+);
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+
+CREATE TABLE WM_POOL_TO_TRIGGER
+(
+    POOL_ID NUMBER NOT NULL,
+    TRIGGER_ID NUMBER NOT NULL
+);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+
+
+CREATE TABLE WM_MAPPING
+(
+    MAPPING_ID NUMBER NOT NULL,
+    RP_ID NUMBER NOT NULL,
+    ENTITY_TYPE VARCHAR2(10) NOT NULL,
+    ENTITY_NAME VARCHAR2(128) NOT NULL,
+    POOL_ID NUMBER NOT NULL,
+    ORDERING NUMBER(10)
+);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' WHERE VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status FROM DUAL;
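
The upgrade ends by rewriting the same VERSION row that the init scripts create, so either path leaves an identical marker behind. A quick sanity check after running either script:

  SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
  -- expected result after this upgrade: 3.0.0 | Hive release version 3.0.0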

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/oracle/upgrade.order.oracle
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/oracle/upgrade.order.oracle b/standalone-metastore/src/main/sql/oracle/upgrade.order.oracle
new file mode 100644
index 0000000..15531df
--- /dev/null
+++ b/standalone-metastore/src/main/sql/oracle/upgrade.order.oracle
@@ -0,0 +1 @@
+2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/postgres/create-user.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/postgres/create-user.postgres.sql b/standalone-metastore/src/main/sql/postgres/create-user.postgres.sql
new file mode 100644
index 0000000..90e68dc
--- /dev/null
+++ b/standalone-metastore/src/main/sql/postgres/create-user.postgres.sql
@@ -0,0 +1,2 @@
+CREATE ROLE _REPLACE_WITH_USER_ LOGIN PASSWORD '_REPLACE_WITH_PASSWD_';
+CREATE DATABASE _REPLACE_WITH_DB_ OWNER _REPLACE_WITH_USER_;
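
The _REPLACE_WITH_* tokens are placeholders that the caller substitutes before running the script. With illustrative values filled in (the names below are examples, not defaults), the script would read:

  -- 'hive' and 'metastore_db' are hypothetical substitutions
  CREATE ROLE hive LOGIN PASSWORD 'hivepassword';
  CREATE DATABASE metastore_db OWNER hive;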


[40/50] [abbrv] hive git commit: HIVE-17990 Add Thrift and DB storage for Schema Registry objects

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
index e78a851..3342454 100644
--- a/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -142,6 +142,67 @@ final class EventRequestType {
   );
 }
 
+final class SerdeType {
+  const HIVE = 1;
+  const SCHEMA_REGISTRY = 2;
+  static public $__names = array(
+    1 => 'HIVE',
+    2 => 'SCHEMA_REGISTRY',
+  );
+}
+
+final class SchemaType {
+  const HIVE = 1;
+  const AVRO = 2;
+  static public $__names = array(
+    1 => 'HIVE',
+    2 => 'AVRO',
+  );
+}
+
+final class SchemaCompatibility {
+  const NONE = 1;
+  const BACKWARD = 2;
+  const FORWARD = 3;
+  const BOTH = 4;
+  static public $__names = array(
+    1 => 'NONE',
+    2 => 'BACKWARD',
+    3 => 'FORWARD',
+    4 => 'BOTH',
+  );
+}
+
+final class SchemaValidation {
+  const LATEST = 1;
+  const ALL = 2;
+  static public $__names = array(
+    1 => 'LATEST',
+    2 => 'ALL',
+  );
+}
+
+final class SchemaVersionState {
+  const INITIATED = 1;
+  const START_REVIEW = 2;
+  const CHANGES_REQUIRED = 3;
+  const REVIEWED = 4;
+  const ENABLED = 5;
+  const DISABLED = 6;
+  const ARCHIVED = 7;
+  const DELETED = 8;
+  static public $__names = array(
+    1 => 'INITIATED',
+    2 => 'START_REVIEW',
+    3 => 'CHANGES_REQUIRED',
+    4 => 'REVIEWED',
+    5 => 'ENABLED',
+    6 => 'DISABLED',
+    7 => 'ARCHIVED',
+    8 => 'DELETED',
+  );
+}
+
 final class FunctionType {
   const JAVA = 1;
   static public $__names = array(
@@ -4021,6 +4082,22 @@ class SerDeInfo {
    * @var array
    */
   public $parameters = null;
+  /**
+   * @var string
+   */
+  public $description = null;
+  /**
+   * @var string
+   */
+  public $serializerClass = null;
+  /**
+   * @var string
+   */
+  public $deserializerClass = null;
+  /**
+   * @var int
+   */
+  public $serdeType = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -4045,6 +4122,22 @@ class SerDeInfo {
             'type' => TType::STRING,
             ),
           ),
+        4 => array(
+          'var' => 'description',
+          'type' => TType::STRING,
+          ),
+        5 => array(
+          'var' => 'serializerClass',
+          'type' => TType::STRING,
+          ),
+        6 => array(
+          'var' => 'deserializerClass',
+          'type' => TType::STRING,
+          ),
+        7 => array(
+          'var' => 'serdeType',
+          'type' => TType::I32,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -4057,6 +4150,18 @@ class SerDeInfo {
       if (isset($vals['parameters'])) {
         $this->parameters = $vals['parameters'];
       }
+      if (isset($vals['description'])) {
+        $this->description = $vals['description'];
+      }
+      if (isset($vals['serializerClass'])) {
+        $this->serializerClass = $vals['serializerClass'];
+      }
+      if (isset($vals['deserializerClass'])) {
+        $this->deserializerClass = $vals['deserializerClass'];
+      }
+      if (isset($vals['serdeType'])) {
+        $this->serdeType = $vals['serdeType'];
+      }
     }
   }
 
@@ -4113,6 +4218,34 @@ class SerDeInfo {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 4:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->description);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 5:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->serializerClass);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 6:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->deserializerClass);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 7:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->serdeType);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -4154,6 +4287,26 @@ class SerDeInfo {
       }
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->description !== null) {
+      $xfer += $output->writeFieldBegin('description', TType::STRING, 4);
+      $xfer += $output->writeString($this->description);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->serializerClass !== null) {
+      $xfer += $output->writeFieldBegin('serializerClass', TType::STRING, 5);
+      $xfer += $output->writeString($this->serializerClass);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->deserializerClass !== null) {
+      $xfer += $output->writeFieldBegin('deserializerClass', TType::STRING, 6);
+      $xfer += $output->writeString($this->deserializerClass);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->serdeType !== null) {
+      $xfer += $output->writeFieldBegin('serdeType', TType::I32, 7);
+      $xfer += $output->writeI32($this->serdeType);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -24016,6 +24169,879 @@ class WMCreateOrDropTriggerToPoolMappingResponse {
 
 }
 
+class ISchema {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $schemaType = null;
+  /**
+   * @var string
+   */
+  public $name = null;
+  /**
+   * @var string
+   */
+  public $dbName = null;
+  /**
+   * @var int
+   */
+  public $compatibility = null;
+  /**
+   * @var int
+   */
+  public $validationLevel = null;
+  /**
+   * @var bool
+   */
+  public $canEvolve = null;
+  /**
+   * @var string
+   */
+  public $schemaGroup = null;
+  /**
+   * @var string
+   */
+  public $description = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'schemaType',
+          'type' => TType::I32,
+          ),
+        2 => array(
+          'var' => 'name',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'dbName',
+          'type' => TType::STRING,
+          ),
+        4 => array(
+          'var' => 'compatibility',
+          'type' => TType::I32,
+          ),
+        5 => array(
+          'var' => 'validationLevel',
+          'type' => TType::I32,
+          ),
+        6 => array(
+          'var' => 'canEvolve',
+          'type' => TType::BOOL,
+          ),
+        7 => array(
+          'var' => 'schemaGroup',
+          'type' => TType::STRING,
+          ),
+        8 => array(
+          'var' => 'description',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['schemaType'])) {
+        $this->schemaType = $vals['schemaType'];
+      }
+      if (isset($vals['name'])) {
+        $this->name = $vals['name'];
+      }
+      if (isset($vals['dbName'])) {
+        $this->dbName = $vals['dbName'];
+      }
+      if (isset($vals['compatibility'])) {
+        $this->compatibility = $vals['compatibility'];
+      }
+      if (isset($vals['validationLevel'])) {
+        $this->validationLevel = $vals['validationLevel'];
+      }
+      if (isset($vals['canEvolve'])) {
+        $this->canEvolve = $vals['canEvolve'];
+      }
+      if (isset($vals['schemaGroup'])) {
+        $this->schemaGroup = $vals['schemaGroup'];
+      }
+      if (isset($vals['description'])) {
+        $this->description = $vals['description'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ISchema';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->schemaType);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dbName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->compatibility);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 5:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->validationLevel);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 6:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->canEvolve);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 7:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->schemaGroup);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 8:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->description);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ISchema');
+    if ($this->schemaType !== null) {
+      $xfer += $output->writeFieldBegin('schemaType', TType::I32, 1);
+      $xfer += $output->writeI32($this->schemaType);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->name !== null) {
+      $xfer += $output->writeFieldBegin('name', TType::STRING, 2);
+      $xfer += $output->writeString($this->name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->dbName !== null) {
+      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 3);
+      $xfer += $output->writeString($this->dbName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->compatibility !== null) {
+      $xfer += $output->writeFieldBegin('compatibility', TType::I32, 4);
+      $xfer += $output->writeI32($this->compatibility);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->validationLevel !== null) {
+      $xfer += $output->writeFieldBegin('validationLevel', TType::I32, 5);
+      $xfer += $output->writeI32($this->validationLevel);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->canEvolve !== null) {
+      $xfer += $output->writeFieldBegin('canEvolve', TType::BOOL, 6);
+      $xfer += $output->writeBool($this->canEvolve);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->schemaGroup !== null) {
+      $xfer += $output->writeFieldBegin('schemaGroup', TType::STRING, 7);
+      $xfer += $output->writeString($this->schemaGroup);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->description !== null) {
+      $xfer += $output->writeFieldBegin('description', TType::STRING, 8);
+      $xfer += $output->writeString($this->description);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class SchemaVersion {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $schemaName = null;
+  /**
+   * @var int
+   */
+  public $version = null;
+  /**
+   * @var int
+   */
+  public $createdAt = null;
+  /**
+   * @var \metastore\FieldSchema[]
+   */
+  public $cols = null;
+  /**
+   * @var int
+   */
+  public $state = null;
+  /**
+   * @var string
+   */
+  public $description = null;
+  /**
+   * @var string
+   */
+  public $schemaText = null;
+  /**
+   * @var string
+   */
+  public $fingerprint = null;
+  /**
+   * @var string
+   */
+  public $name = null;
+  /**
+   * @var \metastore\SerDeInfo
+   */
+  public $serDe = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'schemaName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'version',
+          'type' => TType::I32,
+          ),
+        3 => array(
+          'var' => 'createdAt',
+          'type' => TType::I64,
+          ),
+        4 => array(
+          'var' => 'cols',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\FieldSchema',
+            ),
+          ),
+        5 => array(
+          'var' => 'state',
+          'type' => TType::I32,
+          ),
+        6 => array(
+          'var' => 'description',
+          'type' => TType::STRING,
+          ),
+        7 => array(
+          'var' => 'schemaText',
+          'type' => TType::STRING,
+          ),
+        8 => array(
+          'var' => 'fingerprint',
+          'type' => TType::STRING,
+          ),
+        9 => array(
+          'var' => 'name',
+          'type' => TType::STRING,
+          ),
+        10 => array(
+          'var' => 'serDe',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\SerDeInfo',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['schemaName'])) {
+        $this->schemaName = $vals['schemaName'];
+      }
+      if (isset($vals['version'])) {
+        $this->version = $vals['version'];
+      }
+      if (isset($vals['createdAt'])) {
+        $this->createdAt = $vals['createdAt'];
+      }
+      if (isset($vals['cols'])) {
+        $this->cols = $vals['cols'];
+      }
+      if (isset($vals['state'])) {
+        $this->state = $vals['state'];
+      }
+      if (isset($vals['description'])) {
+        $this->description = $vals['description'];
+      }
+      if (isset($vals['schemaText'])) {
+        $this->schemaText = $vals['schemaText'];
+      }
+      if (isset($vals['fingerprint'])) {
+        $this->fingerprint = $vals['fingerprint'];
+      }
+      if (isset($vals['name'])) {
+        $this->name = $vals['name'];
+      }
+      if (isset($vals['serDe'])) {
+        $this->serDe = $vals['serDe'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'SchemaVersion';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->schemaName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->version);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->createdAt);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::LST) {
+            $this->cols = array();
+            $_size701 = 0;
+            $_etype704 = 0;
+            $xfer += $input->readListBegin($_etype704, $_size701);
+            for ($_i705 = 0; $_i705 < $_size701; ++$_i705)
+            {
+              $elem706 = null;
+              $elem706 = new \metastore\FieldSchema();
+              $xfer += $elem706->read($input);
+              $this->cols []= $elem706;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 5:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->state);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 6:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->description);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 7:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->schemaText);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 8:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->fingerprint);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 9:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->name);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 10:
+          if ($ftype == TType::STRUCT) {
+            $this->serDe = new \metastore\SerDeInfo();
+            $xfer += $this->serDe->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('SchemaVersion');
+    if ($this->schemaName !== null) {
+      $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 1);
+      $xfer += $output->writeString($this->schemaName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->version !== null) {
+      $xfer += $output->writeFieldBegin('version', TType::I32, 2);
+      $xfer += $output->writeI32($this->version);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->createdAt !== null) {
+      $xfer += $output->writeFieldBegin('createdAt', TType::I64, 3);
+      $xfer += $output->writeI64($this->createdAt);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->cols !== null) {
+      if (!is_array($this->cols)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('cols', TType::LST, 4);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->cols));
+        {
+          foreach ($this->cols as $iter707)
+          {
+            $xfer += $iter707->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->state !== null) {
+      $xfer += $output->writeFieldBegin('state', TType::I32, 5);
+      $xfer += $output->writeI32($this->state);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->description !== null) {
+      $xfer += $output->writeFieldBegin('description', TType::STRING, 6);
+      $xfer += $output->writeString($this->description);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->schemaText !== null) {
+      $xfer += $output->writeFieldBegin('schemaText', TType::STRING, 7);
+      $xfer += $output->writeString($this->schemaText);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->fingerprint !== null) {
+      $xfer += $output->writeFieldBegin('fingerprint', TType::STRING, 8);
+      $xfer += $output->writeString($this->fingerprint);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->name !== null) {
+      $xfer += $output->writeFieldBegin('name', TType::STRING, 9);
+      $xfer += $output->writeString($this->name);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->serDe !== null) {
+      if (!is_object($this->serDe)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('serDe', TType::STRUCT, 10);
+      $xfer += $this->serDe->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class FindSchemasByColsRqst {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $colName = null;
+  /**
+   * @var string
+   */
+  public $colNamespace = null;
+  /**
+   * @var string
+   */
+  public $type = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'colName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'colNamespace',
+          'type' => TType::STRING,
+          ),
+        3 => array(
+          'var' => 'type',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['colName'])) {
+        $this->colName = $vals['colName'];
+      }
+      if (isset($vals['colNamespace'])) {
+        $this->colNamespace = $vals['colNamespace'];
+      }
+      if (isset($vals['type'])) {
+        $this->type = $vals['type'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'FindSchemasByColsRqst';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->colName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->colNamespace);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->type);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('FindSchemasByColsRqst');
+    if ($this->colName !== null) {
+      $xfer += $output->writeFieldBegin('colName', TType::STRING, 1);
+      $xfer += $output->writeString($this->colName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->colNamespace !== null) {
+      $xfer += $output->writeFieldBegin('colNamespace', TType::STRING, 2);
+      $xfer += $output->writeString($this->colNamespace);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->type !== null) {
+      $xfer += $output->writeFieldBegin('type', TType::STRING, 3);
+      $xfer += $output->writeString($this->type);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class FindSchemasByColsRespEntry {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $schemaName = null;
+  /**
+   * @var int
+   */
+  public $version = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'schemaName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'version',
+          'type' => TType::I32,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['schemaName'])) {
+        $this->schemaName = $vals['schemaName'];
+      }
+      if (isset($vals['version'])) {
+        $this->version = $vals['version'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'FindSchemasByColsRespEntry';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->schemaName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->version);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('FindSchemasByColsRespEntry');
+    if ($this->schemaName !== null) {
+      $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 1);
+      $xfer += $output->writeString($this->schemaName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->version !== null) {
+      $xfer += $output->writeFieldBegin('version', TType::I32, 2);
+      $xfer += $output->writeI32($this->version);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class FindSchemasByColsResp {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\FindSchemasByColsRespEntry[]
+   */
+  public $schemaVersions = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'schemaVersions',
+          'type' => TType::LST,
+          'etype' => TType::STRUCT,
+          'elem' => array(
+            'type' => TType::STRUCT,
+            'class' => '\metastore\FindSchemasByColsRespEntry',
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['schemaVersions'])) {
+        $this->schemaVersions = $vals['schemaVersions'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'FindSchemasByColsResp';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::LST) {
+            $this->schemaVersions = array();
+            $_size708 = 0;
+            $_etype711 = 0;
+            $xfer += $input->readListBegin($_etype711, $_size708);
+            for ($_i712 = 0; $_i712 < $_size708; ++$_i712)
+            {
+              $elem713 = null;
+              $elem713 = new \metastore\FindSchemasByColsRespEntry();
+              $xfer += $elem713->read($input);
+              $this->schemaVersions []= $elem713;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('FindSchemasByColsResp');
+    if ($this->schemaVersions !== null) {
+      if (!is_array($this->schemaVersions)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('schemaVersions', TType::LST, 1);
+      {
+        $output->writeListBegin(TType::STRUCT, count($this->schemaVersions));
+        {
+          foreach ($this->schemaVersions as $iter714)
+          {
+            $xfer += $iter714->write($output);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class MetaException extends TException {
   static $_TSPEC;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b3cb8526/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 5533044..f0d93e2 100755
--- a/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -203,6 +203,20 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request)')
   print('  WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)')
   print('  WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request)')
+  print('  void create_ischema(ISchema schema)')
+  print('  void alter_ischema(string schemaName, ISchema newSchema)')
+  print('  ISchema get_ischema(string schemaName)')
+  print('  void drop_ischema(string schemaName)')
+  print('  void add_schema_version(SchemaVersion schemaVersion)')
+  print('  SchemaVersion get_schema_version(string schemaName, i32 version)')
+  print('  SchemaVersion get_schema_latest_version(string schemaName)')
+  print('   get_schema_all_versions(string schemaName)')
+  print('  void drop_schema_version(string schemaName, i32 version)')
+  print('  FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst)')
+  print('  void map_schema_version_to_serde(string schemaName, i32 version, string serdeName)')
+  print('  void set_schema_version_state(string schemaName, i32 version, SchemaVersionState state)')
+  print('  void add_serde(SerDeInfo serde)')
+  print('  SerDeInfo get_serde(string serdeName)')
   print('  string getName()')
   print('  string getVersion()')
   print('  fb_status getStatus()')
@@ -1346,6 +1360,90 @@ elif cmd == 'create_or_drop_wm_trigger_to_pool_mapping':
     sys.exit(1)
   pp.pprint(client.create_or_drop_wm_trigger_to_pool_mapping(eval(args[0]),))
 
+elif cmd == 'create_ischema':
+  if len(args) != 1:
+    print('create_ischema requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.create_ischema(eval(args[0]),))
+
+elif cmd == 'alter_ischema':
+  if len(args) != 2:
+    print('alter_ischema requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.alter_ischema(args[0],eval(args[1]),))
+
+elif cmd == 'get_ischema':
+  if len(args) != 1:
+    print('get_ischema requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_ischema(args[0],))
+
+elif cmd == 'drop_ischema':
+  if len(args) != 1:
+    print('drop_ischema requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.drop_ischema(args[0],))
+
+elif cmd == 'add_schema_version':
+  if len(args) != 1:
+    print('add_schema_version requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.add_schema_version(eval(args[0]),))
+
+elif cmd == 'get_schema_version':
+  if len(args) != 2:
+    print('get_schema_version requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.get_schema_version(args[0],eval(args[1]),))
+
+elif cmd == 'get_schema_latest_version':
+  if len(args) != 1:
+    print('get_schema_latest_version requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_schema_latest_version(args[0],))
+
+elif cmd == 'get_schema_all_versions':
+  if len(args) != 1:
+    print('get_schema_all_versions requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_schema_all_versions(args[0],))
+
+elif cmd == 'drop_schema_version':
+  if len(args) != 2:
+    print('drop_schema_version requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.drop_schema_version(args[0],eval(args[1]),))
+
+elif cmd == 'get_schemas_by_cols':
+  if len(args) != 1:
+    print('get_schemas_by_cols requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_schemas_by_cols(eval(args[0]),))
+
+elif cmd == 'map_schema_version_to_serde':
+  if len(args) != 3:
+    print('map_schema_version_to_serde requires 3 args')
+    sys.exit(1)
+  pp.pprint(client.map_schema_version_to_serde(args[0],eval(args[1]),args[2],))
+
+elif cmd == 'set_schema_version_state':
+  if len(args) != 3:
+    print('set_schema_version_state requires 3 args')
+    sys.exit(1)
+  pp.pprint(client.set_schema_version_state(args[0],eval(args[1]),eval(args[2]),))
+
+elif cmd == 'add_serde':
+  if len(args) != 1:
+    print('add_serde requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.add_serde(eval(args[0]),))
+
+elif cmd == 'get_serde':
+  if len(args) != 1:
+    print('get_serde requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_serde(args[0],))
+
 elif cmd == 'getName':
   if len(args) != 0:
     print('getName requires 0 args')


[31/50] [abbrv] hive git commit: HIVE-17983 Make the standalone metastore generate tarballs etc.

Posted by ga...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mssql/hive-schema-2.3.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-2.3.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-2.3.0.mssql.sql
new file mode 100644
index 0000000..c117a32
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-2.3.0.mssql.sql
@@ -0,0 +1,1023 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+------------------------------------------------------------------
+-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+------------------------------------------------------------------
+-- Complete schema required for the following classes:-
+--     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+--     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+--     org.apache.hadoop.hive.metastore.model.MDatabase
+--     org.apache.hadoop.hive.metastore.model.MDelegationToken
+--     org.apache.hadoop.hive.metastore.model.MFieldSchema
+--     org.apache.hadoop.hive.metastore.model.MFunction
+--     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+--     org.apache.hadoop.hive.metastore.model.MIndex
+--     org.apache.hadoop.hive.metastore.model.MMasterKey
+--     org.apache.hadoop.hive.metastore.model.MOrder
+--     org.apache.hadoop.hive.metastore.model.MPartition
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+--     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+--     org.apache.hadoop.hive.metastore.model.MResourceUri
+--     org.apache.hadoop.hive.metastore.model.MRole
+--     org.apache.hadoop.hive.metastore.model.MRoleMap
+--     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+--     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+--     org.apache.hadoop.hive.metastore.model.MStringList
+--     org.apache.hadoop.hive.metastore.model.MTable
+--     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+--     org.apache.hadoop.hive.metastore.model.MType
+--     org.apache.hadoop.hive.metastore.model.MVersionTable
+--
+-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID int NOT NULL,
+    MASTER_KEY nvarchar(767) NULL
+);
+
+ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DEFERRED_REBUILD bit NOT NULL,
+    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+    INDEX_NAME nvarchar(128) NULL,
+    INDEX_TBL_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    ORIG_TBL_ID bigint NULL,
+    SD_ID bigint NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+CREATE TABLE PART_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    PART_ID bigint NULL,
+    PARTITION_NAME nvarchar(767) NOT NULL,
+    "TABLE_NAME" nvarchar(256) NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID bigint NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    ROLE_NAME nvarchar(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    PART_NAME nvarchar(767) NULL,
+    SD_ID bigint NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+CREATE TABLE CDS
+(
+    CD_ID bigint NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+CREATE TABLE VERSION
+(
+    VER_ID bigint NOT NULL,
+    SCHEMA_VERSION nvarchar(127) NOT NULL,
+    VERSION_COMMENT nvarchar(255) NOT NULL
+);
+
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    USER_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_COL_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    DB_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+CREATE TABLE TAB_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    TBL_ID bigint NULL,
+    "TABLE_NAME" nvarchar(256) NOT NULL
+);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID bigint NOT NULL,
+    TYPE_NAME nvarchar(128) NULL,
+    TYPE1 nvarchar(767) NULL,
+    TYPE2 nvarchar(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID bigint NOT NULL,
+    "DESC" nvarchar(4000) NULL,
+    DB_LOCATION_URI nvarchar(4000) NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_COL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT nvarchar(767) NOT NULL,
+    TOKEN nvarchar(767) NULL
+);
+
+ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    SLIB nvarchar(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+CREATE TABLE FUNCS
+(
+    FUNC_ID bigint NOT NULL,
+    CLASS_NAME nvarchar(4000) NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    FUNC_NAME nvarchar(128) NULL,
+    FUNC_TYPE int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID bigint NOT NULL,
+    ADD_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    ROLE_ID bigint NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    OWNER nvarchar(767) NULL,
+    RETENTION int NOT NULL,
+    SD_ID bigint NULL,
+    TBL_NAME nvarchar(256) NULL,
+    TBL_TYPE nvarchar(128) NULL,
+    VIEW_EXPANDED_TEXT text NULL,
+    VIEW_ORIGINAL_TEXT text NULL,
+    IS_REWRITE_ENABLED bit NOT NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID bigint NOT NULL,
+    CD_ID bigint NULL,
+    INPUT_FORMAT nvarchar(4000) NULL,
+    IS_COMPRESSED bit NOT NULL,
+    IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+    LOCATION nvarchar(4000) NULL,
+    NUM_BUCKETS int NOT NULL,
+    OUTPUT_FORMAT nvarchar(4000) NULL,
+    SERDE_ID bigint NULL
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID bigint NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    EVENT_TIME bigint NOT NULL,
+    EVENT_TYPE int NOT NULL,
+    PARTITION_NAME nvarchar(767) NULL,
+    TBL_NAME nvarchar(256) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    "ORDER" int NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_NAMES for join relationship
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID bigint NOT NULL,
+    SKEWED_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID bigint NOT NULL,
+    STRING_LIST_ID_KID bigint NOT NULL,
+    LOCATION nvarchar(4000) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+-- Table SKEWED_STRING_LIST_VALUES for join relationship
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID bigint NOT NULL,
+    STRING_LIST_VALUE nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID bigint NOT NULL,
+    PART_KEY_VAL nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID bigint NOT NULL,
+    PKEY_COMMENT nvarchar(4000) NULL,
+    PKEY_NAME nvarchar(128) NOT NULL,
+    PKEY_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table SKEWED_VALUES for join relationship
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID bigint NOT NULL,
+    STRING_LIST_ID_EID bigint NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table FUNC_RU for join relationship
+CREATE TABLE FUNC_RU
+(
+    FUNC_ID bigint NOT NULL,
+    RESOURCE_TYPE int NOT NULL,
+    RESOURCE_URI nvarchar(4000) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    FIELD_NAME nvarchar(128) NOT NULL,
+    FIELD_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID bigint NOT NULL,
+    BUCKET_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(180) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    TYPE_NAME varchar(max) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID bigint NOT NULL,
+    EVENT_ID bigint NOT NULL,
+    EVENT_TIME int NOT NULL,
+    EVENT_TYPE nvarchar(32) NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    TBL_NAME nvarchar(256) NULL,
+    MESSAGE_FORMAT nvarchar(16),
+    MESSAGE text NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID bigint NOT NULL,
+    NEXT_EVENT_ID bigint NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
+-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+
+
+-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+
+-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_NAMES
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST_VALUES
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table SKEWED_VALUES
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+
+CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table FUNC_RU
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These tables are not defined in package.jdo, so if you regenerate this file you need to manually add this section back.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+CREATE TABLE COMPACTION_QUEUE(
+	CQ_ID bigint NOT NULL,
+	CQ_DATABASE nvarchar(128) NOT NULL,
+	CQ_TABLE nvarchar(128) NOT NULL,
+	CQ_PARTITION nvarchar(767) NULL,
+	CQ_STATE char(1) NOT NULL,
+	CQ_TYPE char(1) NOT NULL,
+	CQ_TBLPROPERTIES nvarchar(2048) NULL,
+	CQ_WORKER_ID nvarchar(128) NULL,
+	CQ_START bigint NULL,
+	CQ_RUN_AS nvarchar(128) NULL,
+	CQ_HIGHEST_TXN_ID bigint NULL,
+	CQ_META_INFO varbinary(2048) NULL,
+	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED
+(
+	CQ_ID ASC
+)
+);
+
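+-- Note: CQ_STATE and CQ_TYPE hold single-character codes (for example 'i'
+-- for an initiated compaction); the authoritative code set is defined in the
+-- metastore's TxnStore Java sources, not in this script.
+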
+CREATE TABLE COMPLETED_COMPACTIONS (
+	CC_ID bigint NOT NULL,
+	CC_DATABASE nvarchar(128) NOT NULL,
+	CC_TABLE nvarchar(128) NOT NULL,
+	CC_PARTITION nvarchar(767) NULL,
+	CC_STATE char(1) NOT NULL,
+	CC_TYPE char(1) NOT NULL,
+	CC_TBLPROPERTIES nvarchar(2048) NULL,
+	CC_WORKER_ID nvarchar(128) NULL,
+	CC_START bigint NULL,
+	CC_END bigint NULL,
+	CC_RUN_AS nvarchar(128) NULL,
+	CC_HIGHEST_TXN_ID bigint NULL,
+	CC_META_INFO varbinary(2048) NULL,
+	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED
+(
+	CC_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS(
+	CTC_TXNID bigint NULL,
+	CTC_DATABASE nvarchar(128) NOT NULL,
+	CTC_TABLE nvarchar(128) NULL,
+	CTC_PARTITION nvarchar(767) NULL
+);
+
+CREATE TABLE HIVE_LOCKS(
+	HL_LOCK_EXT_ID bigint NOT NULL,
+	HL_LOCK_INT_ID bigint NOT NULL,
+	HL_TXNID bigint NULL,
+	HL_DB nvarchar(128) NOT NULL,
+	HL_TABLE nvarchar(128) NULL,
+	HL_PARTITION nvarchar(767) NULL,
+	HL_LOCK_STATE char(1) NOT NULL,
+	HL_LOCK_TYPE char(1) NOT NULL,
+	HL_LAST_HEARTBEAT bigint NOT NULL,
+	HL_ACQUIRED_AT bigint NULL,
+	HL_USER nvarchar(128) NOT NULL,
+	HL_HOST nvarchar(128) NOT NULL,
+	HL_HEARTBEAT_COUNT int NULL,
+	HL_AGENT_INFO nvarchar(128) NULL,
+	HL_BLOCKEDBY_EXT_ID bigint NULL,
+	HL_BLOCKEDBY_INT_ID bigint NULL,
+PRIMARY KEY CLUSTERED
+(
+	HL_LOCK_EXT_ID ASC,
+	HL_LOCK_INT_ID ASC
+)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+	NCQ_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE NEXT_LOCK_ID(
+	NL_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE NEXT_TXN_ID(
+	NTXN_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
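+-- Illustration only (kept commented so the init script is unchanged): the
+-- metastore hands out transaction ids by reading NTXN_NEXT and bumping it
+-- inside one short database transaction, roughly like the sketch below. The
+-- exact statements live in the metastore's TxnHandler, so treat this as an
+-- approximation.
+--
+--   SELECT NTXN_NEXT FROM NEXT_TXN_ID;
+--   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1; -- + N when opening N txns
+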
+CREATE TABLE TXNS(
+	TXN_ID bigint NOT NULL,
+	TXN_STATE char(1) NOT NULL,
+	TXN_STARTED bigint NOT NULL,
+	TXN_LAST_HEARTBEAT bigint NOT NULL,
+	TXN_USER nvarchar(128) NOT NULL,
+	TXN_HOST nvarchar(128) NOT NULL,
+	TXN_AGENT_INFO nvarchar(128) NULL,
+	TXN_META_INFO nvarchar(128) NULL,
+	TXN_HEARTBEAT_COUNT int NULL,
+PRIMARY KEY CLUSTERED
+(
+	TXN_ID ASC
+)
+);
+
+CREATE TABLE TXN_COMPONENTS(
+	TC_TXNID bigint NULL,
+	TC_DATABASE nvarchar(128) NOT NULL,
+	TC_TABLE nvarchar(128) NULL,
+	TC_PARTITION nvarchar(767) NULL,
+	TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 nvarchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT nvarchar(255) NULL,
+  PRIMARY KEY CLUSTERED
+(
+    MT_KEY1 ASC,
+    MT_KEY2 ASC
+)
+);
+
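+-- Note: AUX_TABLE rows are used by the metastore as database-side mutexes; a
+-- component takes a lock by selecting its key row inside a transaction (on
+-- SQL Server, roughly SELECT ... WITH (UPDLOCK)). Treat the exact locking
+-- hints as an approximation of what TxnHandler issues.
+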
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID BIGINT,
+  CHILD_INTEGER_IDX INT,
+  CHILD_TBL_ID BIGINT,
+  PARENT_CD_ID BIGINT NOT NULL,
+  PARENT_INTEGER_IDX INT NOT NULL,
+  PARENT_TBL_ID BIGINT NOT NULL,
+  POSITION INT NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE SMALLINT NOT NULL,
+  UPDATE_RULE SMALLINT,
+  DELETE_RULE SMALLINT,
+  ENABLE_VALIDATE_RELY SMALLINT NOT NULL
+) ;
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+
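+-- Illustration only (commented out): KEY_CONSTRAINTS stores one row per key
+-- column rather than one row per constraint; POSITION orders the columns and
+-- CONSTRAINT_TYPE is a small integer code defined by the metastore's
+-- MConstraint model. All values below are hypothetical placeholders.
+--
+--   INSERT INTO KEY_CONSTRAINTS (PARENT_CD_ID, PARENT_INTEGER_IDX,
+--     PARENT_TBL_ID, POSITION, CONSTRAINT_NAME, CONSTRAINT_TYPE,
+--     ENABLE_VALIDATE_RELY)
+--   VALUES (1, 0, 42, 0, 'pk_example', 0, 0);
+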
+CREATE TABLE WRITE_SET (
+  WS_DATABASE nvarchar(128) NOT NULL,
+  WS_TABLE nvarchar(128) NOT NULL,
+  WS_PARTITION nvarchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.3.0', 'Hive release version 2.3.0');
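+
+-- Illustration only (commented out): tools such as schematool decide whether
+-- an upgrade is needed by reading this row back, roughly:
+--
+--   SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;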

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
new file mode 100644
index 0000000..e8e0fd2
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql
@@ -0,0 +1,1112 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+------------------------------------------------------------------
+-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+------------------------------------------------------------------
+-- Complete schema required for the following classes:
+--     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+--     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+--     org.apache.hadoop.hive.metastore.model.MDatabase
+--     org.apache.hadoop.hive.metastore.model.MDelegationToken
+--     org.apache.hadoop.hive.metastore.model.MFieldSchema
+--     org.apache.hadoop.hive.metastore.model.MFunction
+--     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+--     org.apache.hadoop.hive.metastore.model.MIndex
+--     org.apache.hadoop.hive.metastore.model.MMasterKey
+--     org.apache.hadoop.hive.metastore.model.MOrder
+--     org.apache.hadoop.hive.metastore.model.MPartition
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+--     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+--     org.apache.hadoop.hive.metastore.model.MResourceUri
+--     org.apache.hadoop.hive.metastore.model.MRole
+--     org.apache.hadoop.hive.metastore.model.MRoleMap
+--     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+--     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+--     org.apache.hadoop.hive.metastore.model.MStringList
+--     org.apache.hadoop.hive.metastore.model.MTable
+--     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+--     org.apache.hadoop.hive.metastore.model.MType
+--     org.apache.hadoop.hive.metastore.model.MVersionTable
+--
+-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID int NOT NULL,
+    MASTER_KEY nvarchar(767) NULL
+);
+
+ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DEFERRED_REBUILD bit NOT NULL,
+    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+    INDEX_NAME nvarchar(128) NULL,
+    INDEX_TBL_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    ORIG_TBL_ID bigint NULL,
+    SD_ID bigint NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+CREATE TABLE PART_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    BIT_VECTOR varbinary(max) NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    PART_ID bigint NULL,
+    PARTITION_NAME nvarchar(767) NOT NULL,
+    "TABLE_NAME" nvarchar(256) NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID bigint NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    ROLE_NAME nvarchar(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    PART_NAME nvarchar(767) NULL,
+    SD_ID bigint NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+CREATE TABLE CDS
+(
+    CD_ID bigint NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+CREATE TABLE VERSION
+(
+    VER_ID bigint NOT NULL,
+    SCHEMA_VERSION nvarchar(127) NOT NULL,
+    VERSION_COMMENT nvarchar(255) NOT NULL
+);
+
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    USER_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_COL_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    DB_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+CREATE TABLE TAB_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    BIT_VECTOR varbinary(max) NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    TBL_ID bigint NULL,
+    "TABLE_NAME" nvarchar(256) NOT NULL
+);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID bigint NOT NULL,
+    TYPE_NAME nvarchar(128) NULL,
+    TYPE1 nvarchar(767) NULL,
+    TYPE2 nvarchar(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID bigint NOT NULL,
+    "DESC" nvarchar(4000) NULL,
+    DB_LOCATION_URI nvarchar(4000) NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_COL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT nvarchar(767) NOT NULL,
+    TOKEN nvarchar(767) NULL
+);
+
+ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    SLIB nvarchar(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+CREATE TABLE FUNCS
+(
+    FUNC_ID bigint NOT NULL,
+    CLASS_NAME nvarchar(4000) NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    FUNC_NAME nvarchar(128) NULL,
+    FUNC_TYPE int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID bigint NOT NULL,
+    ADD_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    ROLE_ID bigint NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    OWNER nvarchar(767) NULL,
+    RETENTION int NOT NULL,
+    SD_ID bigint NULL,
+    TBL_NAME nvarchar(256) NULL,
+    TBL_TYPE nvarchar(128) NULL,
+    VIEW_EXPANDED_TEXT text NULL,
+    VIEW_ORIGINAL_TEXT text NULL,
+    IS_REWRITE_ENABLED bit NOT NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID bigint NOT NULL,
+    CD_ID bigint NULL,
+    INPUT_FORMAT nvarchar(4000) NULL,
+    IS_COMPRESSED bit NOT NULL,
+    IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+    LOCATION nvarchar(4000) NULL,
+    NUM_BUCKETS int NOT NULL,
+    OUTPUT_FORMAT nvarchar(4000) NULL,
+    SERDE_ID bigint NULL
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID bigint NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    EVENT_TIME bigint NOT NULL,
+    EVENT_TYPE int NOT NULL,
+    PARTITION_NAME nvarchar(767) NULL,
+    TBL_NAME nvarchar(256) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(767) NULL,
+    "ORDER" int NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_NAMES for join relationship
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID bigint NOT NULL,
+    SKEWED_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID bigint NOT NULL,
+    STRING_LIST_ID_KID bigint NOT NULL,
+    LOCATION nvarchar(4000) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+-- Table SKEWED_STRING_LIST_VALUES for join relationship
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID bigint NOT NULL,
+    STRING_LIST_VALUE nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID bigint NOT NULL,
+    PART_KEY_VAL nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID bigint NOT NULL,
+    PKEY_COMMENT nvarchar(4000) NULL,
+    PKEY_NAME nvarchar(128) NOT NULL,
+    PKEY_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table SKEWED_VALUES for join relationship
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID bigint NOT NULL,
+    STRING_LIST_ID_EID bigint NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table FUNC_RU for join relationship
+CREATE TABLE FUNC_RU
+(
+    FUNC_ID bigint NOT NULL,
+    RESOURCE_TYPE int NOT NULL,
+    RESOURCE_URI nvarchar(4000) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    FIELD_NAME nvarchar(128) NOT NULL,
+    FIELD_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID bigint NOT NULL,
+    BUCKET_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(180) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    "COLUMN_NAME" nvarchar(767) NOT NULL,
+    TYPE_NAME varchar(max) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE varchar(max) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID bigint NOT NULL,
+    EVENT_ID bigint NOT NULL,
+    EVENT_TIME int NOT NULL,
+    EVENT_TYPE nvarchar(32) NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    TBL_NAME nvarchar(256) NULL,
+    MESSAGE_FORMAT nvarchar(16),
+    MESSAGE text NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID bigint NOT NULL,
+    NEXT_EVENT_ID bigint NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
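+-- Illustration only (commented out): event ids for NOTIFICATION_LOG are
+-- allocated from the single NOTIFICATION_SEQUENCE row, roughly:
+--
+--   SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE;
+--   UPDATE NOTIFICATION_SEQUENCE SET NEXT_EVENT_ID = NEXT_EVENT_ID + 1;
+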
+-- Tables to manage resource plans.
+
+CREATE TABLE WM_RESOURCEPLAN
+(
+    RP_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NOT NULL,
+    QUERY_PARALLELISM int,
+    STATUS nvarchar(20) NOT NULL
+);
+
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+
+CREATE TABLE WM_POOL
+(
+    POOL_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    PATH nvarchar(1024) NOT NULL,
+    PARENT_POOL_ID bigint,
+    ALLOC_FRACTION float,
+    QUERY_PARALLELISM int
+);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+
+CREATE TABLE WM_TRIGGER
+(
+    TRIGGER_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NOT NULL,
+    TRIGGER_EXPRESSION nvarchar(1024),
+    ACTION_EXPRESSION nvarchar(1024)
+);
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+
+CREATE TABLE WM_POOL_TO_TRIGGER
+(
+    POOL_ID bigint NOT NULL,
+    TRIGGER_ID bigint NOT NULL
+);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+
+CREATE TABLE WM_MAPPING
+(
+    MAPPING_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    ENTITY_TYPE nvarchar(10) NOT NULL,
+    ENTITY_NAME nvarchar(128) NOT NULL,
+    POOL_ID bigint NOT NULL,
+    ORDERING int
+);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+
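+-- Illustration only (commented out): a resource plan ties these tables
+-- together: WM_POOL and WM_TRIGGER each point at a WM_RESOURCEPLAN row,
+-- WM_POOL_TO_TRIGGER links pools to triggers, and WM_MAPPING routes an
+-- entity (for example a user) to a pool. All ids, names, and expressions
+-- below are hypothetical.
+--
+--   INSERT INTO WM_RESOURCEPLAN VALUES (1, 'daytime', 4, 'DISABLED');
+--   INSERT INTO WM_POOL VALUES (10, 1, 'default', NULL, 1.0, 4);
+--   INSERT INTO WM_TRIGGER VALUES (100, 1, 'slow_query',
+--     'ELAPSED_TIME > 60000', 'KILL');
+--   INSERT INTO WM_POOL_TO_TRIGGER VALUES (10, 100);
+--   INSERT INTO WM_MAPPING VALUES (1000, 1, 'USER', 'bob', 10, 0);
+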
+-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+
+
+-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+
+-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_NAMES
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST_VALUES
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table SKEWED_VALUES
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+
+CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table FUNC_RU
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+-- Constraints for resource plan tables.
+
+CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+
+CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These tables are not defined in package.jdo, so if you regenerate this file you need to manually add this section back.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+CREATE TABLE COMPACTION_QUEUE(
+	CQ_ID bigint NOT NULL,
+	CQ_DATABASE nvarchar(128) NOT NULL,
+	CQ_TABLE nvarchar(128) NOT NULL,
+	CQ_PARTITION nvarchar(767) NULL,
+	CQ_STATE char(1) NOT NULL,
+	CQ_TYPE char(1) NOT NULL,
+	CQ_TBLPROPERTIES nvarchar(2048) NULL,
+	CQ_WORKER_ID nvarchar(128) NULL,
+	CQ_START bigint NULL,
+	CQ_RUN_AS nvarchar(128) NULL,
+	CQ_HIGHEST_TXN_ID bigint NULL,
+	CQ_META_INFO varbinary(2048) NULL,
+	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED
+(
+	CQ_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+	CC_ID bigint NOT NULL,
+	CC_DATABASE nvarchar(128) NOT NULL,
+	CC_TABLE nvarchar(128) NOT NULL,
+	CC_PARTITION nvarchar(767) NULL,
+	CC_STATE char(1) NOT NULL,
+	CC_TYPE char(1) NOT NULL,
+	CC_TBLPROPERTIES nvarchar(2048) NULL,
+	CC_WORKER_ID nvarchar(128) NULL,
+	CC_START bigint NULL,
+	CC_END bigint NULL,
+	CC_RUN_AS nvarchar(128) NULL,
+	CC_HIGHEST_TXN_ID bigint NULL,
+	CC_META_INFO varbinary(2048) NULL,
+	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED
+(
+	CC_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS(
+	CTC_TXNID bigint NULL,
+	CTC_DATABASE nvarchar(128) NOT NULL,
+	CTC_TABLE nvarchar(128) NULL,
+	CTC_PARTITION nvarchar(767) NULL
+);
+
+CREATE TABLE HIVE_LOCKS(
+	HL_LOCK_EXT_ID bigint NOT NULL,
+	HL_LOCK_INT_ID bigint NOT NULL,
+	HL_TXNID bigint NULL,
+	HL_DB nvarchar(128) NOT NULL,
+	HL_TABLE nvarchar(128) NULL,
+	HL_PARTITION nvarchar(767) NULL,
+	HL_LOCK_STATE char(1) NOT NULL,
+	HL_LOCK_TYPE char(1) NOT NULL,
+	HL_LAST_HEARTBEAT bigint NOT NULL,
+	HL_ACQUIRED_AT bigint NULL,
+	HL_USER nvarchar(128) NOT NULL,
+	HL_HOST nvarchar(128) NOT NULL,
+	HL_HEARTBEAT_COUNT int NULL,
+	HL_AGENT_INFO nvarchar(128) NULL,
+	HL_BLOCKEDBY_EXT_ID bigint NULL,
+	HL_BLOCKEDBY_INT_ID bigint NULL,
+PRIMARY KEY CLUSTERED
+(
+	HL_LOCK_EXT_ID ASC,
+	HL_LOCK_INT_ID ASC
+)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+	NCQ_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE NEXT_LOCK_ID(
+	NL_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE NEXT_TXN_ID(
+	NTXN_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE TXNS(
+	TXN_ID bigint NOT NULL,
+	TXN_STATE char(1) NOT NULL,
+	TXN_STARTED bigint NOT NULL,
+	TXN_LAST_HEARTBEAT bigint NOT NULL,
+	TXN_USER nvarchar(128) NOT NULL,
+	TXN_HOST nvarchar(128) NOT NULL,
+	TXN_AGENT_INFO nvarchar(128) NULL,
+	TXN_META_INFO nvarchar(128) NULL,
+	TXN_HEARTBEAT_COUNT int NULL,
+PRIMARY KEY CLUSTERED
+(
+	TXN_ID ASC
+)
+);
+
+CREATE TABLE TXN_COMPONENTS(
+	TC_TXNID bigint NULL,
+	TC_DATABASE nvarchar(128) NOT NULL,
+	TC_TABLE nvarchar(128) NULL,
+	TC_PARTITION nvarchar(767) NULL,
+	TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+
+CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 nvarchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT nvarchar(255) NULL,
+  PRIMARY KEY CLUSTERED
+(
+    MT_KEY1 ASC,
+    MT_KEY2 ASC
+)
+);
+
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID BIGINT,
+  CHILD_INTEGER_IDX INT,
+  CHILD_TBL_ID BIGINT,
+  PARENT_CD_ID BIGINT NOT NULL,
+  PARENT_INTEGER_IDX INT NOT NULL,
+  PARENT_TBL_ID BIGINT NOT NULL,
+  POSITION INT NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE SMALLINT NOT NULL,
+  UPDATE_RULE SMALLINT,
+  DELETE_RULE SMALLINT,
+  ENABLE_VALIDATE_RELY SMALLINT NOT NULL
+) ;
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+
+CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE nvarchar(128) NOT NULL,
+  WS_TABLE nvarchar(128) NOT NULL,
+  WS_PARTITION nvarchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE TABLE METASTORE_DB_PROPERTIES (
+  PROPERTY_KEY VARCHAR(255) NOT NULL,
+  PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+  DESCRIPTION VARCHAR(1000)
+);
+
+ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '3.0.0', 'Hive release version 3.0.0');
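
The NEXT_TXN_ID, NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID tables above each hold a single row seeded with 1; the metastore hands out ids by reading and incrementing that row inside one database transaction. A minimal T-SQL sketch of the open-transaction path, not taken from this patch; the 'o' state code, the epoch-millisecond timestamps, and the user/host values are illustrative assumptions:

BEGIN TRANSACTION;
DECLARE @next bigint;
-- Read the current counter under an update lock so concurrent openers serialize.
SELECT @next = NTXN_NEXT FROM NEXT_TXN_ID WITH (UPDLOCK);
UPDATE NEXT_TXN_ID SET NTXN_NEXT = @next + 1;
-- Record the new transaction as open ('o'); start and heartbeat times are assumed to be epoch millis.
INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST)
VALUES (@next, 'o', 1513900000000, 1513900000000, 'hive', 'example-host');
COMMIT TRANSACTION;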

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
new file mode 100644
index 0000000..60d51ff
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql
@@ -0,0 +1,106 @@
+SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;
+
+--:r 026-HIVE-16556.mssql.sql
+CREATE TABLE METASTORE_DB_PROPERTIES (
+  PROPERTY_KEY VARCHAR(255) NOT NULL,
+  PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+  DESCRIPTION VARCHAR(1000)
+);
+
+ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+
+--:r 027-HIVE-16575.mssql.sql
+CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+
+--:r 028-HIVE-16922.mssql.sql
+UPDATE SERDE_PARAMS
+SET PARAM_KEY='collection.delim'
+WHERE PARAM_KEY='colelction.delim';
+
+--:r 029-HIVE-16997.mssql.sql
+ALTER TABLE PART_COL_STATS ADD BIT_VECTOR VARBINARY(MAX);
+ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR VARBINARY(MAX);
+
+--:r 030-HIVE-16886.mssql.sql
+INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+
+--:r 031-HIVE-17566.mssql.sql
+CREATE TABLE WM_RESOURCEPLAN
+(
+    RP_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NOT NULL,
+    QUERY_PARALLELISM int,
+    STATUS nvarchar(20) NOT NULL
+);
+
+ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+
+
+CREATE TABLE WM_POOL
+(
+    POOL_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    PATH nvarchar(1024) NOT NULL,
+    PARENT_POOL_ID bigint,
+    ALLOC_FRACTION float,
+    QUERY_PARALLELISM int
+);
+
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK2 FOREIGN KEY (PARENT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+
+CREATE TABLE WM_TRIGGER
+(
+    TRIGGER_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NOT NULL,
+    TRIGGER_EXPRESSION nvarchar(1024),
+    ACTION_EXPRESSION nvarchar(1024)
+);
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+
+ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+
+CREATE TABLE WM_POOL_TO_TRIGGER
+(
+    POOL_ID bigint NOT NULL,
+    TRIGGER_ID bigint NOT NULL
+);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+
+
+CREATE TABLE WM_MAPPING
+(
+    MAPPING_ID bigint NOT NULL,
+    RP_ID bigint NOT NULL,
+    ENTITY_TYPE nvarchar(10) NOT NULL,
+    ENTITY_NAME nvarchar(128) NOT NULL,
+    POOL_ID bigint NOT NULL,
+    ORDERING int
+);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+
+CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+
+ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+
+UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' WHERE VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE;
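
The upgrade ends by stamping the VERSION table, so a quick way to confirm the script ran to completion is to read that row back. This check assumes the single-row VERSION layout used by the init scripts above:

SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT
FROM VERSION
WHERE VER_ID = 1;
-- Expected after a successful upgrade: 1, '3.0.0', 'Hive release version 3.0.0'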

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql b/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
new file mode 100644
index 0000000..15531df
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mssql/upgrade.order.mssql
@@ -0,0 +1 @@
+2.3.0-to-3.0.0

http://git-wip-us.apache.org/repos/asf/hive/blob/b9526a7a/standalone-metastore/src/main/sql/mysql/create-user.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/sql/mysql/create-user.mysql.sql b/standalone-metastore/src/main/sql/mysql/create-user.mysql.sql
new file mode 100644
index 0000000..811f6f5
--- /dev/null
+++ b/standalone-metastore/src/main/sql/mysql/create-user.mysql.sql
@@ -0,0 +1,8 @@
+CREATE DATABASE _REPLACE_WITH_DB_;
+CREATE USER '_REPLACE_WITH_USER_'@'localhost' IDENTIFIED BY '_REPLACE_WITH_PASSWD_';
+CREATE USER '_REPLACE_WITH_USER_'@'%' IDENTIFIED BY '_REPLACE_WITH_PASSWD_';
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM '_REPLACE_WITH_USER_'@'localhost';
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM '_REPLACE_WITH_USER_'@'%';
+GRANT ALL PRIVILEGES ON _REPLACE_WITH_DB_.* TO '_REPLACE_WITH_USER_'@'localhost';
+GRANT ALL PRIVILEGES ON _REPLACE_WITH_DB_.* TO '_REPLACE_WITH_USER_'@'%';
+FLUSH PRIVILEGES;
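
The _REPLACE_WITH_* tokens are placeholders to be substituted before the script is run. With hypothetical values (database "metastore", user "hiveuser", password "hivepw", all examples only), the substituted script would read:

CREATE DATABASE metastore;
CREATE USER 'hiveuser'@'localhost' IDENTIFIED BY 'hivepw';
CREATE USER 'hiveuser'@'%' IDENTIFIED BY 'hivepw';
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'hiveuser'@'localhost';
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'hiveuser'@'%';
GRANT ALL PRIVILEGES ON metastore.* TO 'hiveuser'@'localhost';
GRANT ALL PRIVILEGES ON metastore.* TO 'hiveuser'@'%';
FLUSH PRIVILEGES;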


[05/50] [abbrv] hive git commit: HIVE-18271: Druid Insert into fails with exception when committing files (Jason Dere, reviewed by Ashutosh Chauhan)

Posted by ga...@apache.org.
HIVE-18271: Druid Insert into fails with exception when committing files (Jason Dere, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a96564cb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a96564cb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a96564cb

Branch: refs/heads/standalone-metastore
Commit: a96564cbc787ed8665c1bb6b3c3a0e9d8440b926
Parents: 8ab523b
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu Dec 14 10:59:41 2017 -0800
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Thu Dec 14 10:59:41 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/FileSinkOperator.java     | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a96564cb/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 219d1ad..1ec6ac8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -403,7 +403,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected transient FileSystem fs;
   protected transient Serializer serializer;
   protected final transient LongWritable row_count = new LongWritable();
-  private transient boolean isNativeTable = true;
 
   /**
    * The evaluators for the multiFile sprayer. If the table under consideration has 1000 buckets,
@@ -486,7 +485,6 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     try {
       this.hconf = hconf;
       filesCreated = false;
-      isNativeTable = !conf.getTableInfo().isNonNative();
       isTemporary = conf.isTemporary();
       multiFileSpray = conf.isMultiFileSpray();
       this.isBucketed = hconf.getInt(hive_metastoreConstants.BUCKET_COUNT, 0) > 0;
@@ -716,7 +714,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       assert filesIdx == numFiles;
 
       // in recent hadoop versions, use deleteOnExit to clean tmp files.
-      if (isNativeTable && fs != null && fsp != null && !conf.isMmTable()) {
+      if (isNativeTable() && fs != null && fsp != null && !conf.isMmTable()) {
         autoDelete = fs.deleteOnExit(fsp.outPaths[0]);
       }
     } catch (Exception e) {
@@ -730,7 +728,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   protected void createBucketForFileIdx(FSPaths fsp, int filesIdx)
       throws HiveException {
     try {
-      fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories);
+      fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable(), isSkewedStoredAsSubDirectories);
       if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
         Utilities.FILE_OP_LOGGER.trace("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx]
           + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path "
@@ -740,7 +738,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]);
       }
 
-      if (isNativeTable && !conf.isMmTable()) {
+      if (isNativeTable() && !conf.isMmTable()) {
         // in recent hadoop versions, use deleteOnExit to clean tmp files.
         autoDelete = fs.deleteOnExit(fsp.outPaths[filesIdx]);
       }
@@ -1318,7 +1316,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           }
         }
 
-        if (isNativeTable) {
+        if (isNativeTable()) {
           fsp.commit(fs, commitPaths);
         }
       }
@@ -1335,7 +1333,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       // Hadoop always call close() even if an Exception was thrown in map() or
       // reduce().
       for (FSPaths fsp : valToPaths.values()) {
-        fsp.abortWriters(fs, abort, !autoDelete && isNativeTable && !conf.isMmTable());
+        fsp.abortWriters(fs, abort, !autoDelete && isNativeTable() && !conf.isMmTable());
       }
     }
     fsp = prevFsp = null;
@@ -1359,7 +1357,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
   public void jobCloseOp(Configuration hconf, boolean success)
       throws HiveException {
     try {
-      if ((conf != null) && isNativeTable) {
+      if ((conf != null) && isNativeTable()) {
         Path specPath = conf.getDirName();
         String unionSuffix = null;
         DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
@@ -1585,4 +1583,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     }
     return conf;
   }
+
+  private boolean isNativeTable() {
+    return !conf.getTableInfo().isNonNative();
+  }
 }