Posted to commits@hive.apache.org by se...@apache.org on 2018/07/03 18:00:28 UTC

[04/10] hive git commit: HIVE-19975 : Checking writeIdList per table may not check the commit level of a partition on a partitioned table (Sergey Shelukhin)
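
For context: this part of the regenerated client keeps the legacy positional
alter_partitions_with_environment_context call (returning void) and adds a
request-based alter_partitions_with_environment_context_req variant that
returns an AlterPartitionsResponse. Below is a minimal sketch of how a caller
might use both; the two method signatures come from the diff itself, while
the no-arg constructor and the setDbName/setTableName/setPartitions setters
on AlterPartitionsRequest are assumed Thrift-javabean conventions and are not
shown in this hunk.

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.*;

    public class AlterPartitionsSketch {
      static void alterBothWays(ThriftHiveMetastore.Client client,
          String db, String tbl, List<Partition> parts,
          EnvironmentContext ctx) throws Exception {
        // Legacy positional call, restored by this patch for wire
        // compatibility; it returns void and reports failures only via the
        // declared InvalidOperationException / MetaException / TException.
        client.alter_partitions_with_environment_context(db, tbl, parts, ctx);

        // New request-based variant. Bundling the arguments into
        // AlterPartitionsRequest is what lets the request also carry the
        // writeId/validWriteIdList needed to track commit level per
        // partition (the point of HIVE-19975); the environment context
        // presumably travels inside the request as well.
        AlterPartitionsRequest req = new AlterPartitionsRequest();
        req.setDbName(db);        // assumed setter names
        req.setTableName(tbl);
        req.setPartitions(parts);
        AlterPartitionsResponse resp =
            client.alter_partitions_with_environment_context_req(req);
      }
    }
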

http://git-wip-us.apache.org/repos/asf/hive/blob/a47a80fe/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 10e2388..183f977 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -208,7 +208,9 @@ import org.slf4j.LoggerFactory;
 
     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
-    public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+    public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+
+    public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
@@ -628,7 +630,9 @@ import org.slf4j.LoggerFactory;
 
     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
-    public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
@@ -3405,23 +3409,52 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
-    public AlterPartitionsResponse alter_partitions_with_environment_context(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
     {
-      send_alter_partitions_with_environment_context(req);
-      return recv_alter_partitions_with_environment_context();
+      send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context);
+      recv_alter_partitions_with_environment_context();
     }
 
-    public void send_alter_partitions_with_environment_context(AlterPartitionsRequest req) throws org.apache.thrift.TException
+    public void send_alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws org.apache.thrift.TException
     {
       alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args();
-      args.setReq(req);
+      args.setDb_name(db_name);
+      args.setTbl_name(tbl_name);
+      args.setNew_parts(new_parts);
+      args.setEnvironment_context(environment_context);
       sendBase("alter_partitions_with_environment_context", args);
     }
 
-    public AlterPartitionsResponse recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    public void recv_alter_partitions_with_environment_context() throws InvalidOperationException, MetaException, org.apache.thrift.TException
     {
       alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
       receiveBase(result, "alter_partitions_with_environment_context");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_alter_partitions_with_environment_context_req(req);
+      return recv_alter_partitions_with_environment_context_req();
+    }
+
+    public void send_alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws org.apache.thrift.TException
+    {
+      alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args();
+      args.setReq(req);
+      sendBase("alter_partitions_with_environment_context_req", args);
+    }
+
+    public AlterPartitionsResponse recv_alter_partitions_with_environment_context_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+      receiveBase(result, "alter_partitions_with_environment_context_req");
       if (result.isSetSuccess()) {
         return result.success;
       }
@@ -3431,7 +3464,7 @@ import org.slf4j.LoggerFactory;
       if (result.o2 != null) {
         throw result.o2;
       }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context failed: unknown result");
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result");
     }
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
@@ -9896,23 +9929,64 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    public void alter_partitions_with_environment_context(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      alter_partitions_with_environment_context_call method_call = new alter_partitions_with_environment_context_call(db_name, tbl_name, new_parts, environment_context, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }
 
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private AlterPartitionsRequest req;
-      public alter_partitions_with_environment_context_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private String db_name;
+      private String tbl_name;
+      private List<Partition> new_parts;
+      private EnvironmentContext environment_context;
+      public alter_partitions_with_environment_context_call(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
-        this.req = req;
+        this.db_name = db_name;
+        this.tbl_name = tbl_name;
+        this.new_parts = new_parts;
+        this.environment_context = environment_context;
       }
 
       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context", org.apache.thrift.protocol.TMessageType.CALL, 0));
         alter_partitions_with_environment_context_args args = new alter_partitions_with_environment_context_args();
+        args.setDb_name(db_name);
+        args.setTbl_name(tbl_name);
+        args.setNew_parts(new_parts);
+        args.setEnvironment_context(environment_context);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_alter_partitions_with_environment_context();
+      }
+    }
+
+    public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      alter_partitions_with_environment_context_req_call method_call = new alter_partitions_with_environment_context_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private AlterPartitionsRequest req;
+      public alter_partitions_with_environment_context_req_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args();
         args.setReq(req);
         args.write(prot);
         prot.writeMessageEnd();
@@ -9924,7 +9998,7 @@ import org.slf4j.LoggerFactory;
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
         org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_alter_partitions_with_environment_context();
+        return (new Client(prot)).recv_alter_partitions_with_environment_context_req();
       }
     }
 
@@ -14119,6 +14193,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partition", new alter_partition());
       processMap.put("alter_partitions", new alter_partitions());
       processMap.put("alter_partitions_with_environment_context", new alter_partitions_with_environment_context());
+      processMap.put("alter_partitions_with_environment_context_req", new alter_partitions_with_environment_context_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
@@ -16465,7 +16540,33 @@ import org.slf4j.LoggerFactory;
       public alter_partitions_with_environment_context_result getResult(I iface, alter_partitions_with_environment_context_args args) throws org.apache.thrift.TException {
         alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
         try {
-          result.success = iface.alter_partitions_with_environment_context(args.req);
+          iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context);
+        } catch (InvalidOperationException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_partitions_with_environment_context_req_args> {
+      public alter_partitions_with_environment_context_req() {
+        super("alter_partitions_with_environment_context_req");
+      }
+
+      public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() {
+        return new alter_partitions_with_environment_context_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public alter_partitions_with_environment_context_req_result getResult(I iface, alter_partitions_with_environment_context_req_args args) throws org.apache.thrift.TException {
+        alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+        try {
+          result.success = iface.alter_partitions_with_environment_context_req(args.req);
         } catch (InvalidOperationException o1) {
           result.o1 = o1;
         } catch (MetaException o2) {
@@ -19673,6 +19774,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partition", new alter_partition());
       processMap.put("alter_partitions", new alter_partitions());
       processMap.put("alter_partitions_with_environment_context", new alter_partitions_with_environment_context());
+      processMap.put("alter_partitions_with_environment_context_req", new alter_partitions_with_environment_context_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
@@ -25023,7 +25125,7 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_args, AlterPartitionsResponse> {
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_args, Void> {
       public alter_partitions_with_environment_context() {
         super("alter_partitions_with_environment_context");
       }
@@ -25032,12 +25134,11 @@ import org.slf4j.LoggerFactory;
         return new alter_partitions_with_environment_context_args();
       }
 
-      public AsyncMethodCallback<AlterPartitionsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<AlterPartitionsResponse>() { 
-          public void onComplete(AlterPartitionsResponse o) {
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
             alter_partitions_with_environment_context_result result = new alter_partitions_with_environment_context_result();
-            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -25080,25 +25181,26 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
-        iface.alter_partitions_with_environment_context(args.req,resultHandler);
+      public void start(I iface, alter_partitions_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_partitions_with_environment_context(args.db_name, args.tbl_name, args.new_parts, args.environment_context,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
-      public alter_partition_with_environment_context() {
-        super("alter_partition_with_environment_context");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_req_args, AlterPartitionsResponse> {
+      public alter_partitions_with_environment_context_req() {
+        super("alter_partitions_with_environment_context_req");
       }
 
-      public alter_partition_with_environment_context_args getEmptyArgsInstance() {
-        return new alter_partition_with_environment_context_args();
+      public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() {
+        return new alter_partitions_with_environment_context_req_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<AlterPartitionsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
+        return new AsyncMethodCallback<AlterPartitionsResponse>() { 
+          public void onComplete(AlterPartitionsResponse o) {
+            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+            result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -25110,7 +25212,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
+            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
             if (e instanceof InvalidOperationException) {
                         result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
@@ -25141,25 +25243,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
+      public void start(I iface, alter_partitions_with_environment_context_req_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
+        iface.alter_partitions_with_environment_context_req(args.req,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
-      public rename_partition() {
-        super("rename_partition");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partition_with_environment_context_args, Void> {
+      public alter_partition_with_environment_context() {
+        super("alter_partition_with_environment_context");
       }
 
-      public rename_partition_args getEmptyArgsInstance() {
-        return new rename_partition_args();
+      public alter_partition_with_environment_context_args getEmptyArgsInstance() {
+        return new alter_partition_with_environment_context_args();
       }
 
       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Void>() { 
           public void onComplete(Void o) {
-            rename_partition_result result = new rename_partition_result();
+            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -25171,7 +25273,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            rename_partition_result result = new rename_partition_result();
+            alter_partition_with_environment_context_result result = new alter_partition_with_environment_context_result();
             if (e instanceof InvalidOperationException) {
                         result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
@@ -25202,254 +25304,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, rename_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_has_valid_characters<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_has_valid_characters_args, Boolean> {
-      public partition_name_has_valid_characters() {
-        super("partition_name_has_valid_characters");
-      }
-
-      public partition_name_has_valid_characters_args getEmptyArgsInstance() {
-        return new partition_name_has_valid_characters_args();
-      }
-
-      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Boolean>() { 
-          public void onComplete(Boolean o) {
-            partition_name_has_valid_characters_result result = new partition_name_has_valid_characters_result();
-            result.success = o;
-            result.setSuccessIsSet(true);
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            partition_name_has_valid_characters_result result = new partition_name_has_valid_characters_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, partition_name_has_valid_characters_args args, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws TException {
-        iface.partition_name_has_valid_characters(args.part_vals, args.throw_exception,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_config_value<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_config_value_args, String> {
-      public get_config_value() {
-        super("get_config_value");
-      }
-
-      public get_config_value_args getEmptyArgsInstance() {
-        return new get_config_value_args();
-      }
-
-      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<String>() { 
-          public void onComplete(String o) {
-            get_config_value_result result = new get_config_value_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            get_config_value_result result = new get_config_value_result();
-            if (e instanceof ConfigValSecurityException) {
-                        result.o1 = (ConfigValSecurityException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, get_config_value_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
-        iface.get_config_value(args.name, args.defaultValue,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_to_vals<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_to_vals_args, List<String>> {
-      public partition_name_to_vals() {
-        super("partition_name_to_vals");
-      }
-
-      public partition_name_to_vals_args getEmptyArgsInstance() {
-        return new partition_name_to_vals_args();
-      }
-
-      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<String>>() { 
-          public void onComplete(List<String> o) {
-            partition_name_to_vals_result result = new partition_name_to_vals_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            partition_name_to_vals_result result = new partition_name_to_vals_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, partition_name_to_vals_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
-        iface.partition_name_to_vals(args.part_name,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_to_spec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_to_spec_args, Map<String,String>> {
-      public partition_name_to_spec() {
-        super("partition_name_to_spec");
-      }
-
-      public partition_name_to_spec_args getEmptyArgsInstance() {
-        return new partition_name_to_spec_args();
-      }
-
-      public AsyncMethodCallback<Map<String,String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Map<String,String>>() { 
-          public void onComplete(Map<String,String> o) {
-            partition_name_to_spec_result result = new partition_name_to_spec_result();
-            result.success = o;
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            partition_name_to_spec_result result = new partition_name_to_spec_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, partition_name_to_spec_args args, org.apache.thrift.async.AsyncMethodCallback<Map<String,String>> resultHandler) throws TException {
-        iface.partition_name_to_spec(args.part_name,resultHandler);
+      public void start(I iface, alter_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_partition_with_environment_context(args.db_name, args.tbl_name, args.new_part, args.environment_context,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class markPartitionForEvent<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, markPartitionForEvent_args, Void> {
-      public markPartitionForEvent() {
-        super("markPartitionForEvent");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class rename_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, rename_partition_args, Void> {
+      public rename_partition() {
+        super("rename_partition");
       }
 
-      public markPartitionForEvent_args getEmptyArgsInstance() {
-        return new markPartitionForEvent_args();
+      public rename_partition_args getEmptyArgsInstance() {
+        return new rename_partition_args();
       }
 
       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Void>() { 
           public void onComplete(Void o) {
-            markPartitionForEvent_result result = new markPartitionForEvent_result();
+            rename_partition_result result = new rename_partition_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -25461,37 +25334,17 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            markPartitionForEvent_result result = new markPartitionForEvent_result();
-            if (e instanceof MetaException) {
-                        result.o1 = (MetaException) e;
+            rename_partition_result result = new rename_partition_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof UnknownDBException) {
-                        result.o3 = (UnknownDBException) e;
-                        result.setO3IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof UnknownTableException) {
-                        result.o4 = (UnknownTableException) e;
-                        result.setO4IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof UnknownPartitionException) {
-                        result.o5 = (UnknownPartitionException) e;
-                        result.setO5IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof InvalidPartitionException) {
-                        result.o6 = (InvalidPartitionException) e;
-                        result.setO6IsSet(true);
-                        msg = result;
-            }
              else 
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -25512,25 +25365,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, markPartitionForEvent_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType,resultHandler);
+      public void start(I iface, rename_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.rename_partition(args.db_name, args.tbl_name, args.part_vals, args.new_part,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class isPartitionMarkedForEvent<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isPartitionMarkedForEvent_args, Boolean> {
-      public isPartitionMarkedForEvent() {
-        super("isPartitionMarkedForEvent");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_has_valid_characters<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_has_valid_characters_args, Boolean> {
+      public partition_name_has_valid_characters() {
+        super("partition_name_has_valid_characters");
       }
 
-      public isPartitionMarkedForEvent_args getEmptyArgsInstance() {
-        return new isPartitionMarkedForEvent_args();
+      public partition_name_has_valid_characters_args getEmptyArgsInstance() {
+        return new partition_name_has_valid_characters_args();
       }
 
       public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Boolean>() { 
           public void onComplete(Boolean o) {
-            isPartitionMarkedForEvent_result result = new isPartitionMarkedForEvent_result();
+            partition_name_has_valid_characters_result result = new partition_name_has_valid_characters_result();
             result.success = o;
             result.setSuccessIsSet(true);
             try {
@@ -25544,37 +25397,347 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            isPartitionMarkedForEvent_result result = new isPartitionMarkedForEvent_result();
+            partition_name_has_valid_characters_result result = new partition_name_has_valid_characters_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof NoSuchObjectException) {
-                        result.o2 = (NoSuchObjectException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof UnknownDBException) {
-                        result.o3 = (UnknownDBException) e;
-                        result.setO3IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof UnknownTableException) {
-                        result.o4 = (UnknownTableException) e;
-                        result.setO4IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof UnknownPartitionException) {
-                        result.o5 = (UnknownPartitionException) e;
-                        result.setO5IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof InvalidPartitionException) {
-                        result.o6 = (InvalidPartitionException) e;
-                        result.setO6IsSet(true);
-                        msg = result;
-            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, partition_name_has_valid_characters_args args, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws TException {
+        iface.partition_name_has_valid_characters(args.part_vals, args.throw_exception,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_config_value<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_config_value_args, String> {
+      public get_config_value() {
+        super("get_config_value");
+      }
+
+      public get_config_value_args getEmptyArgsInstance() {
+        return new get_config_value_args();
+      }
+
+      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<String>() { 
+          public void onComplete(String o) {
+            get_config_value_result result = new get_config_value_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_config_value_result result = new get_config_value_result();
+            if (e instanceof ConfigValSecurityException) {
+                        result.o1 = (ConfigValSecurityException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_config_value_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+        iface.get_config_value(args.name, args.defaultValue,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_to_vals<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_to_vals_args, List<String>> {
+      public partition_name_to_vals() {
+        super("partition_name_to_vals");
+      }
+
+      public partition_name_to_vals_args getEmptyArgsInstance() {
+        return new partition_name_to_vals_args();
+      }
+
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            partition_name_to_vals_result result = new partition_name_to_vals_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            partition_name_to_vals_result result = new partition_name_to_vals_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, partition_name_to_vals_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.partition_name_to_vals(args.part_name,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class partition_name_to_spec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, partition_name_to_spec_args, Map<String,String>> {
+      public partition_name_to_spec() {
+        super("partition_name_to_spec");
+      }
+
+      public partition_name_to_spec_args getEmptyArgsInstance() {
+        return new partition_name_to_spec_args();
+      }
+
+      public AsyncMethodCallback<Map<String,String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Map<String,String>>() { 
+          public void onComplete(Map<String,String> o) {
+            partition_name_to_spec_result result = new partition_name_to_spec_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            partition_name_to_spec_result result = new partition_name_to_spec_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, partition_name_to_spec_args args, org.apache.thrift.async.AsyncMethodCallback<Map<String,String>> resultHandler) throws TException {
+        iface.partition_name_to_spec(args.part_name,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class markPartitionForEvent<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, markPartitionForEvent_args, Void> {
+      public markPartitionForEvent() {
+        super("markPartitionForEvent");
+      }
+
+      public markPartitionForEvent_args getEmptyArgsInstance() {
+        return new markPartitionForEvent_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            markPartitionForEvent_result result = new markPartitionForEvent_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            markPartitionForEvent_result result = new markPartitionForEvent_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof NoSuchObjectException) {
+                        result.o2 = (NoSuchObjectException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof UnknownDBException) {
+                        result.o3 = (UnknownDBException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof UnknownTableException) {
+                        result.o4 = (UnknownTableException) e;
+                        result.setO4IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof UnknownPartitionException) {
+                        result.o5 = (UnknownPartitionException) e;
+                        result.setO5IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidPartitionException) {
+                        result.o6 = (InvalidPartitionException) e;
+                        result.setO6IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, markPartitionForEvent_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.markPartitionForEvent(args.db_name, args.tbl_name, args.part_vals, args.eventType,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class isPartitionMarkedForEvent<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, isPartitionMarkedForEvent_args, Boolean> {
+      public isPartitionMarkedForEvent() {
+        super("isPartitionMarkedForEvent");
+      }
+
+      public isPartitionMarkedForEvent_args getEmptyArgsInstance() {
+        return new isPartitionMarkedForEvent_args();
+      }
+
+      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Boolean>() { 
+          public void onComplete(Boolean o) {
+            isPartitionMarkedForEvent_result result = new isPartitionMarkedForEvent_result();
+            result.success = o;
+            result.setSuccessIsSet(true);
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            isPartitionMarkedForEvent_result result = new isPartitionMarkedForEvent_result();
+            if (e instanceof MetaException) {
+              result.o1 = (MetaException) e;
+              result.setO1IsSet(true);
+              msg = result;
+            } else if (e instanceof NoSuchObjectException) {
+              result.o2 = (NoSuchObjectException) e;
+              result.setO2IsSet(true);
+              msg = result;
+            } else if (e instanceof UnknownDBException) {
+              result.o3 = (UnknownDBException) e;
+              result.setO3IsSet(true);
+              msg = result;
+            } else if (e instanceof UnknownTableException) {
+              result.o4 = (UnknownTableException) e;
+              result.setO4IsSet(true);
+              msg = result;
+            } else if (e instanceof UnknownPartitionException) {
+              result.o5 = (UnknownPartitionException) e;
+              result.setO5IsSet(true);
+              msg = result;
+            } else if (e instanceof InvalidPartitionException) {
+              result.o6 = (InvalidPartitionException) e;
+              result.setO6IsSet(true);
+              msg = result;
             } else {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -118597,7 +118760,974 @@ import org.slf4j.LoggerFactory;
     }
 
     @Override
-    public int compareTo(get_part_specs_by_filter_result other) {
+    public int compareTo(get_part_specs_by_filter_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO1()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO2()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_part_specs_by_filter_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o2:");
+      if (this.o2 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o2);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_part_specs_by_filter_resultStandardSchemeFactory implements SchemeFactory {
+      public get_part_specs_by_filter_resultStandardScheme getScheme() {
+        return new get_part_specs_by_filter_resultStandardScheme();
+      }
+    }
+
+    private static class get_part_specs_by_filter_resultStandardScheme extends StandardScheme<get_part_specs_by_filter_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+                {
+                  org.apache.thrift.protocol.TList _list1368 = iprot.readListBegin();
+                  struct.success = new ArrayList<PartitionSpec>(_list1368.size);
+                  PartitionSpec _elem1369;
+                  for (int _i1370 = 0; _i1370 < _list1368.size; ++_i1370)
+                  {
+                    _elem1369 = new PartitionSpec();
+                    _elem1369.read(iprot);
+                    struct.success.add(_elem1369);
+                  }
+                  iprot.readListEnd();
+                }
+                struct.setSuccessIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // O2
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o2 = new NoSuchObjectException();
+                struct.o2.read(iprot);
+                struct.setO2IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
+            for (PartitionSpec _iter1371 : struct.success)
+            {
+              _iter1371.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o2 != null) {
+          oprot.writeFieldBegin(O2_FIELD_DESC);
+          struct.o2.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_part_specs_by_filter_resultTupleSchemeFactory implements SchemeFactory {
+      public get_part_specs_by_filter_resultTupleScheme getScheme() {
+        return new get_part_specs_by_filter_resultTupleScheme();
+      }
+    }
+
+    private static class get_part_specs_by_filter_resultTupleScheme extends TupleScheme<get_part_specs_by_filter_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO1()) {
+          optionals.set(1);
+        }
+        if (struct.isSetO2()) {
+          optionals.set(2);
+        }
+        oprot.writeBitSet(optionals, 3);
+        if (struct.isSetSuccess()) {
+          {
+            oprot.writeI32(struct.success.size());
+            for (PartitionSpec _iter1372 : struct.success)
+            {
+              _iter1372.write(oprot);
+            }
+          }
+        }
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+        if (struct.isSetO2()) {
+          struct.o2.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_filter_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(3);
+        if (incoming.get(0)) {
+          {
+            org.apache.thrift.protocol.TList _list1373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<PartitionSpec>(_list1373.size);
+            PartitionSpec _elem1374;
+            for (int _i1375 = 0; _i1375 < _list1373.size; ++_i1375)
+            {
+              _elem1374 = new PartitionSpec();
+              _elem1374.read(iprot);
+              struct.success.add(_elem1374);
+            }
+          }
+          struct.setSuccessIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.o2 = new NoSuchObjectException();
+          struct.o2.read(iprot);
+          struct.setO2IsSet(true);
+        }
+      }
+    }
+
+  }
+
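The result class above carries both serialization schemes the metastore uses: the standard scheme writes self-describing field headers and skips unknown fields, while the tuple scheme writes a leading bit set naming which optional fields follow, then only those fields. A rough, self-contained sketch of that presence-bitset idea, with java.util.BitSet and hypothetical fields (the concept only, not Thrift's actual wire encoding):

    // Conceptual sketch of the tuple scheme's header: one bit per optional
    // field, then only the fields whose bits are set.
    import java.util.BitSet;

    public class TupleSketch {
      public static void main(String[] args) {
        String success = "partition specs";  // pretend field 0 is present
        String o1 = null;                    // pretend field 1 (an exception) is absent

        BitSet optionals = new BitSet();
        if (success != null) optionals.set(0);
        if (o1 != null) optionals.set(1);
        // writer: emit the bitset, then only the present fields

        BitSet incoming = (BitSet) optionals.clone(); // reader sees the same header
        if (incoming.get(0)) System.out.println("decode field 0: " + success);
        if (incoming.get(1)) System.out.println("decode field 1: " + o1);
      }
    }

This is why the tuple scheme is more compact but less evolvable: the reader must agree exactly on field order and count, whereas the standard scheme can skip fields it does not recognize.
+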
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_args implements org.apache.thrift.TBase<get_partitions_by_expr_args, get_partitions_by_expr_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_partitions_by_expr_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_args");
+
+    private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_partitions_by_expr_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_partitions_by_expr_argsTupleSchemeFactory());
+    }
+
+    private PartitionsByExprRequest req; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      REQ((short)1, "req");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // REQ
+            return REQ;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprRequest.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_args.class, metaDataMap);
+    }
+
+    public get_partitions_by_expr_args() {
+    }
+
+    public get_partitions_by_expr_args(
+      PartitionsByExprRequest req)
+    {
+      this();
+      this.req = req;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_partitions_by_expr_args(get_partitions_by_expr_args other) {
+      if (other.isSetReq()) {
+        this.req = new PartitionsByExprRequest(other.req);
+      }
+    }
+
+    public get_partitions_by_expr_args deepCopy() {
+      return new get_partitions_by_expr_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.req = null;
+    }
+
+    public PartitionsByExprRequest getReq() {
+      return this.req;
+    }
+
+    public void setReq(PartitionsByExprRequest req) {
+      this.req = req;
+    }
+
+    public void unsetReq() {
+      this.req = null;
+    }
+
+    /** Returns true if field req is set (has been assigned a value) and false otherwise */
+    public boolean isSetReq() {
+      return this.req != null;
+    }
+
+    public void setReqIsSet(boolean value) {
+      if (!value) {
+        this.req = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case REQ:
+        if (value == null) {
+          unsetReq();
+        } else {
+          setReq((PartitionsByExprRequest)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case REQ:
+        return getReq();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case REQ:
+        return isSetReq();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_partitions_by_expr_args)
+        return this.equals((get_partitions_by_expr_args)that);
+      return false;
+    }
+
+    public boolean equals(get_partitions_by_expr_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_req = true && this.isSetReq();
+      boolean that_present_req = true && that.isSetReq();
+      if (this_present_req || that_present_req) {
+        if (!(this_present_req && that_present_req))
+          return false;
+        if (!this.req.equals(that.req))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_req = true && (isSetReq());
+      list.add(present_req);
+      if (present_req)
+        list.add(req);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(get_partitions_by_expr_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetReq()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_partitions_by_expr_args(");
+      boolean first = true;
+
+      sb.append("req:");
+      if (this.req == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.req);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (req != null) {
+        req.validate();
+      }
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_partitions_by_expr_argsStandardSchemeFactory implements SchemeFactory {
+      public get_partitions_by_expr_argsStandardScheme getScheme() {
+        return new get_partitions_by_expr_argsStandardScheme();
+      }
+    }
+
+    private static class get_partitions_by_expr_argsStandardScheme extends StandardScheme<get_partitions_by_expr_args> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 1: // REQ
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.req = new PartitionsByExprRequest();
+                struct.req.read(iprot);
+                struct.setReqIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.req != null) {
+          oprot.writeFieldBegin(REQ_FIELD_DESC);
+          struct.req.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_partitions_by_expr_argsTupleSchemeFactory implements SchemeFactory {
+      public get_partitions_by_expr_argsTupleScheme getScheme() {
+        return new get_partitions_by_expr_argsTupleScheme();
+      }
+    }
+
+    private static class get_partitions_by_expr_argsTupleScheme extends TupleScheme<get_partitions_by_expr_args> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetReq()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetReq()) {
+          struct.req.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_expr_args struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.req = new PartitionsByExprRequest();
+          struct.req.read(iprot);
+          struct.setReqIsSet(true);
+        }
+      }
+    }
+
+  }
+
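get_partitions_by_expr_args is a thin wrapper around a single PartitionsByExprRequest, the request-object style used by the newer metastore calls. A hedged sketch of populating such a request, assuming the setters Thrift generates from the IDL field names (setDbName, setTblName, setExpr, setMaxParts); in practice the expr bytes hold Hive's serialized filter expression:

    // Hedged sketch: field names follow the PartitionsByExprRequest IDL;
    // the empty expr byte array is a placeholder for a serialized filter.
    import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;

    public class RequestSketch {
      public static void main(String[] args) {
        PartitionsByExprRequest req = new PartitionsByExprRequest();
        req.setDbName("default");
        req.setTblName("my_table");       // hypothetical table name
        req.setExpr(new byte[0]);         // placeholder expression bytes
        req.setMaxParts((short) -1);      // -1 means no limit, per the IDL default
        System.out.println(req);
      }
    }

Wrapping all parameters in one request struct lets new optional fields be added later without another signature change, which is the point of the *_req variants introduced in this patch.
+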
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_partitions_by_expr_result implements org.apache.thrift.TBase<get_partitions_by_expr_result, get_partitions_by_expr_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_partitions_by_expr_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_by_expr_result");
+
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_partitions_by_expr_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_partitions_by_expr_resultTupleSchemeFactory());
+    }
+
+    private PartitionsByExprResult success; // required
+    private MetaException o1; // required
+    private NoSuchObjectException o2; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      SUCCESS((short)0, "success"),
+      O1((short)1, "o1"),
+      O2((short)2, "o2");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
+          case 1: // O1
+            return O1;
+          case 2: // O2
+            return O2;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsByExprResult.class)));
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_by_expr_result.class, metaDataMap);
+    }
+
+    public get_partitions_by_expr_result() {
+    }
+
+    public get_partitions_by_expr_result(
+      PartitionsByExprResult success,
+      MetaException o1,
+      NoSuchObjectException o2)
+    {
+      this();
+      this.success = success;
+      this.o1 = o1;
+      this.o2 = o2;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_partitions_by_expr_result(get_partitions_by_expr_result other) {
+      if (other.isSetSuccess()) {
+        this.success = new PartitionsByExprResult(other.success);
+      }
+      if (other.isSetO1()) {
+        this.o1 = new MetaException(other.o1);
+      }
+      if (other.isSetO2()) {
+        this.o2 = new NoSuchObjectException(other.o2);
+      }
+    }
+
+    public get_partitions_by_expr_result deepCopy() {
+      return new get_partitions_by_expr_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+      this.o1 = null;
+      this.o2 = null;
+    }
+
+    public PartitionsByExprResult getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(PartitionsByExprResult success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public MetaException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(MetaException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public NoSuchObjectException getO2() {
+      return this.o2;
+    }
+
+    public void setO2(NoSuchObjectException o2) {
+      this.o2 = o2;
+    }
+
+    public void unsetO2() {
+      this.o2 = null;
+    }
+
+    /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO2() {
+      return this.o2 != null;
+    }
+
+    public void setO2IsSet(boolean value) {
+      if (!value) {
+        this.o2 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((PartitionsByExprResult)value);
+        }
+        break;
+
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((MetaException)value);
+        }
+        break;
+
+      case O2:
+        if (value == null) {
+          unsetO2();
+        } else {
+          setO2((NoSuchObjectException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
+      case O1:
+        return getO1();
+
+      case O2:
+        return getO2();

<TRUNCATED>