Posted to commits@hive.apache.org by jc...@apache.org on 2018/02/16 11:24:31 UTC

[2/5] hive git commit: HIVE-18387: Minimize time that REBUILD locks the materialized view (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
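
For orientation, the generated client in this file gains a new synchronous method, update_creation_metadata(dbname, tbl_name, creation_metadata), plus the matching async call and processor entries. The sketch below shows how a caller might invoke it over a plain Thrift connection; the host/port, the CreationMetadata field setters, and the example database/table names are illustrative assumptions and are not part of this patch -- consult hive_metastore.thrift in this commit for the actual struct definition.

    import java.util.Collections;

    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class UpdateCreationMetadataExample {
      public static void main(String[] args) throws Exception {
        // Plain Thrift connection to a metastore instance (hypothetical host/port).
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        try {
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

          // Record which source tables a materialized view was built from.
          // Setter names assume the CreationMetadata struct added in this change;
          // verify them against the generated class.
          CreationMetadata cm = new CreationMetadata();
          cm.setDbName("default");
          cm.setTblName("mv_sales_daily");
          cm.setTablesUsed(Collections.singleton("default.sales"));

          // New call added by HIVE-18387: updates only the view's creation metadata,
          // rather than going through a full alter_table, which is what lets REBUILD
          // hold its lock for a shorter time.
          client.update_creation_metadata("default", "mv_sales_daily", cm);
        } finally {
          transport.close();
        }
      }
    }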

http://git-wip-us.apache.org/repos/asf/hive/blob/9e27ad08/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index d5e3527..05064cb 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -116,6 +116,8 @@ import org.slf4j.LoggerFactory;
 
     public Map<String,Materialization> get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
 
+    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+
     public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
 
     public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
@@ -482,6 +484,8 @@ import org.slf4j.LoggerFactory;
 
     public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1855,6 +1859,37 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result");
     }
 
+    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+    {
+      send_update_creation_metadata(dbname, tbl_name, creation_metadata);
+      recv_update_creation_metadata();
+    }
+
+    public void send_update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws org.apache.thrift.TException
+    {
+      update_creation_metadata_args args = new update_creation_metadata_args();
+      args.setDbname(dbname);
+      args.setTbl_name(tbl_name);
+      args.setCreation_metadata(creation_metadata);
+      sendBase("update_creation_metadata", args);
+    }
+
+    public void recv_update_creation_metadata() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+    {
+      update_creation_metadata_result result = new update_creation_metadata_result();
+      receiveBase(result, "update_creation_metadata");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      return;
+    }
+
     public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
     {
       send_get_table_names_by_filter(dbname, filter, max_tables);
@@ -7323,6 +7358,44 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      update_creation_metadata_call method_call = new update_creation_metadata_call(dbname, tbl_name, creation_metadata, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String dbname;
+      private String tbl_name;
+      private CreationMetadata creation_metadata;
+      public update_creation_metadata_call(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.dbname = dbname;
+        this.tbl_name = tbl_name;
+        this.creation_metadata = creation_metadata;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_creation_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        update_creation_metadata_args args = new update_creation_metadata_args();
+        args.setDbname(dbname);
+        args.setTbl_name(tbl_name);
+        args.setCreation_metadata(creation_metadata);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_update_creation_metadata();
+      }
+    }
+
     public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_table_names_by_filter_call method_call = new get_table_names_by_filter_call(dbname, filter, max_tables, resultHandler, this, ___protocolFactory, ___transport);
@@ -12395,6 +12468,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("get_table_req", new get_table_req());
       processMap.put("get_table_objects_by_name_req", new get_table_objects_by_name_req());
       processMap.put("get_materialization_invalidation_info", new get_materialization_invalidation_info());
+      processMap.put("update_creation_metadata", new update_creation_metadata());
       processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
       processMap.put("alter_table", new alter_table());
       processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
@@ -13508,6 +13582,34 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_creation_metadata_args> {
+      public update_creation_metadata() {
+        super("update_creation_metadata");
+      }
+
+      public update_creation_metadata_args getEmptyArgsInstance() {
+        return new update_creation_metadata_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public update_creation_metadata_result getResult(I iface, update_creation_metadata_args args) throws org.apache.thrift.TException {
+        update_creation_metadata_result result = new update_creation_metadata_result();
+        try {
+          iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        } catch (InvalidOperationException o2) {
+          result.o2 = o2;
+        } catch (UnknownDBException o3) {
+          result.o3 = o3;
+        }
+        return result;
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_table_names_by_filter_args> {
       public get_table_names_by_filter() {
         super("get_table_names_by_filter");
@@ -17236,6 +17338,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("get_table_req", new get_table_req());
       processMap.put("get_table_objects_by_name_req", new get_table_objects_by_name_req());
       processMap.put("get_materialization_invalidation_info", new get_materialization_invalidation_info());
+      processMap.put("update_creation_metadata", new update_creation_metadata());
       processMap.put("get_table_names_by_filter", new get_table_names_by_filter());
       processMap.put("alter_table", new alter_table());
       processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
@@ -19673,21 +19776,20 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
-      public get_table_names_by_filter() {
-        super("get_table_names_by_filter");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_creation_metadata_args, Void> {
+      public update_creation_metadata() {
+        super("update_creation_metadata");
       }
 
-      public get_table_names_by_filter_args getEmptyArgsInstance() {
-        return new get_table_names_by_filter_args();
+      public update_creation_metadata_args getEmptyArgsInstance() {
+        return new update_creation_metadata_args();
       }
 
-      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<List<String>>() { 
-          public void onComplete(List<String> o) {
-            get_table_names_by_filter_result result = new get_table_names_by_filter_result();
-            result.success = o;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            update_creation_metadata_result result = new update_creation_metadata_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -19699,7 +19801,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+            update_creation_metadata_result result = new update_creation_metadata_result();
             if (e instanceof MetaException) {
                         result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
@@ -19735,208 +19837,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
-        iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
+      public void start(I iface, update_creation_metadata_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
-      public alter_table() {
-        super("alter_table");
-      }
-
-      public alter_table_args getEmptyArgsInstance() {
-        return new alter_table_args();
-      }
-
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_table_result result = new alter_table_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            alter_table_result result = new alter_table_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
-      public alter_table_with_environment_context() {
-        super("alter_table_with_environment_context");
-      }
-
-      public alter_table_with_environment_context_args getEmptyArgsInstance() {
-        return new alter_table_with_environment_context_args();
-      }
-
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
-      public alter_table_with_cascade() {
-        super("alter_table_with_cascade");
-      }
-
-      public alter_table_with_cascade_args getEmptyArgsInstance() {
-        return new alter_table_with_cascade_args();
-      }
-
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            alter_table_with_cascade_result result = new alter_table_with_cascade_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            alter_table_with_cascade_result result = new alter_table_with_cascade_result();
-            if (e instanceof InvalidOperationException) {
-                        result.o1 = (InvalidOperationException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
-      }
-    }
-
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
-      public add_partition() {
-        super("add_partition");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_names_by_filter<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_names_by_filter_args, List<String>> {
+      public get_table_names_by_filter() {
+        super("get_table_names_by_filter");
       }
 
-      public add_partition_args getEmptyArgsInstance() {
-        return new add_partition_args();
+      public get_table_names_by_filter_args getEmptyArgsInstance() {
+        return new get_table_names_by_filter_args();
       }
 
-      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Partition>() { 
-          public void onComplete(Partition o) {
-            add_partition_result result = new add_partition_result();
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            get_table_names_by_filter_result result = new get_table_names_by_filter_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -19949,19 +19868,19 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            add_partition_result result = new add_partition_result();
-            if (e instanceof InvalidObjectException) {
-                        result.o1 = (InvalidObjectException) e;
+            get_table_names_by_filter_result result = new get_table_names_by_filter_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof AlreadyExistsException) {
-                        result.o2 = (AlreadyExistsException) e;
+            else             if (e instanceof InvalidOperationException) {
+                        result.o2 = (InvalidOperationException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o3 = (MetaException) e;
+            else             if (e instanceof UnknownDBException) {
+                        result.o3 = (UnknownDBException) e;
                         result.setO3IsSet(true);
                         msg = result;
             }
@@ -19985,26 +19904,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.add_partition(args.new_part,resultHandler);
+      public void start(I iface, get_table_names_by_filter_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.get_table_names_by_filter(args.dbname, args.filter, args.max_tables,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
-      public add_partition_with_environment_context() {
-        super("add_partition_with_environment_context");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_args, Void> {
+      public alter_table() {
+        super("alter_table");
       }
 
-      public add_partition_with_environment_context_args getEmptyArgsInstance() {
-        return new add_partition_with_environment_context_args();
+      public alter_table_args getEmptyArgsInstance() {
+        return new alter_table_args();
       }
 
-      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Partition>() { 
-          public void onComplete(Partition o) {
-            add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
-            result.success = o;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_table_result result = new alter_table_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20016,20 +19934,15 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
-            if (e instanceof InvalidObjectException) {
-                        result.o1 = (InvalidObjectException) e;
+            alter_table_result result = new alter_table_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof AlreadyExistsException) {
-                        result.o2 = (AlreadyExistsException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
             else             if (e instanceof MetaException) {
-                        result.o3 = (MetaException) e;
-                        result.setO3IsSet(true);
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
                         msg = result;
             }
              else 
@@ -20052,27 +19965,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
+      public void start(I iface, alter_table_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_table(args.dbname, args.tbl_name, args.new_tbl,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
-      public add_partitions() {
-        super("add_partitions");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_environment_context_args, Void> {
+      public alter_table_with_environment_context() {
+        super("alter_table_with_environment_context");
       }
 
-      public add_partitions_args getEmptyArgsInstance() {
-        return new add_partitions_args();
+      public alter_table_with_environment_context_args getEmptyArgsInstance() {
+        return new alter_table_with_environment_context_args();
       }
 
-      public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Integer>() { 
-          public void onComplete(Integer o) {
-            add_partitions_result result = new add_partitions_result();
-            result.success = o;
-            result.setSuccessIsSet(true);
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20084,20 +19995,15 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            add_partitions_result result = new add_partitions_result();
-            if (e instanceof InvalidObjectException) {
-                        result.o1 = (InvalidObjectException) e;
+            alter_table_with_environment_context_result result = new alter_table_with_environment_context_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof AlreadyExistsException) {
-                        result.o2 = (AlreadyExistsException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
             else             if (e instanceof MetaException) {
-                        result.o3 = (MetaException) e;
-                        result.setO3IsSet(true);
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
                         msg = result;
             }
              else 
@@ -20120,27 +20026,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
-        iface.add_partitions(args.new_parts,resultHandler);
+      public void start(I iface, alter_table_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_table_with_environment_context(args.dbname, args.tbl_name, args.new_tbl, args.environment_context,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
-      public add_partitions_pspec() {
-        super("add_partitions_pspec");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_with_cascade<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_with_cascade_args, Void> {
+      public alter_table_with_cascade() {
+        super("alter_table_with_cascade");
       }
 
-      public add_partitions_pspec_args getEmptyArgsInstance() {
-        return new add_partitions_pspec_args();
+      public alter_table_with_cascade_args getEmptyArgsInstance() {
+        return new alter_table_with_cascade_args();
       }
 
-      public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Integer>() { 
-          public void onComplete(Integer o) {
-            add_partitions_pspec_result result = new add_partitions_pspec_result();
-            result.success = o;
-            result.setSuccessIsSet(true);
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            alter_table_with_cascade_result result = new alter_table_with_cascade_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20152,20 +20056,15 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            add_partitions_pspec_result result = new add_partitions_pspec_result();
-            if (e instanceof InvalidObjectException) {
-                        result.o1 = (InvalidObjectException) e;
+            alter_table_with_cascade_result result = new alter_table_with_cascade_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof AlreadyExistsException) {
-                        result.o2 = (AlreadyExistsException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
             else             if (e instanceof MetaException) {
-                        result.o3 = (MetaException) e;
-                        result.setO3IsSet(true);
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
                         msg = result;
             }
              else 
@@ -20188,25 +20087,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
-        iface.add_partitions_pspec(args.new_parts,resultHandler);
+      public void start(I iface, alter_table_with_cascade_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.alter_table_with_cascade(args.dbname, args.tbl_name, args.new_tbl, args.cascade,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
-      public append_partition() {
-        super("append_partition");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
+      public add_partition() {
+        super("add_partition");
       }
 
-      public append_partition_args getEmptyArgsInstance() {
-        return new append_partition_args();
+      public add_partition_args getEmptyArgsInstance() {
+        return new add_partition_args();
       }
 
       public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Partition>() { 
           public void onComplete(Partition o) {
-            append_partition_result result = new append_partition_result();
+            add_partition_result result = new add_partition_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20219,7 +20118,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            append_partition_result result = new append_partition_result();
+            add_partition_result result = new add_partition_result();
             if (e instanceof InvalidObjectException) {
                         result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
@@ -20255,25 +20154,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
+      public void start(I iface, add_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.add_partition(args.new_part,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
-      public add_partitions_req() {
-        super("add_partitions_req");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_with_environment_context_args, Partition> {
+      public add_partition_with_environment_context() {
+        super("add_partition_with_environment_context");
       }
 
-      public add_partitions_req_args getEmptyArgsInstance() {
-        return new add_partitions_req_args();
+      public add_partition_with_environment_context_args getEmptyArgsInstance() {
+        return new add_partition_with_environment_context_args();
       }
 
-      public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<AddPartitionsResult>() { 
-          public void onComplete(AddPartitionsResult o) {
-            add_partitions_req_result result = new add_partitions_req_result();
+        return new AsyncMethodCallback<Partition>() { 
+          public void onComplete(Partition o) {
+            add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20286,7 +20185,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            add_partitions_req_result result = new add_partitions_req_result();
+            add_partition_with_environment_context_result result = new add_partition_with_environment_context_result();
             if (e instanceof InvalidObjectException) {
                         result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
@@ -20322,26 +20221,27 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
-        iface.add_partitions_req(args.request,resultHandler);
+      public void start(I iface, add_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.add_partition_with_environment_context(args.new_part, args.environment_context,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
-      public append_partition_with_environment_context() {
-        super("append_partition_with_environment_context");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_args, Integer> {
+      public add_partitions() {
+        super("add_partitions");
       }
 
-      public append_partition_with_environment_context_args getEmptyArgsInstance() {
-        return new append_partition_with_environment_context_args();
+      public add_partitions_args getEmptyArgsInstance() {
+        return new add_partitions_args();
       }
 
-      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Partition>() { 
-          public void onComplete(Partition o) {
-            append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+        return new AsyncMethodCallback<Integer>() { 
+          public void onComplete(Integer o) {
+            add_partitions_result result = new add_partitions_result();
             result.success = o;
+            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20353,7 +20253,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+            add_partitions_result result = new add_partitions_result();
             if (e instanceof InvalidObjectException) {
                         result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
@@ -20389,26 +20289,27 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+      public void start(I iface, add_partitions_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+        iface.add_partitions(args.new_parts,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
-      public append_partition_by_name() {
-        super("append_partition_by_name");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_pspec<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_pspec_args, Integer> {
+      public add_partitions_pspec() {
+        super("add_partitions_pspec");
       }
 
-      public append_partition_by_name_args getEmptyArgsInstance() {
-        return new append_partition_by_name_args();
+      public add_partitions_pspec_args getEmptyArgsInstance() {
+        return new add_partitions_pspec_args();
       }
 
-      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Partition>() { 
-          public void onComplete(Partition o) {
-            append_partition_by_name_result result = new append_partition_by_name_result();
+        return new AsyncMethodCallback<Integer>() { 
+          public void onComplete(Integer o) {
+            add_partitions_pspec_result result = new add_partitions_pspec_result();
             result.success = o;
+            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20420,7 +20321,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            append_partition_by_name_result result = new append_partition_by_name_result();
+            add_partitions_pspec_result result = new add_partitions_pspec_result();
             if (e instanceof InvalidObjectException) {
                         result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
@@ -20456,25 +20357,25 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+      public void start(I iface, add_partitions_pspec_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+        iface.add_partitions_pspec(args.new_parts,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
-      public append_partition_by_name_with_environment_context() {
-        super("append_partition_by_name_with_environment_context");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_args, Partition> {
+      public append_partition() {
+        super("append_partition");
       }
 
-      public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
-        return new append_partition_by_name_with_environment_context_args();
+      public append_partition_args getEmptyArgsInstance() {
+        return new append_partition_args();
       }
 
       public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Partition>() { 
           public void onComplete(Partition o) {
-            append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+            append_partition_result result = new append_partition_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20487,7 +20388,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+            append_partition_result result = new append_partition_result();
             if (e instanceof InvalidObjectException) {
                         result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
@@ -20523,27 +20424,26 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
-        iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler);
+      public void start(I iface, append_partition_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.append_partition(args.db_name, args.tbl_name, args.part_vals,resultHandler);
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_partition_args, Boolean> {
-      public drop_partition() {
-        super("drop_partition");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partitions_req_args, AddPartitionsResult> {
+      public add_partitions_req() {
+        super("add_partitions_req");
       }
 
-      public drop_partition_args getEmptyArgsInstance() {
-        return new drop_partition_args();
+      public add_partitions_req_args getEmptyArgsInstance() {
+        return new add_partitions_req_args();
       }
 
-      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<AddPartitionsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Boolean>() { 
-          public void onComplete(Boolean o) {
-            drop_partition_result result = new drop_partition_result();
+        return new AsyncMethodCallback<AddPartitionsResult>() { 
+          public void onComplete(AddPartitionsResult o) {
+            add_partitions_req_result result = new add_partitions_req_result();
             result.success = o;
-            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20555,14 +20455,283 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            drop_partition_result result = new drop_partition_result();
-            if (e instanceof NoSuchObjectException) {
-                        result.o1 = (NoSuchObjectException) e;
+            add_partitions_req_result result = new add_partitions_req_result();
+            if (e instanceof InvalidObjectException) {
+                        result.o1 = (InvalidObjectException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof MetaException) {
-                        result.o2 = (MetaException) e;
+            else             if (e instanceof AlreadyExistsException) {
+                        result.o2 = (AlreadyExistsException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, add_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AddPartitionsResult> resultHandler) throws TException {
+        iface.add_partitions_req(args.request,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_with_environment_context_args, Partition> {
+      public append_partition_with_environment_context() {
+        super("append_partition_with_environment_context");
+      }
+
+      public append_partition_with_environment_context_args getEmptyArgsInstance() {
+        return new append_partition_with_environment_context_args();
+      }
+
+      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Partition>() { 
+          public void onComplete(Partition o) {
+            append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            append_partition_with_environment_context_result result = new append_partition_with_environment_context_result();
+            if (e instanceof InvalidObjectException) {
+                        result.o1 = (InvalidObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof AlreadyExistsException) {
+                        result.o2 = (AlreadyExistsException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, append_partition_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.append_partition_with_environment_context(args.db_name, args.tbl_name, args.part_vals, args.environment_context,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_args, Partition> {
+      public append_partition_by_name() {
+        super("append_partition_by_name");
+      }
+
+      public append_partition_by_name_args getEmptyArgsInstance() {
+        return new append_partition_by_name_args();
+      }
+
+      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Partition>() { 
+          public void onComplete(Partition o) {
+            append_partition_by_name_result result = new append_partition_by_name_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            append_partition_by_name_result result = new append_partition_by_name_result();
+            if (e instanceof InvalidObjectException) {
+                        result.o1 = (InvalidObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof AlreadyExistsException) {
+                        result.o2 = (AlreadyExistsException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, append_partition_by_name_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.append_partition_by_name(args.db_name, args.tbl_name, args.part_name,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class append_partition_by_name_with_environment_context<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, append_partition_by_name_with_environment_context_args, Partition> {
+      public append_partition_by_name_with_environment_context() {
+        super("append_partition_by_name_with_environment_context");
+      }
+
+      public append_partition_by_name_with_environment_context_args getEmptyArgsInstance() {
+        return new append_partition_by_name_with_environment_context_args();
+      }
+
+      public AsyncMethodCallback<Partition> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Partition>() { 
+          public void onComplete(Partition o) {
+            append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            append_partition_by_name_with_environment_context_result result = new append_partition_by_name_with_environment_context_result();
+            if (e instanceof InvalidObjectException) {
+                        result.o1 = (InvalidObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof AlreadyExistsException) {
+                        result.o2 = (AlreadyExistsException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, append_partition_by_name_with_environment_context_args args, org.apache.thrift.async.AsyncMethodCallback<Partition> resultHandler) throws TException {
+        iface.append_partition_by_name_with_environment_context(args.db_name, args.tbl_name, args.part_name, args.environment_context,resultHandler);
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, drop_partition_args, Boolean> {
+      public drop_partition() {
+        super("drop_partition");
+      }
+
+      public drop_partition_args getEmptyArgsInstance() {
+        return new drop_partition_args();
+      }
+
+      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Boolean>() { 
+          public void onComplete(Boolean o) {
+            drop_partition_result result = new drop_partition_result();
+            result.success = o;
+            result.setSuccessIsSet(true);
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            drop_partition_result result = new drop_partition_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -63884,7 +64053,1201 @@ import org.slf4j.LoggerFactory;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetTablesResult.class)));
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_objects_by_name_req_result.class, metaDataMap);
+    }
+
+    public get_table_objects_by_name_req_result() {
+    }
+
+    public get_table_objects_by_name_req_result(
+      GetTablesResult success,
+      MetaException o1,
+      InvalidOperationException o2,
+      UnknownDBException o3)
+    {
+      this();
+      this.success = success;
+      this.o1 = o1;
+      this.o2 = o2;
+      this.o3 = o3;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public get_table_objects_by_name_req_result(get_table_objects_by_name_req_result other) {
+      if (other.isSetSuccess()) {
+        this.success = new GetTablesResult(other.success);
+      }
+      if (other.isSetO1()) {
+        this.o1 = new MetaException(other.o1);
+      }
+      if (other.isSetO2()) {
+        this.o2 = new InvalidOperationException(other.o2);
+      }
+      if (other.isSetO3()) {
+        this.o3 = new UnknownDBException(other.o3);
+      }
+    }
+
+    public get_table_objects_by_name_req_result deepCopy() {
+      return new get_table_objects_by_name_req_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+      this.o1 = null;
+      this.o2 = null;
+      this.o3 = null;
+    }
+
+    public GetTablesResult getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(GetTablesResult success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public MetaException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(MetaException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public InvalidOperationException getO2() {
+      return this.o2;
+    }
+
+    public void setO2(InvalidOperationException o2) {
+      this.o2 = o2;
+    }
+
+    public void unsetO2() {
+      this.o2 = null;
+    }
+
+    /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO2() {
+      return this.o2 != null;
+    }
+
+    public void setO2IsSet(boolean value) {
+      if (!value) {
+        this.o2 = null;
+      }
+    }
+
+    public UnknownDBException getO3() {
+      return this.o3;
+    }
+
+    public void setO3(UnknownDBException o3) {
+      this.o3 = o3;
+    }
+
+    public void unsetO3() {
+      this.o3 = null;
+    }
+
+    /** Returns true if field o3 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO3() {
+      return this.o3 != null;
+    }
+
+    public void setO3IsSet(boolean value) {
+      if (!value) {
+        this.o3 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((GetTablesResult)value);
+        }
+        break;
+
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((MetaException)value);
+        }
+        break;
+
+      case O2:
+        if (value == null) {
+          unsetO2();
+        } else {
+          setO2((InvalidOperationException)value);
+        }
+        break;
+
+      case O3:
+        if (value == null) {
+          unsetO3();
+        } else {
+          setO3((UnknownDBException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
+      case O1:
+        return getO1();
+
+      case O2:
+        return getO2();
+
+      case O3:
+        return getO3();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
+      case O1:
+        return isSetO1();
+      case O2:
+        return isSetO2();
+      case O3:
+        return isSetO3();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_table_objects_by_name_req_result)
+        return this.equals((get_table_objects_by_name_req_result)that);
+      return false;
+    }
+
+    public boolean equals(get_table_objects_by_name_req_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_success = true && this.isSetSuccess();
+      boolean that_present_success = true && that.isSetSuccess();
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (!this.success.equals(that.success))
+          return false;
+      }
+
+      boolean this_present_o1 = true && this.isSetO1();
+      boolean that_present_o1 = true && that.isSetO1();
+      if (this_present_o1 || that_present_o1) {
+        if (!(this_present_o1 && that_present_o1))
+          return false;
+        if (!this.o1.equals(that.o1))
+          return false;
+      }
+
+      boolean this_present_o2 = true && this.isSetO2();
+      boolean that_present_o2 = true && that.isSetO2();
+      if (this_present_o2 || that_present_o2) {
+        if (!(this_present_o2 && that_present_o2))
+          return false;
+        if (!this.o2.equals(that.o2))
+          return false;
+      }
+
+      boolean this_present_o3 = true && this.isSetO3();
+      boolean that_present_o3 = true && that.isSetO3();
+      if (this_present_o3 || that_present_o3) {
+        if (!(this_present_o3 && that_present_o3))
+          return false;
+        if (!this.o3.equals(that.o3))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_success = true && (isSetSuccess());
+      list.add(present_success);
+      if (present_success)
+        list.add(success);
+
+      boolean present_o1 = true && (isSetO1());
+      list.add(present_o1);
+      if (present_o1)
+        list.add(o1);
+
+      boolean present_o2 = true && (isSetO2());
+      list.add(present_o2);
+      if (present_o2)
+        list.add(o2);
+
+      boolean present_o3 = true && (isSetO3());
+      list.add(present_o3);
+      if (present_o3)
+        list.add(o3);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(get_table_objects_by_name_req_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO1()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO2()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO3()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_table_objects_by_name_req_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o2:");
+      if (this.o2 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o2);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o3:");
+      if (this.o3 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o3);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+      if (success != null) {
+        success.validate();
+      }
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class get_table_objects_by_name_req_resultStandardSchemeFactory implements SchemeFactory {
+      public get_table_objects_by_name_req_resultStandardScheme getScheme() {
+        return new get_table_objects_by_name_req_resultStandardScheme();
+      }
+    }
+
+    private static class get_table_objects_by_name_req_resultStandardScheme extends StandardScheme<get_table_objects_by_name_req_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBegin();
+          if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+            break;
+          }
+          switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.success = new GetTablesResult();
+                struct.success.read(iprot);
+                struct.setSuccessIsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 1: // O1
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o1 = new MetaException();
+                struct.o1.read(iprot);
+                struct.setO1IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 2: // O2
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o2 = new InvalidOperationException();
+                struct.o2.read(iprot);
+                struct.setO2IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            case 3: // O3
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.o3 = new UnknownDBException();
+                struct.o3.read(iprot);
+                struct.setO3IsSet(true);
+              } else { 
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
+            default:
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+          }
+          iprot.readFieldEnd();
+        }
+        iprot.readStructEnd();
+        struct.validate();
+      }
+
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+        struct.validate();
+
+        oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          struct.success.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o1 != null) {
+          oprot.writeFieldBegin(O1_FIELD_DESC);
+          struct.o1.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o2 != null) {
+          oprot.writeFieldBegin(O2_FIELD_DESC);
+          struct.o2.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        if (struct.o3 != null) {
+          oprot.writeFieldBegin(O3_FIELD_DESC);
+          struct.o3.write(oprot);
+          oprot.writeFieldEnd();
+        }
+        oprot.writeFieldStop();
+        oprot.writeStructEnd();
+      }
+
+    }
+
+    private static class get_table_objects_by_name_req_resultTupleSchemeFactory implements SchemeFactory {
+      public get_table_objects_by_name_req_resultTupleScheme getScheme() {
+        return new get_table_objects_by_name_req_resultTupleScheme();
+      }
+    }
+
+    private static class get_table_objects_by_name_req_resultTupleScheme extends TupleScheme<get_table_objects_by_name_req_result> {
+
+      @Override
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        if (struct.isSetO1()) {
+          optionals.set(1);
+        }
+        if (struct.isSetO2()) {
+          optionals.set(2);
+        }
+        if (struct.isSetO3()) {
+          optionals.set(3);
+        }
+        oprot.writeBitSet(optionals, 4);
+        if (struct.isSetSuccess()) {
+          struct.success.write(oprot);
+        }
+        if (struct.isSetO1()) {
+          struct.o1.write(oprot);
+        }
+        if (struct.isSetO2()) {
+          struct.o2.write(oprot);
+        }
+        if (struct.isSetO3()) {
+          struct.o3.write(oprot);
+        }
+      }
+
+      @Override
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by_name_req_result struct) throws org.apache.thrift.TException {
+        TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(4);
+        if (incoming.get(0)) {
+          struct.success = new GetTablesResult();
+          struct.success.read(iprot);
+          struct.setSuccessIsSet(true);
+        }
+        if (incoming.get(1)) {
+          struct.o1 = new MetaException();
+          struct.o1.read(iprot);
+          struct.setO1IsSet(true);
+        }
+        if (incoming.get(2)) {
+          struct.o2 = new InvalidOperationException();
+          struct.o2.read(iprot);
+          struct.setO2IsSet(true);
+        }
+        if (incoming.get(3)) {
+          struct.o3 = new UnknownDBException();
+          struct.o3.read(iprot);
+          struct.setO3IsSet(true);
+        }
+      }
+    }
+
+  }
+
+  @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_args implements org.apache.thrift.TBase<get_materialization_invalidation_info_args, get_materialization_invalidation_info_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_materialization_invalidation_info_args>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_args");
+
+    private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+    private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_names", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_materialization_invalidation_info_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_materialization_invalidation_info_argsTupleSchemeFactory());
+    }
+
+    private String dbname; // required
+    private List<String> tbl_names; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      DBNAME((short)1, "dbname"),
+      TBL_NAMES((short)2, "tbl_names");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if it's not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // DBNAME
+            return DBNAME;
+          case 2: // TBL_NAMES
+            return TBL_NAMES;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if it's not found.
+       */
+      public static _Fields findByName(

<TRUNCATED>