Posted to commits@hive.apache.org by se...@apache.org on 2018/07/14 01:53:34 UTC

[12/20] hive git commit: HIVE-19820: add ACID stats support to background stats updater and fix a bunch of edge cases found in SU tests (Sergey Shelukhin)

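The diff below extends the generated Thrift bindings with request/response-style endpoints -- truncate_table_req, alter_table_req, update_table_column_statistics_req, and update_partition_column_statistics_req -- and renames alter_partitions_with_environment_context_req to alter_partitions_req. As a minimal sketch (not part of the commit), this is how a synchronous client might call the new truncate endpoint; the metastore host/port, the TruncateTableRequest constructor, and the setPartNames setter are assumptions inferred from the class names, since the request classes themselves are not shown in this diff.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.TruncateTableRequest;
import org.apache.hadoop.hive.metastore.api.TruncateTableResponse;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class TruncateTableReqSketch {
  public static void main(String[] args) throws Exception {
    // Connect to a Hive metastore Thrift endpoint (host and port are assumptions).
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Build the new request object; constructor and setter names are inferred,
    // not taken from this diff.
    TruncateTableRequest req = new TruncateTableRequest("default", "web_logs");
    req.setPartNames(Arrays.asList("ds=2018-07-13", "ds=2018-07-14"));

    // truncate_table_req is the request-based endpoint added by this diff; it
    // supplements the positional truncate_table(dbName, tableName, partNames) call
    // and returns a response object instead of void.
    TruncateTableResponse resp = client.truncate_table_req(req);

    transport.close();
  }
}

The other new endpoints follow the same pattern: wrap the arguments in a single request struct (AlterTableRequest, SetPartitionsStatsRequest) and get a response struct back, which leaves room to evolve the API without adding more positional overloads.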
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 183f977..fc3da46 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -110,6 +110,8 @@ import org.slf4j.LoggerFactory;
 
     public void truncate_table(String dbName, String tableName, List<String> partNames) throws MetaException, org.apache.thrift.TException;
 
+    public TruncateTableResponse truncate_table_req(TruncateTableRequest req) throws MetaException, org.apache.thrift.TException;
+
     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException;
 
     public List<String> get_tables_by_type(String db_name, String pattern, String tableType) throws MetaException, org.apache.thrift.TException;
@@ -140,6 +142,8 @@ import org.slf4j.LoggerFactory;
 
     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
+    public AlterTableResponse alter_table_req(AlterTableRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+
     public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
 
     public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
@@ -210,7 +214,7 @@ import org.slf4j.LoggerFactory;
 
     public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
-    public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+    public AlterPartitionsResponse alter_partitions_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
 
@@ -244,6 +248,10 @@ import org.slf4j.LoggerFactory;
 
     public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
 
+    public SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
+
+    public SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
+
     public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
 
     public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
@@ -532,6 +540,8 @@ import org.slf4j.LoggerFactory;
 
     public void truncate_table(String dbName, String tableName, List<String> partNames, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void truncate_table_req(TruncateTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_tables_by_type(String db_name, String pattern, String tableType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -562,6 +572,8 @@ import org.slf4j.LoggerFactory;
 
     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void alter_table_req(AlterTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -632,7 +644,7 @@ import org.slf4j.LoggerFactory;
 
     public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
-    public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void alter_partitions_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
@@ -666,6 +678,10 @@ import org.slf4j.LoggerFactory;
 
     public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void update_table_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void update_partition_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -1879,6 +1895,32 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
+    public TruncateTableResponse truncate_table_req(TruncateTableRequest req) throws MetaException, org.apache.thrift.TException
+    {
+      send_truncate_table_req(req);
+      return recv_truncate_table_req();
+    }
+
+    public void send_truncate_table_req(TruncateTableRequest req) throws org.apache.thrift.TException
+    {
+      truncate_table_req_args args = new truncate_table_req_args();
+      args.setReq(req);
+      sendBase("truncate_table_req", args);
+    }
+
+    public TruncateTableResponse recv_truncate_table_req() throws MetaException, org.apache.thrift.TException
+    {
+      truncate_table_req_result result = new truncate_table_req_result();
+      receiveBase(result, "truncate_table_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "truncate_table_req failed: unknown result");
+    }
+
     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException
     {
       send_get_tables(db_name, pattern);
@@ -2314,6 +2356,35 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
+    public AlterTableResponse alter_table_req(AlterTableRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      send_alter_table_req(req);
+      return recv_alter_table_req();
+    }
+
+    public void send_alter_table_req(AlterTableRequest req) throws org.apache.thrift.TException
+    {
+      alter_table_req_args args = new alter_table_req_args();
+      args.setReq(req);
+      sendBase("alter_table_req", args);
+    }
+
+    public AlterTableResponse recv_alter_table_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    {
+      alter_table_req_result result = new alter_table_req_result();
+      receiveBase(result, "alter_table_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_table_req failed: unknown result");
+    }
+
     public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException
     {
       send_add_partition(new_part);
@@ -3438,23 +3509,23 @@ import org.slf4j.LoggerFactory;
       return;
     }
 
-    public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    public AlterPartitionsResponse alter_partitions_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException
     {
-      send_alter_partitions_with_environment_context_req(req);
-      return recv_alter_partitions_with_environment_context_req();
+      send_alter_partitions_req(req);
+      return recv_alter_partitions_req();
     }
 
-    public void send_alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws org.apache.thrift.TException
+    public void send_alter_partitions_req(AlterPartitionsRequest req) throws org.apache.thrift.TException
     {
-      alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args();
+      alter_partitions_req_args args = new alter_partitions_req_args();
       args.setReq(req);
-      sendBase("alter_partitions_with_environment_context_req", args);
+      sendBase("alter_partitions_req", args);
     }
 
-    public AlterPartitionsResponse recv_alter_partitions_with_environment_context_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException
+    public AlterPartitionsResponse recv_alter_partitions_req() throws InvalidOperationException, MetaException, org.apache.thrift.TException
     {
-      alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
-      receiveBase(result, "alter_partitions_with_environment_context_req");
+      alter_partitions_req_result result = new alter_partitions_req_result();
+      receiveBase(result, "alter_partitions_req");
       if (result.isSetSuccess()) {
         return result.success;
       }
@@ -3464,7 +3535,7 @@ import org.slf4j.LoggerFactory;
       if (result.o2 != null) {
         throw result.o2;
       }
-      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_with_environment_context_req failed: unknown result");
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "alter_partitions_req failed: unknown result");
     }
 
     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException
@@ -3960,6 +4031,76 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_column_statistics failed: unknown result");
     }
 
+    public SetPartitionsStatsResponse update_table_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException
+    {
+      send_update_table_column_statistics_req(req);
+      return recv_update_table_column_statistics_req();
+    }
+
+    public void send_update_table_column_statistics_req(SetPartitionsStatsRequest req) throws org.apache.thrift.TException
+    {
+      update_table_column_statistics_req_args args = new update_table_column_statistics_req_args();
+      args.setReq(req);
+      sendBase("update_table_column_statistics_req", args);
+    }
+
+    public SetPartitionsStatsResponse recv_update_table_column_statistics_req() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException
+    {
+      update_table_column_statistics_req_result result = new update_table_column_statistics_req_result();
+      receiveBase(result, "update_table_column_statistics_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      if (result.o4 != null) {
+        throw result.o4;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_table_column_statistics_req failed: unknown result");
+    }
+
+    public SetPartitionsStatsResponse update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException
+    {
+      send_update_partition_column_statistics_req(req);
+      return recv_update_partition_column_statistics_req();
+    }
+
+    public void send_update_partition_column_statistics_req(SetPartitionsStatsRequest req) throws org.apache.thrift.TException
+    {
+      update_partition_column_statistics_req_args args = new update_partition_column_statistics_req_args();
+      args.setReq(req);
+      sendBase("update_partition_column_statistics_req", args);
+    }
+
+    public SetPartitionsStatsResponse recv_update_partition_column_statistics_req() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException
+    {
+      update_partition_column_statistics_req_result result = new update_partition_column_statistics_req_result();
+      receiveBase(result, "update_partition_column_statistics_req");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      if (result.o3 != null) {
+        throw result.o3;
+      }
+      if (result.o4 != null) {
+        throw result.o4;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "update_partition_column_statistics_req failed: unknown result");
+    }
+
     public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException
     {
       send_get_table_column_statistics(db_name, tbl_name, col_name);
@@ -8070,6 +8211,38 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void truncate_table_req(TruncateTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      truncate_table_req_call method_call = new truncate_table_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private TruncateTableRequest req;
+      public truncate_table_req_call(TruncateTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("truncate_table_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        truncate_table_req_args args = new truncate_table_req_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public TruncateTableResponse getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_truncate_table_req();
+      }
+    }
+
     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_tables_call method_call = new get_tables_call(db_name, pattern, resultHandler, this, ___protocolFactory, ___transport);
@@ -8613,6 +8786,38 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void alter_table_req(AlterTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      alter_table_req_call method_call = new alter_table_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private AlterTableRequest req;
+      public alter_table_req_call(AlterTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_table_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_table_req_args args = new alter_table_req_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public AlterTableResponse getResult() throws InvalidOperationException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_alter_table_req();
+      }
+    }
+
     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       add_partition_call method_call = new add_partition_call(new_part, resultHandler, this, ___protocolFactory, ___transport);
@@ -9970,23 +10175,23 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void alter_partitions_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      alter_partitions_with_environment_context_req_call method_call = new alter_partitions_with_environment_context_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      alter_partitions_req_call method_call = new alter_partitions_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_req_call extends org.apache.thrift.async.TAsyncMethodCall {
       private AlterPartitionsRequest req;
-      public alter_partitions_with_environment_context_req_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      public alter_partitions_req_call(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
         this.req = req;
       }
 
       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
-        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_with_environment_context_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
-        alter_partitions_with_environment_context_req_args args = new alter_partitions_with_environment_context_req_args();
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("alter_partitions_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        alter_partitions_req_args args = new alter_partitions_req_args();
         args.setReq(req);
         args.write(prot);
         prot.writeMessageEnd();
@@ -9998,7 +10203,7 @@ import org.slf4j.LoggerFactory;
         }
         org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
         org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
-        return (new Client(prot)).recv_alter_partitions_with_environment_context_req();
+        return (new Client(prot)).recv_alter_partitions_req();
       }
     }
 
@@ -10556,6 +10761,70 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    public void update_table_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      update_table_column_statistics_req_call method_call = new update_table_column_statistics_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private SetPartitionsStatsRequest req;
+      public update_table_column_statistics_req_call(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_table_column_statistics_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        update_table_column_statistics_req_args args = new update_table_column_statistics_req_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SetPartitionsStatsResponse getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_update_table_column_statistics_req();
+      }
+    }
+
+    public void update_partition_column_statistics_req(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      update_partition_column_statistics_req_call method_call = new update_partition_column_statistics_req_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private SetPartitionsStatsRequest req;
+      public update_partition_column_statistics_req_call(SetPartitionsStatsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_partition_column_statistics_req", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        update_partition_column_statistics_req_args args = new update_partition_column_statistics_req_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public SetPartitionsStatsResponse getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_update_partition_column_statistics_req();
+      }
+    }
+
     public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_table_column_statistics_call method_call = new get_table_column_statistics_call(db_name, tbl_name, col_name, resultHandler, this, ___protocolFactory, ___transport);
@@ -14143,6 +14412,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
       processMap.put("truncate_table", new truncate_table());
+      processMap.put("truncate_table_req", new truncate_table_req());
       processMap.put("get_tables", new get_tables());
       processMap.put("get_tables_by_type", new get_tables_by_type());
       processMap.put("get_materialized_views_for_rewriting", new get_materialized_views_for_rewriting());
@@ -14158,6 +14428,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_table", new alter_table());
       processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
       processMap.put("alter_table_with_cascade", new alter_table_with_cascade());
+      processMap.put("alter_table_req", new alter_table_req());
       processMap.put("add_partition", new add_partition());
       processMap.put("add_partition_with_environment_context", new add_partition_with_environment_context());
       processMap.put("add_partitions", new add_partitions());
@@ -14193,7 +14464,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partition", new alter_partition());
       processMap.put("alter_partitions", new alter_partitions());
       processMap.put("alter_partitions_with_environment_context", new alter_partitions_with_environment_context());
-      processMap.put("alter_partitions_with_environment_context_req", new alter_partitions_with_environment_context_req());
+      processMap.put("alter_partitions_req", new alter_partitions_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
@@ -14210,6 +14481,8 @@ import org.slf4j.LoggerFactory;
       processMap.put("get_check_constraints", new get_check_constraints());
       processMap.put("update_table_column_statistics", new update_table_column_statistics());
       processMap.put("update_partition_column_statistics", new update_partition_column_statistics());
+      processMap.put("update_table_column_statistics_req", new update_table_column_statistics_req());
+      processMap.put("update_partition_column_statistics_req", new update_partition_column_statistics_req());
       processMap.put("get_table_column_statistics", new get_table_column_statistics());
       processMap.put("get_partition_column_statistics", new get_partition_column_statistics());
       processMap.put("get_table_statistics_req", new get_table_statistics_req());
@@ -15225,6 +15498,30 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, truncate_table_req_args> {
+      public truncate_table_req() {
+        super("truncate_table_req");
+      }
+
+      public truncate_table_req_args getEmptyArgsInstance() {
+        return new truncate_table_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public truncate_table_req_result getResult(I iface, truncate_table_req_args args) throws org.apache.thrift.TException {
+        truncate_table_req_result result = new truncate_table_req_result();
+        try {
+          result.success = iface.truncate_table_req(args.req);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        }
+        return result;
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_tables_args> {
       public get_tables() {
         super("get_tables");
@@ -15607,6 +15904,32 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_table_req_args> {
+      public alter_table_req() {
+        super("alter_table_req");
+      }
+
+      public alter_table_req_args getEmptyArgsInstance() {
+        return new alter_table_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public alter_table_req_result getResult(I iface, alter_table_req_args args) throws org.apache.thrift.TException {
+        alter_table_req_result result = new alter_table_req_result();
+        try {
+          result.success = iface.alter_table_req(args.req);
+        } catch (InvalidOperationException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_partition_args> {
       public add_partition() {
         super("add_partition");
@@ -16550,23 +16873,23 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_partitions_with_environment_context_req_args> {
-      public alter_partitions_with_environment_context_req() {
-        super("alter_partitions_with_environment_context_req");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, alter_partitions_req_args> {
+      public alter_partitions_req() {
+        super("alter_partitions_req");
       }
 
-      public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() {
-        return new alter_partitions_with_environment_context_req_args();
+      public alter_partitions_req_args getEmptyArgsInstance() {
+        return new alter_partitions_req_args();
       }
 
       protected boolean isOneway() {
         return false;
       }
 
-      public alter_partitions_with_environment_context_req_result getResult(I iface, alter_partitions_with_environment_context_req_args args) throws org.apache.thrift.TException {
-        alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+      public alter_partitions_req_result getResult(I iface, alter_partitions_req_args args) throws org.apache.thrift.TException {
+        alter_partitions_req_result result = new alter_partitions_req_result();
         try {
-          result.success = iface.alter_partitions_with_environment_context_req(args.req);
+          result.success = iface.alter_partitions_req(args.req);
         } catch (InvalidOperationException o1) {
           result.o1 = o1;
         } catch (MetaException o2) {
@@ -17012,6 +17335,66 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_table_column_statistics_req_args> {
+      public update_table_column_statistics_req() {
+        super("update_table_column_statistics_req");
+      }
+
+      public update_table_column_statistics_req_args getEmptyArgsInstance() {
+        return new update_table_column_statistics_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public update_table_column_statistics_req_result getResult(I iface, update_table_column_statistics_req_args args) throws org.apache.thrift.TException {
+        update_table_column_statistics_req_result result = new update_table_column_statistics_req_result();
+        try {
+          result.success = iface.update_table_column_statistics_req(args.req);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (InvalidObjectException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        } catch (InvalidInputException o4) {
+          result.o4 = o4;
+        }
+        return result;
+      }
+    }
+
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_partition_column_statistics_req_args> {
+      public update_partition_column_statistics_req() {
+        super("update_partition_column_statistics_req");
+      }
+
+      public update_partition_column_statistics_req_args getEmptyArgsInstance() {
+        return new update_partition_column_statistics_req_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public update_partition_column_statistics_req_result getResult(I iface, update_partition_column_statistics_req_args args) throws org.apache.thrift.TException {
+        update_partition_column_statistics_req_result result = new update_partition_column_statistics_req_result();
+        try {
+          result.success = iface.update_partition_column_statistics_req(args.req);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (InvalidObjectException o2) {
+          result.o2 = o2;
+        } catch (MetaException o3) {
+          result.o3 = o3;
+        } catch (InvalidInputException o4) {
+          result.o4 = o4;
+        }
+        return result;
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_table_column_statistics_args> {
       public get_table_column_statistics() {
         super("get_table_column_statistics");
@@ -19724,6 +20107,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("drop_table", new drop_table());
       processMap.put("drop_table_with_environment_context", new drop_table_with_environment_context());
       processMap.put("truncate_table", new truncate_table());
+      processMap.put("truncate_table_req", new truncate_table_req());
       processMap.put("get_tables", new get_tables());
       processMap.put("get_tables_by_type", new get_tables_by_type());
       processMap.put("get_materialized_views_for_rewriting", new get_materialized_views_for_rewriting());
@@ -19739,6 +20123,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_table", new alter_table());
       processMap.put("alter_table_with_environment_context", new alter_table_with_environment_context());
       processMap.put("alter_table_with_cascade", new alter_table_with_cascade());
+      processMap.put("alter_table_req", new alter_table_req());
       processMap.put("add_partition", new add_partition());
       processMap.put("add_partition_with_environment_context", new add_partition_with_environment_context());
       processMap.put("add_partitions", new add_partitions());
@@ -19774,7 +20159,7 @@ import org.slf4j.LoggerFactory;
       processMap.put("alter_partition", new alter_partition());
       processMap.put("alter_partitions", new alter_partitions());
       processMap.put("alter_partitions_with_environment_context", new alter_partitions_with_environment_context());
-      processMap.put("alter_partitions_with_environment_context_req", new alter_partitions_with_environment_context_req());
+      processMap.put("alter_partitions_req", new alter_partitions_req());
       processMap.put("alter_partition_with_environment_context", new alter_partition_with_environment_context());
       processMap.put("rename_partition", new rename_partition());
       processMap.put("partition_name_has_valid_characters", new partition_name_has_valid_characters());
@@ -19791,6 +20176,8 @@ import org.slf4j.LoggerFactory;
       processMap.put("get_check_constraints", new get_check_constraints());
       processMap.put("update_table_column_statistics", new update_table_column_statistics());
       processMap.put("update_partition_column_statistics", new update_partition_column_statistics());
+      processMap.put("update_table_column_statistics_req", new update_table_column_statistics_req());
+      processMap.put("update_partition_column_statistics_req", new update_partition_column_statistics_req());
       processMap.put("get_table_column_statistics", new get_table_column_statistics());
       processMap.put("get_partition_column_statistics", new get_partition_column_statistics());
       processMap.put("get_table_statistics_req", new get_table_statistics_req());
@@ -22037,6 +22424,63 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class truncate_table_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, truncate_table_req_args, TruncateTableResponse> {
+      public truncate_table_req() {
+        super("truncate_table_req");
+      }
+
+      public truncate_table_req_args getEmptyArgsInstance() {
+        return new truncate_table_req_args();
+      }
+
+      public AsyncMethodCallback<TruncateTableResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<TruncateTableResponse>() { 
+          public void onComplete(TruncateTableResponse o) {
+            truncate_table_req_result result = new truncate_table_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            truncate_table_req_result result = new truncate_table_req_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, truncate_table_req_args args, org.apache.thrift.async.AsyncMethodCallback<TruncateTableResponse> resultHandler) throws TException {
+        iface.truncate_table_req(args.req,resultHandler);
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_tables<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_tables_args, List<String>> {
       public get_tables() {
         super("get_tables");
@@ -22947,6 +23391,68 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_table_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_table_req_args, AlterTableResponse> {
+      public alter_table_req() {
+        super("alter_table_req");
+      }
+
+      public alter_table_req_args getEmptyArgsInstance() {
+        return new alter_table_req_args();
+      }
+
+      public AsyncMethodCallback<AlterTableResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<AlterTableResponse>() { 
+          public void onComplete(AlterTableResponse o) {
+            alter_table_req_result result = new alter_table_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            alter_table_req_result result = new alter_table_req_result();
+            if (e instanceof InvalidOperationException) {
+                        result.o1 = (InvalidOperationException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, alter_table_req_args args, org.apache.thrift.async.AsyncMethodCallback<AlterTableResponse> resultHandler) throws TException {
+        iface.alter_table_req(args.req,resultHandler);
+      }
+    }
+
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class add_partition<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_partition_args, Partition> {
       public add_partition() {
         super("add_partition");
@@ -25186,20 +25692,20 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_with_environment_context_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_with_environment_context_req_args, AlterPartitionsResponse> {
-      public alter_partitions_with_environment_context_req() {
-        super("alter_partitions_with_environment_context_req");
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class alter_partitions_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, alter_partitions_req_args, AlterPartitionsResponse> {
+      public alter_partitions_req() {
+        super("alter_partitions_req");
       }
 
-      public alter_partitions_with_environment_context_req_args getEmptyArgsInstance() {
-        return new alter_partitions_with_environment_context_req_args();
+      public alter_partitions_req_args getEmptyArgsInstance() {
+        return new alter_partitions_req_args();
       }
 
       public AsyncMethodCallback<AlterPartitionsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<AlterPartitionsResponse>() { 
           public void onComplete(AlterPartitionsResponse o) {
-            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+            alter_partitions_req_result result = new alter_partitions_req_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -25212,7 +25718,7 @@ import org.slf4j.LoggerFactory;
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            alter_partitions_with_environment_context_req_result result = new alter_partitions_with_environment_context_req_result();
+            alter_partitions_req_result result = new alter_partitions_req_result();
             if (e instanceof InvalidOperationException) {
                         result.o1 = (InvalidOperationException) e;
                         result.setO1IsSet(true);
@@ -25243,8 +25749,8 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, alter_partitions_with_environment_context_req_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
-        iface.alter_partitions_with_environment_context_req(args.req,resultHandler);
+      public void start(I iface, alter_partitions_req_args args, org.apache.thrift.async.AsyncMethodCallback<AlterPartitionsResponse> resultHandler) throws TException {
+        iface.alter_partitions_req(args.req,resultHandler);
       }
     }
 
@@ -26281,6 +26787,150 @@ import org.slf4j.LoggerFactory;
       }
     }
 
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_table_column_statistics_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_table_column_statistics_req_args, SetPartitionsStatsResponse> {
+      public update_table_column_statistics_req() {
+        super("update_table_column_statistics_req");
+      }
+
+      public update_table_column_statistics_req_args getEmptyArgsInstance() {
+        return new update_table_column_statistics_req_args();
+      }
+
+      public AsyncMethodCallback<SetPartitionsStatsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<SetPartitionsStatsResponse>() { 
+          public void onComplete(SetPartitionsStatsResponse o) {
+            update_table_column_statistics_req_result result = new update_table_column_statistics_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            update_table_column_statistics_req_result result = new update_table_column_statistics_req_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidObjectException) {
+                        result.o2 = (InvalidObjectException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidInputException) {
+                        result.o4 = (InvalidInputException) e;
+                        result.setO4IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, update_table_column_statistics_req_args args, org.apache.thrift.async.AsyncMethodCallback<SetPartitionsStatsResponse> resultHandler) throws TException {
+        iface.update_table_column_statistics_req(args.req,resultHandler);
+      }
+    }
+
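The class above is one of the two new async process functions added for the stats-request API: onComplete wraps the SetPartitionsStatsResponse into the generated *_result struct, while onError maps the four declared exceptions to result fields o1 through o4 and anything else to a TApplicationException(INTERNAL_ERROR). A hedged sketch of a synchronous caller follows; SetPartitionsStatsRequest and its setters are assumptions, since the request type is not visible in this part of the mail (the matching sync declaration would be generated elsewhere in this patch):

  import java.util.Collections;
  import org.apache.thrift.TException;
  import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsResponse;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

  final class TableStatsReqSketch {
    // setColStats/setWriteId/setValidWriteIdList are assumed fields; the ACID
    // writeId/validWriteIdList pair matches the HIVE-19820 theme but is not
    // confirmed by the hunks shown here.
    static SetPartitionsStatsResponse updateTableStats(ThriftHiveMetastore.Iface client,
        ColumnStatistics stats, long writeId, String validWriteIdList)
        throws TException {
      SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
      req.setColStats(Collections.singletonList(stats));
      req.setWriteId(writeId);                   // ACID: txn write id for the stats
      req.setValidWriteIdList(validWriteIdList); // ACID: snapshot the stats are valid for
      return client.update_table_column_statistics_req(req);
    }
  }
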
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_partition_column_statistics_req<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_partition_column_statistics_req_args, SetPartitionsStatsResponse> {
+      public update_partition_column_statistics_req() {
+        super("update_partition_column_statistics_req");
+      }
+
+      public update_partition_column_statistics_req_args getEmptyArgsInstance() {
+        return new update_partition_column_statistics_req_args();
+      }
+
+      public AsyncMethodCallback<SetPartitionsStatsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<SetPartitionsStatsResponse>() { 
+          public void onComplete(SetPartitionsStatsResponse o) {
+            update_partition_column_statistics_req_result result = new update_partition_column_statistics_req_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            update_partition_column_statistics_req_result result = new update_partition_column_statistics_req_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidObjectException) {
+                        result.o2 = (InvalidObjectException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof MetaException) {
+                        result.o3 = (MetaException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof InvalidInputException) {
+                        result.o4 = (InvalidInputException) e;
+                        result.setO4IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, update_partition_column_statistics_req_args args, org.apache.thrift.async.AsyncMethodCallback<SetPartitionsStatsResponse> resultHandler) throws TException {
+        iface.update_partition_column_statistics_req(args.req,resultHandler);
+      }
+    }
+
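The partition-level processor above is identical in shape to the table-level one; only the dispatch target changes. Driving it through the generated async client looks roughly like this (same assumed SetPartitionsStatsRequest as above; AsyncMethodCallback is the standard libthrift interface, mirroring the callback built by getResultHandler):

  import org.apache.thrift.TException;
  import org.apache.thrift.async.AsyncMethodCallback;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
  import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsResponse;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

  final class PartitionStatsAsyncSketch {
    // Hedged usage sketch, not part of the patch.
    static void updatePartitionStats(ThriftHiveMetastore.AsyncClient client,
        SetPartitionsStatsRequest req) throws TException {
      client.update_partition_column_statistics_req(req,
          new AsyncMethodCallback<SetPartitionsStatsResponse>() {
            @Override public void onComplete(SetPartitionsStatsResponse resp) {
              // success path: response written via TMessageType.REPLY
            }
            @Override public void onError(Exception e) {
              // the four declared exceptions surface here (result fields o1..o4);
              // anything else arrives as TApplicationException(INTERNAL_ERROR)
            }
          });
    }
  }
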
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_table_column_statistics<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_table_column_statistics_args, ColumnStatistics> {
       public get_table_column_statistics() {
         super("get_table_column_statistics");
@@ -42539,13 +43189,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list960.size);
-                  String _elem961;
-                  for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                  org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list968.size);
+                  String _elem969;
+                  for (int _i970 = 0; _i970 < _list968.size; ++_i970)
                   {
-                    _elem961 = iprot.readString();
-                    struct.success.add(_elem961);
+                    _elem969 = iprot.readString();
+                    struct.success.add(_elem969);
                   }
                   iprot.readListEnd();
                 }
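
Every hunk from here to the end of the mail is mechanical: adding the two req methods shifted the Thrift compiler's global temporary counter, so the _list960/_elem961/_i962/_iter963-style names are renumbered by +8 throughout the later (de)serializers with no behavioral change. Distilled, the idiom each hunk preserves looks like this; readStringList is an illustrative name, not code from the patch:

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.thrift.TException;
  import org.apache.thrift.protocol.TList;
  import org.apache.thrift.protocol.TProtocol;

  final class ThriftListReadSketch {
    // Only the compiler-assigned suffixes on _listNNN/_elemNNN/_iNNN change
    // in the hunks; the read logic itself is untouched.
    static List<String> readStringList(TProtocol iprot) throws TException {
      TList tlist = iprot.readListBegin();
      List<String> out = new ArrayList<>(tlist.size);
      for (int i = 0; i < tlist.size; ++i) {
        out.add(iprot.readString());
      }
      iprot.readListEnd();
      return out;
    }
  }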
@@ -42580,9 +43230,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter963 : struct.success)
+            for (String _iter971 : struct.success)
             {
-              oprot.writeString(_iter963);
+              oprot.writeString(_iter971);
             }
             oprot.writeListEnd();
           }
@@ -42621,9 +43271,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter964 : struct.success)
+            for (String _iter972 : struct.success)
             {
-              oprot.writeString(_iter964);
+              oprot.writeString(_iter972);
             }
           }
         }
@@ -42638,13 +43288,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list965.size);
-            String _elem966;
-            for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+            org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list973.size);
+            String _elem974;
+            for (int _i975 = 0; _i975 < _list973.size; ++_i975)
             {
-              _elem966 = iprot.readString();
-              struct.success.add(_elem966);
+              _elem974 = iprot.readString();
+              struct.success.add(_elem974);
             }
           }
           struct.setSuccessIsSet(true);
@@ -43298,13 +43948,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list968.size);
-                  String _elem969;
-                  for (int _i970 = 0; _i970 < _list968.size; ++_i970)
+                  org.apache.thrift.protocol.TList _list976 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list976.size);
+                  String _elem977;
+                  for (int _i978 = 0; _i978 < _list976.size; ++_i978)
                   {
-                    _elem969 = iprot.readString();
-                    struct.success.add(_elem969);
+                    _elem977 = iprot.readString();
+                    struct.success.add(_elem977);
                   }
                   iprot.readListEnd();
                 }
@@ -43339,9 +43989,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter971 : struct.success)
+            for (String _iter979 : struct.success)
             {
-              oprot.writeString(_iter971);
+              oprot.writeString(_iter979);
             }
             oprot.writeListEnd();
           }
@@ -43380,9 +44030,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter972 : struct.success)
+            for (String _iter980 : struct.success)
             {
-              oprot.writeString(_iter972);
+              oprot.writeString(_iter980);
             }
           }
         }
@@ -43397,13 +44047,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list973.size);
-            String _elem974;
-            for (int _i975 = 0; _i975 < _list973.size; ++_i975)
+            org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list981.size);
+            String _elem982;
+            for (int _i983 = 0; _i983 < _list981.size; ++_i983)
             {
-              _elem974 = iprot.readString();
-              struct.success.add(_elem974);
+              _elem982 = iprot.readString();
+              struct.success.add(_elem982);
             }
           }
           struct.setSuccessIsSet(true);
@@ -48010,16 +48660,16 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map976 = iprot.readMapBegin();
-                  struct.success = new HashMap<String,Type>(2*_map976.size);
-                  String _key977;
-                  Type _val978;
-                  for (int _i979 = 0; _i979 < _map976.size; ++_i979)
+                  org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin();
+                  struct.success = new HashMap<String,Type>(2*_map984.size);
+                  String _key985;
+                  Type _val986;
+                  for (int _i987 = 0; _i987 < _map984.size; ++_i987)
                   {
-                    _key977 = iprot.readString();
-                    _val978 = new Type();
-                    _val978.read(iprot);
-                    struct.success.put(_key977, _val978);
+                    _key985 = iprot.readString();
+                    _val986 = new Type();
+                    _val986.read(iprot);
+                    struct.success.put(_key985, _val986);
                   }
                   iprot.readMapEnd();
                 }
@@ -48054,10 +48704,10 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Map.Entry<String, Type> _iter980 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter988 : struct.success.entrySet())
             {
-              oprot.writeString(_iter980.getKey());
-              _iter980.getValue().write(oprot);
+              oprot.writeString(_iter988.getKey());
+              _iter988.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -48096,10 +48746,10 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<String, Type> _iter981 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter989 : struct.success.entrySet())
             {
-              oprot.writeString(_iter981.getKey());
-              _iter981.getValue().write(oprot);
+              oprot.writeString(_iter989.getKey());
+              _iter989.getValue().write(oprot);
             }
           }
         }
@@ -48114,16 +48764,16 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map982 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new HashMap<String,Type>(2*_map982.size);
-            String _key983;
-            Type _val984;
-            for (int _i985 = 0; _i985 < _map982.size; ++_i985)
+            org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new HashMap<String,Type>(2*_map990.size);
+            String _key991;
+            Type _val992;
+            for (int _i993 = 0; _i993 < _map990.size; ++_i993)
             {
-              _key983 = iprot.readString();
-              _val984 = new Type();
-              _val984.read(iprot);
-              struct.success.put(_key983, _val984);
+              _key991 = iprot.readString();
+              _val992 = new Type();
+              _val992.read(iprot);
+              struct.success.put(_key991, _val992);
             }
           }
           struct.setSuccessIsSet(true);
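
The map-valued results get the same renumbering treatment; the preserved idiom presizes the HashMap at 2*size, which keeps it below the default 0.75 load factor so no rehash occurs while filling. Distilled (illustrative name, not patch code):

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.thrift.TException;
  import org.apache.thrift.protocol.TMap;
  import org.apache.thrift.protocol.TProtocol;
  import org.apache.hadoop.hive.metastore.api.Type;

  final class ThriftMapReadSketch {
    // Mirrors the renumbered TMap hunks above: each entry is a string key
    // plus a struct value read in place.
    static Map<String, Type> readTypeMap(TProtocol iprot) throws TException {
      TMap tmap = iprot.readMapBegin();
      Map<String, Type> out = new HashMap<>(2 * tmap.size);
      for (int i = 0; i < tmap.size; ++i) {
        String key = iprot.readString();
        Type val = new Type();
        val.read(iprot);
        out.put(key, val);
      }
      iprot.readMapEnd();
      return out;
    }
  }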
@@ -49158,14 +49808,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list986.size);
-                  FieldSchema _elem987;
-                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
+                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list994.size);
+                  FieldSchema _elem995;
+                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
                   {
-                    _elem987 = new FieldSchema();
-                    _elem987.read(iprot);
-                    struct.success.add(_elem987);
+                    _elem995 = new FieldSchema();
+                    _elem995.read(iprot);
+                    struct.success.add(_elem995);
                   }
                   iprot.readListEnd();
                 }
@@ -49218,9 +49868,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter989 : struct.success)
+            for (FieldSchema _iter997 : struct.success)
             {
-              _iter989.write(oprot);
+              _iter997.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -49275,9 +49925,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter990 : struct.success)
+            for (FieldSchema _iter998 : struct.success)
             {
-              _iter990.write(oprot);
+              _iter998.write(oprot);
             }
           }
         }
@@ -49298,14 +49948,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list991.size);
-            FieldSchema _elem992;
-            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
+            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list999.size);
+            FieldSchema _elem1000;
+            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
             {
-              _elem992 = new FieldSchema();
-              _elem992.read(iprot);
-              struct.success.add(_elem992);
+              _elem1000 = new FieldSchema();
+              _elem1000.read(iprot);
+              struct.success.add(_elem1000);
             }
           }
           struct.setSuccessIsSet(true);
@@ -50459,14 +51109,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list994.size);
-                  FieldSchema _elem995;
-                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
+                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
+                  FieldSchema _elem1003;
+                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
                   {
-                    _elem995 = new FieldSchema();
-                    _elem995.read(iprot);
-                    struct.success.add(_elem995);
+                    _elem1003 = new FieldSchema();
+                    _elem1003.read(iprot);
+                    struct.success.add(_elem1003);
                   }
                   iprot.readListEnd();
                 }
@@ -50519,9 +51169,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter997 : struct.success)
+            for (FieldSchema _iter1005 : struct.success)
             {
-              _iter997.write(oprot);
+              _iter1005.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -50576,9 +51226,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter998 : struct.success)
+            for (FieldSchema _iter1006 : struct.success)
             {
-              _iter998.write(oprot);
+              _iter1006.write(oprot);
             }
           }
         }
@@ -50599,14 +51249,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list999.size);
-            FieldSchema _elem1000;
-            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
+            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1007.size);
+            FieldSchema _elem1008;
+            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
             {
-              _elem1000 = new FieldSchema();
-              _elem1000.read(iprot);
-              struct.success.add(_elem1000);
+              _elem1008 = new FieldSchema();
+              _elem1008.read(iprot);
+              struct.success.add(_elem1008);
             }
           }
           struct.setSuccessIsSet(true);
@@ -51651,14 +52301,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
-                  FieldSchema _elem1003;
-                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
+                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1010.size);
+                  FieldSchema _elem1011;
+                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
                   {
-                    _elem1003 = new FieldSchema();
-                    _elem1003.read(iprot);
-                    struct.success.add(_elem1003);
+                    _elem1011 = new FieldSchema();
+                    _elem1011.read(iprot);
+                    struct.success.add(_elem1011);
                   }
                   iprot.readListEnd();
                 }
@@ -51711,9 +52361,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter1005 : struct.success)
+            for (FieldSchema _iter1013 : struct.success)
             {
-              _iter1005.write(oprot);
+              _iter1013.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -51768,9 +52418,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter1006 : struct.success)
+            for (FieldSchema _iter1014 : struct.success)
             {
-              _iter1006.write(oprot);
+              _iter1014.write(oprot);
             }
           }
         }
@@ -51791,14 +52441,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list1007.size);
-            FieldSchema _elem1008;
-            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
+            org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1015.size);
+            FieldSchema _elem1016;
+            for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
             {
-              _elem1008 = new FieldSchema();
-              _elem1008.read(iprot);
-              struct.success.add(_elem1008);
+              _elem1016 = new FieldSchema();
+              _elem1016.read(iprot);
+              struct.success.add(_elem1016);
             }
           }
           struct.setSuccessIsSet(true);
@@ -52952,14 +53602,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list1010.size);
-                  FieldSchema _elem1011;
-                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
+                  org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list1018.size);
+                  FieldSchema _elem1019;
+                  for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
                   {
-                    _elem1011 = new FieldSchema();
-                    _elem1011.read(iprot);
-                    struct.success.add(_elem1011);
+                    _elem1019 = new FieldSchema();
+                    _elem1019.read(iprot);
+                    struct.success.add(_elem1019);
                   }
                   iprot.readListEnd();
                 }
@@ -53012,9 +53662,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter1013 : struct.success)
+            for (FieldSchema _iter1021 : struct.success)
             {
-              _iter1013.write(oprot);
+              _iter1021.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -53069,9 +53719,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter1014 : struct.success)
+            for (FieldSchema _iter1022 : struct.success)
             {
-              _iter1014.write(oprot);
+              _iter1022.write(oprot);
             }
           }
         }
@@ -53092,14 +53742,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list1015.size);
-            FieldSchema _elem1016;
-            for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
+            org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list1023.size);
+            FieldSchema _elem1024;
+            for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025)
             {
-              _elem1016 = new FieldSchema();
-              _elem1016.read(iprot);
-              struct.success.add(_elem1016);
+              _elem1024 = new FieldSchema();
+              _elem1024.read(iprot);
+              struct.success.add(_elem1024);
             }
           }
           struct.setSuccessIsSet(true);
@@ -56228,14 +56878,14 @@ import org.slf4j.LoggerFactory;
             case 2: // PRIMARY_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
-                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1018.size);
-                  SQLPrimaryKey _elem1019;
-                  for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
+                  org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin();
+                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1026.size);
+                  SQLPrimaryKey _elem1027;
+                  for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028)
                   {
-                    _elem1019 = new SQLPrimaryKey();
-                    _elem1019.read(iprot);
-                    struct.primaryKeys.add(_elem1019);
+                    _elem1027 = new SQLPrimaryKey();
+                    _elem1027.read(iprot);
+                    struct.primaryKeys.add(_elem1027);
                   }
                   iprot.readListEnd();
                 }
@@ -56247,14 +56897,14 @@ import org.slf4j.LoggerFactory;
             case 3: // FOREIGN_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1021 = iprot.readListBegin();
-                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1021.size);
-                  SQLForeignKey _elem1022;
-                  for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
+                  org.apache.thrift.protocol.TList _list1029 = iprot.readListBegin();
+                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1029.size);
+                  SQLForeignKey _elem1030;
+                  for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
                   {
-                    _elem1022 = new SQLForeignKey();
-                    _elem1022.read(iprot);
-                    struct.foreignKeys.add(_elem1022);
+                    _elem1030 = new SQLForeignKey();
+                    _elem1030.read(iprot);
+                    struct.foreignKeys.add(_elem1030);
                   }
                   iprot.readListEnd();
                 }
@@ -56266,14 +56916,14 @@ import org.slf4j.LoggerFactory;
             case 4: // UNIQUE_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
-                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1024.size);
-                  SQLUniqueConstraint _elem1025;
-                  for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
+                  org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
+                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1032.size);
+                  SQLUniqueConstraint _elem1033;
+                  for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
                   {
-                    _elem1025 = new SQLUniqueConstraint();
-                    _elem1025.read(iprot);
-                    struct.uniqueConstraints.add(_elem1025);
+                    _elem1033 = new SQLUniqueConstraint();
+                    _elem1033.read(iprot);
+                    struct.uniqueConstraints.add(_elem1033);
                   }
                   iprot.readListEnd();
                 }
@@ -56285,14 +56935,14 @@ import org.slf4j.LoggerFactory;
             case 5: // NOT_NULL_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1027 = iprot.readListBegin();
-                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1027.size);
-                  SQLNotNullConstraint _elem1028;
-                  for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029)
+                  org.apache.thrift.protocol.TList _list1035 = iprot.readListBegin();
+                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1035.size);
+                  SQLNotNullConstraint _elem1036;
+                  for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037)
                   {
-                    _elem1028 = new SQLNotNullConstraint();
-                    _elem1028.read(iprot);
-                    struct.notNullConstraints.add(_elem1028);
+                    _elem1036 = new SQLNotNullConstraint();
+                    _elem1036.read(iprot);
+                    struct.notNullConstraints.add(_elem1036);
                   }
                   iprot.readListEnd();
                 }
@@ -56304,14 +56954,14 @@ import org.slf4j.LoggerFactory;
             case 6: // DEFAULT_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin();
-                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1030.size);
-                  SQLDefaultConstraint _elem1031;
-                  for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032)
+                  org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin();
+                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1038.size);
+                  SQLDefaultConstraint _elem1039;
+                  for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040)
                   {
- 

<TRUNCATED>