Posted to commits@hive.apache.org by se...@apache.org on 2018/07/14 01:53:27 UTC

[05/20] hive git commit: HIVE-19820 : add ACID stats support to background stats updater and fix bunch of edge cases found in SU tests (Sergey Shelukhin)

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 86b469c..7573eeb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -603,6 +603,49 @@ class GrantRevokePrivilegeResponse
   ::Thrift::Struct.generate_accessors self
 end
 
+class TruncateTableRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  DBNAME = 1
+  TABLENAME = 2
+  PARTNAMES = 3
+  TXNID = 4
+  WRITEID = 5
+  VALIDWRITEIDLIST = 6
+
+  FIELDS = {
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class TruncateTableResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+
+  FIELDS = {
+
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class Role
   include ::Thrift::Struct, ::Thrift::Struct_Union
   ROLENAME = 1
@@ -1559,15 +1602,11 @@ class ColumnStatistics
   include ::Thrift::Struct, ::Thrift::Struct_Union
   STATSDESC = 1
   STATSOBJ = 2
-  TXNID = 3
-  VALIDWRITEIDLIST = 4
-  ISSTATSCOMPLIANT = 5
+  ISSTATSCOMPLIANT = 3
 
   FIELDS = {
     STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc},
     STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
-    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
     ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
   }
 
@@ -1628,6 +1667,23 @@ class SetPartitionsStatsRequest
   ::Thrift::Struct.generate_accessors self
 end
 
+class SetPartitionsStatsResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  RESULT = 1
+
+  FIELDS = {
+    RESULT => {:type => ::Thrift::Types::BOOL, :name => 'result'}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field result is unset!') if @result.nil?
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class Schema
   include ::Thrift::Struct, ::Thrift::Struct_Union
   FIELDSCHEMAS = 1
@@ -5057,19 +5113,21 @@ end
 
 class AlterPartitionsRequest
   include ::Thrift::Struct, ::Thrift::Struct_Union
-  DBNAME = 1
-  TABLENAME = 2
-  PARTITIONS = 3
-  ENVIRONMENTCONTEXT = 4
-  TXNID = 5
-  WRITEID = 6
-  VALIDWRITEIDLIST = 7
+  CATNAME = 1
+  DBNAME = 2
+  TABLENAME = 3
+  PARTITIONS = 4
+  ENVIRONMENTCONTEXT = 5
+  TXNID = 6
+  WRITEID = 7
+  VALIDWRITEIDLIST = 8
 
   FIELDS = {
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
-    ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext},
+    ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
     TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
@@ -5081,7 +5139,6 @@ class AlterPartitionsRequest
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitions is unset!') unless @partitions
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field environmentContext is unset!') unless @environmentContext
   end
 
   ::Thrift::Struct.generate_accessors self
@@ -5102,6 +5159,54 @@ class AlterPartitionsResponse
   ::Thrift::Struct.generate_accessors self
 end
 
+class AlterTableRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  CATNAME = 1
+  DBNAME = 2
+  TABLENAME = 3
+  TABLE = 4
+  ENVIRONMENTCONTEXT = 5
+  TXNID = 6
+  WRITEID = 7
+  VALIDWRITEIDLIST = 8
+
+  FIELDS = {
+    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+    TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table},
+    ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
+    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
+    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
+    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field table is unset!') unless @table
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class AlterTableResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+
+  FIELDS = {
+
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class MetaException < ::Thrift::Exception
   include ::Thrift::Struct, ::Thrift::Struct_Union
   def initialize(message=nil)

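As an aside for readers of the generated bindings above: truncate_table_req replaces the positional truncate_table call with a request struct that carries the writer's ACID context. A minimal caller sketch, assuming the generated Java client mirrors these Ruby bindings (standard Thrift codegen names; client, txnId, writeId and validWriteIdList are placeholders for caller state, not part of this patch):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.hadoop.hive.metastore.api.TruncateTableRequest;

    class TruncateReqSketch {
      static void truncateAcidTable(ThriftHiveMetastore.Iface client, long txnId,
          long writeId, String validWriteIdList) throws Exception {
        // dbName and tableName are the only required fields.
        TruncateTableRequest req = new TruncateTableRequest("default", "acid_tbl");
        req.setPartNames(Arrays.asList("ds=2018-07-01"));  // optional; unset = whole table
        req.setTxnId(txnId);                        // -1 when not running in a transaction
        req.setWriteId(writeId);                    // write ID allocated to this txn for the table
        req.setValidWriteIdList(validWriteIdList);  // serialized write-ID snapshot
        client.truncate_table_req(req);             // returns an empty TruncateTableResponse
      }
    }
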
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index fdcd3de..0a68424 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -579,6 +579,22 @@ module ThriftHiveMetastore
       return
     end
 
+    def truncate_table_req(req)
+      send_truncate_table_req(req)
+      return recv_truncate_table_req()
+    end
+
+    def send_truncate_table_req(req)
+      send_message('truncate_table_req', Truncate_table_req_args, :req => req)
+    end
+
+    def recv_truncate_table_req()
+      result = receive_message(Truncate_table_req_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'truncate_table_req failed: unknown result')
+    end
+
     def get_tables(db_name, pattern)
       send_get_tables(db_name, pattern)
       return recv_get_tables()
@@ -827,6 +843,23 @@ module ThriftHiveMetastore
       return
     end
 
+    def alter_table_req(req)
+      send_alter_table_req(req)
+      return recv_alter_table_req()
+    end
+
+    def send_alter_table_req(req)
+      send_message('alter_table_req', Alter_table_req_args, :req => req)
+    end
+
+    def recv_alter_table_req()
+      result = receive_message(Alter_table_req_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_table_req failed: unknown result')
+    end
+
     def add_partition(new_part)
       send_add_partition(new_part)
       return recv_add_partition()
@@ -1432,21 +1465,21 @@ module ThriftHiveMetastore
       return
     end
 
-    def alter_partitions_with_environment_context_req(req)
-      send_alter_partitions_with_environment_context_req(req)
-      return recv_alter_partitions_with_environment_context_req()
+    def alter_partitions_req(req)
+      send_alter_partitions_req(req)
+      return recv_alter_partitions_req()
     end
 
-    def send_alter_partitions_with_environment_context_req(req)
-      send_message('alter_partitions_with_environment_context_req', Alter_partitions_with_environment_context_req_args, :req => req)
+    def send_alter_partitions_req(req)
+      send_message('alter_partitions_req', Alter_partitions_req_args, :req => req)
     end
 
-    def recv_alter_partitions_with_environment_context_req()
-      result = receive_message(Alter_partitions_with_environment_context_req_result)
+    def recv_alter_partitions_req()
+      result = receive_message(Alter_partitions_req_result)
       return result.success unless result.success.nil?
       raise result.o1 unless result.o1.nil?
       raise result.o2 unless result.o2.nil?
-      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_with_environment_context_req failed: unknown result')
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_req failed: unknown result')
     end
 
     def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
@@ -1726,6 +1759,44 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics failed: unknown result')
     end
 
+    def update_table_column_statistics_req(req)
+      send_update_table_column_statistics_req(req)
+      return recv_update_table_column_statistics_req()
+    end
+
+    def send_update_table_column_statistics_req(req)
+      send_message('update_table_column_statistics_req', Update_table_column_statistics_req_args, :req => req)
+    end
+
+    def recv_update_table_column_statistics_req()
+      result = receive_message(Update_table_column_statistics_req_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise result.o4 unless result.o4.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_table_column_statistics_req failed: unknown result')
+    end
+
+    def update_partition_column_statistics_req(req)
+      send_update_partition_column_statistics_req(req)
+      return recv_update_partition_column_statistics_req()
+    end
+
+    def send_update_partition_column_statistics_req(req)
+      send_message('update_partition_column_statistics_req', Update_partition_column_statistics_req_args, :req => req)
+    end
+
+    def recv_update_partition_column_statistics_req()
+      result = receive_message(Update_partition_column_statistics_req_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise result.o3 unless result.o3.nil?
+      raise result.o4 unless result.o4.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics_req failed: unknown result')
+    end
+
     def get_table_column_statistics(db_name, tbl_name, col_name)
       send_get_table_column_statistics(db_name, tbl_name, col_name)
       return recv_get_table_column_statistics()
@@ -3953,6 +4024,17 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'truncate_table', seqid)
     end
 
+    def process_truncate_table_req(seqid, iprot, oprot)
+      args = read_args(iprot, Truncate_table_req_args)
+      result = Truncate_table_req_result.new()
+      begin
+        result.success = @handler.truncate_table_req(args.req)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'truncate_table_req', seqid)
+    end
+
     def process_get_tables(seqid, iprot, oprot)
       args = read_args(iprot, Get_tables_args)
       result = Get_tables_result.new()
@@ -4140,6 +4222,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'alter_table_with_cascade', seqid)
     end
 
+    def process_alter_table_req(seqid, iprot, oprot)
+      args = read_args(iprot, Alter_table_req_args)
+      result = Alter_table_req_result.new()
+      begin
+        result.success = @handler.alter_table_req(args.req)
+      rescue ::InvalidOperationException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'alter_table_req', seqid)
+    end
+
     def process_add_partition(seqid, iprot, oprot)
       args = read_args(iprot, Add_partition_args)
       result = Add_partition_result.new()
@@ -4621,17 +4716,17 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'alter_partitions_with_environment_context', seqid)
     end
 
-    def process_alter_partitions_with_environment_context_req(seqid, iprot, oprot)
-      args = read_args(iprot, Alter_partitions_with_environment_context_req_args)
-      result = Alter_partitions_with_environment_context_req_result.new()
+    def process_alter_partitions_req(seqid, iprot, oprot)
+      args = read_args(iprot, Alter_partitions_req_args)
+      result = Alter_partitions_req_result.new()
       begin
-        result.success = @handler.alter_partitions_with_environment_context_req(args.req)
+        result.success = @handler.alter_partitions_req(args.req)
       rescue ::InvalidOperationException => o1
         result.o1 = o1
       rescue ::MetaException => o2
         result.o2 = o2
       end
-      write_result(result, oprot, 'alter_partitions_with_environment_context_req', seqid)
+      write_result(result, oprot, 'alter_partitions_req', seqid)
     end
 
     def process_alter_partition_with_environment_context(seqid, iprot, oprot)
@@ -4858,6 +4953,40 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'update_partition_column_statistics', seqid)
     end
 
+    def process_update_table_column_statistics_req(seqid, iprot, oprot)
+      args = read_args(iprot, Update_table_column_statistics_req_args)
+      result = Update_table_column_statistics_req_result.new()
+      begin
+        result.success = @handler.update_table_column_statistics_req(args.req)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      rescue ::InvalidInputException => o4
+        result.o4 = o4
+      end
+      write_result(result, oprot, 'update_table_column_statistics_req', seqid)
+    end
+
+    def process_update_partition_column_statistics_req(seqid, iprot, oprot)
+      args = read_args(iprot, Update_partition_column_statistics_req_args)
+      result = Update_partition_column_statistics_req_result.new()
+      begin
+        result.success = @handler.update_partition_column_statistics_req(args.req)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::InvalidObjectException => o2
+        result.o2 = o2
+      rescue ::MetaException => o3
+        result.o3 = o3
+      rescue ::InvalidInputException => o4
+        result.o4 = o4
+      end
+      write_result(result, oprot, 'update_partition_column_statistics_req', seqid)
+    end
+
     def process_get_table_column_statistics(seqid, iprot, oprot)
       args = read_args(iprot, Get_table_column_statistics_args)
       result = Get_table_column_statistics_result.new()
@@ -7356,6 +7485,40 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Truncate_table_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::TruncateTableRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Truncate_table_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::TruncateTableResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_tables_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1
@@ -7924,6 +8087,42 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Alter_table_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::AlterTableRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Alter_table_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::AlterTableResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::InvalidOperationException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Add_partition_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     NEW_PART = 1
@@ -9362,7 +9561,7 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
-  class Alter_partitions_with_environment_context_req_args
+  class Alter_partitions_req_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     REQ = 1
 
@@ -9378,7 +9577,7 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
-  class Alter_partitions_with_environment_context_req_result
+  class Alter_partitions_req_result
     include ::Thrift::Struct, ::Thrift::Struct_Union
     SUCCESS = 0
     O1 = 1
@@ -10018,6 +10217,86 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Update_table_column_statistics_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::SetPartitionsStatsRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Update_table_column_statistics_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SetPartitionsStatsResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException},
+      O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Update_partition_column_statistics_req_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQ = 1
+
+    FIELDS = {
+      REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::SetPartitionsStatsRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Update_partition_column_statistics_req_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+    O3 = 3
+    O4 = 4
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::SetPartitionsStatsResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException},
+      O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException},
+      O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_table_column_statistics_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1

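The stats RPCs added above take the existing SetPartitionsStatsRequest and return the new SetPartitionsStatsResponse. Per the handler code later in this patch, the table-level variant expects exactly one ColumnStatistics object and rejects needMerge. A caller sketch under the same assumption as before (generated Java client; client, colStats and the txn fields are placeholders):

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsResponse;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    class UpdateStatsReqSketch {
      static boolean writeTableStats(ThriftHiveMetastore.Iface client,
          ColumnStatistics colStats, long txnId, long writeId,
          String validWriteIdList) throws Exception {
        SetPartitionsStatsRequest req =
            new SetPartitionsStatsRequest(Collections.singletonList(colStats));
        req.setTxnId(txnId);
        req.setWriteId(writeId);
        req.setValidWriteIdList(validWriteIdList);
        // Leave needMerge unset: update_table_column_statistics_req throws
        // InvalidInputException when merge is requested.
        SetPartitionsStatsResponse resp = client.update_table_column_statistics_req(req);
        return resp.isResult();  // presumably false when the write was not applied
      }
    }
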
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index af9b0b1..35be3c4 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@ -157,10 +157,6 @@ public class StatsSetupConst {
 
   public static final String CASCADE = "CASCADE";
 
-  // TODO: when alter calls are switched to req/resp models, replace these and the above with fields.
-  public static final String TXN_ID = "WRITER_TXN_ID";
-  public static final String VALID_WRITE_IDS = "WRITER_WRITE_ID";
-
   public static final String TRUE = "true";
 
   public static final String FALSE = "false";
@@ -275,10 +271,11 @@ public class StatsSetupConst {
     if (params == null) {
       return false;
     }
+    // TODO: should this also check that the basic flag is valid?
     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
     return stats.columnStats.containsKey(colName);
   }
-  
+
   public static void clearColumnStatsState(Map<String, String> params) {
     if (params == null) {
       return;
@@ -321,7 +318,7 @@ public class StatsSetupConst {
       setColumnStatsState(params, cols);
     }
   }
-  
+
   private static ColumnStatsAccurate parseStatsAcc(String statsAcc) {
     if (statsAcc == null) {
       return new ColumnStatsAccurate();

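For context on the hunk above: the removed TXN_ID / VALID_WRITE_IDS constants were a stopgap for smuggling the writer's transaction context through EnvironmentContext properties; with the req/resp APIs they become real request fields. The column-stats accuracy check that gained the TODO parses the COLUMN_STATS_ACCURATE table parameter, a small JSON blob of per-column flags. A sketch of the shape it consumes (the enclosing method is areColumnStatsUptoDate; the JSON layout follows Hive's usual convention and is shown here for illustration):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.common.StatsSetupConst;

    class ColumnStatsFlagSketch {
      static void demo() {
        Map<String, String> params = new HashMap<>();
        // Basic-stats flag plus a map of columns whose stats are up to date.
        params.put(StatsSetupConst.COLUMN_STATS_ACCURATE,
            "{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"id\":\"true\",\"name\":\"true\"}}");
        boolean idOk = StatsSetupConst.areColumnStatsUptoDate(params, "id");  // true
        boolean tsOk = StatsSetupConst.areColumnStatsUptoDate(params, "ts");  // false
      }
    }
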
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index e7cf07f..f3dc264 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -61,7 +61,7 @@ public interface AlterHandler extends Configurable {
   default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
     String name, Table newTable, EnvironmentContext envContext)
       throws InvalidOperationException, MetaException {
-    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null);
+    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, -1, null);
   }
 
   /**
@@ -88,7 +88,8 @@ public interface AlterHandler extends Configurable {
    */
   void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
       String name, Table newTable, EnvironmentContext envContext,
-      IHMSHandler handler) throws InvalidOperationException, MetaException;
+      IHMSHandler handler, long txnId, String writeIdList)
+          throws InvalidOperationException, MetaException;
 
   /**
    * @deprecated As of release 2.2.0.  Replaced by {@link #alterPartition(RawStore, Warehouse, String,
@@ -145,7 +146,7 @@ public interface AlterHandler extends Configurable {
   Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
                            final String dbname, final String name, final List<String> part_vals,
                            final Partition new_part, EnvironmentContext environmentContext,
-                           IHMSHandler handler)
+                           IHMSHandler handler, long txnId, String validWriteIds)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
 
   /**

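The interface change above threads the writer's transactional context through as explicit parameters rather than EnvironmentContext properties; the deprecated defaults pass (-1, null), meaning "no transaction". A caller sketch (the collaborator objects are placeholders, and the example write-ID-list string is an assumption about ValidReaderWriteIdList's serialized form, shown only for illustration):

    import org.apache.hadoop.hive.metastore.AlterHandler;
    import org.apache.hadoop.hive.metastore.IHMSHandler;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.Table;

    class AlterHandlerCallSketch {
      static void alterWithTxnContext(AlterHandler alterHandler, RawStore msdb,
          Warehouse wh, IHMSHandler hmsHandler, Table newTable, long txnId)
          throws Exception {
        // Illustrative snapshot string: table, high-water mark, min open write ID,
        // then the open and aborted lists (both empty here).
        String validWriteIdList = "default.acid_tbl:5:9223372036854775807::";
        alterHandler.alterTable(msdb, wh, "hive", "default", "acid_tbl",
            newTable, null, hmsHandler, txnId, validWriteIdList);
      }
    }
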
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 8b2a6ba..e8226f8 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -90,7 +90,8 @@ public class HiveAlterHandler implements AlterHandler {
   @Override
   public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
       String name, Table newt, EnvironmentContext environmentContext,
-      IHMSHandler handler) throws InvalidOperationException, MetaException {
+      IHMSHandler handler, long txnId, String writeIdList)
+          throws InvalidOperationException, MetaException {
     catName = normalizeIdentifier(catName);
     name = name.toLowerCase();
     dbname = dbname.toLowerCase();
@@ -296,7 +297,7 @@ public class HiveAlterHandler implements AlterHandler {
                 partValues.add(part.getValues());
               }
               msdb.alterPartitions(catName, newDbName, newTblName, partValues,
-                  partBatch, -1, -1, null);
+                  partBatch, newt.getWriteId(), txnId, writeIdList);
             }
           }
 
@@ -304,14 +305,15 @@ public class HiveAlterHandler implements AlterHandler {
             ColumnStatistics newPartColStats = partColStats.getValue();
             newPartColStats.getStatsDesc().setDbName(newDbName);
             newPartColStats.getStatsDesc().setTableName(newTblName);
-            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
+            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(),
+                txnId, writeIdList, newt.getWriteId());
           }
         } else {
-          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+          alterTableUpdateTableColumnStats(
+              msdb, oldt, newt, environmentContext, txnId, writeIdList);
         }
       } else {
         // operations other than table rename
-
         if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) &&
             !isPartitionedTable) {
           Database db = msdb.getDatabase(catName, newDbName);
@@ -330,23 +332,26 @@ public class HiveAlterHandler implements AlterHandler {
               ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
                   part.getValues(), oldCols, oldt, part, null);
               assert(colStats == null);
-              // Note: we don't do txn stats validation here; this can only delete stats?
               if (cascade) {
-                msdb.alterPartition(catName, dbname, name, part.getValues(), part, -1, null);
+                msdb.alterPartition(
+                    catName, dbname, name, part.getValues(), part, txnId, writeIdList);
               } else {
                 // update changed properties (stats)
                 oldPart.setParameters(part.getParameters());
-                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, -1, null);
+                msdb.alterPartition(
+                    catName, dbname, name, part.getValues(), oldPart, txnId, writeIdList);
               }
             }
            // Don't validate table-level stats for a partitioned table.
             msdb.alterTable(catName, dbname, name, newt, -1, null);
           } else {
             LOG.warn("Alter table not cascaded to partitions.");
-            alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+            alterTableUpdateTableColumnStats(
+                msdb, oldt, newt, environmentContext, txnId, writeIdList);
           }
         } else {
-          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+          alterTableUpdateTableColumnStats(
+              msdb, oldt, newt, environmentContext, txnId, writeIdList);
         }
       }
 
@@ -426,14 +431,13 @@ public class HiveAlterHandler implements AlterHandler {
     EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part,
-        environmentContext, null);
+        environmentContext, null, -1, null);
   }
 
   @Override
-  public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
-                                  final String dbname, final String name,
-                                  final List<String> part_vals, final Partition new_part,
-                                  EnvironmentContext environmentContext, IHMSHandler handler)
+  public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, String dbname,
+      String name, List<String> part_vals, final Partition new_part,
+      EnvironmentContext environmentContext, IHMSHandler handler, long txnId, String validWriteIds)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     boolean success = false;
     Partition oldPart;
@@ -449,13 +453,6 @@ public class HiveAlterHandler implements AlterHandler {
       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
           .currentTimeMillis() / 1000));
     }
-    long txnId = -1;
-    String validWriteIds = null;
-    if (environmentContext != null && environmentContext.isSetProperties()
-        && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
-      txnId = Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID));
-      validWriteIds = environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS);
-    }
 
     //alter partition
     if (part_vals == null || part_vals.size() == 0) {
@@ -623,7 +620,10 @@ public class HiveAlterHandler implements AlterHandler {
       if (cs != null) {
         cs.getStatsDesc().setPartName(newPartName);
         try {
-          msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
+          // Verifying ACID state again is not strictly needed here (alterPartition above does it),
+          // but we are going to use the uniform approach for simplicity.
+          msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
+              txnId, validWriteIds, new_part.getWriteId());
         } catch (InvalidInputException iie) {
           throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
         } catch (NoSuchObjectException nsoe) {
@@ -796,7 +796,7 @@ public class HiveAlterHandler implements AlterHandler {
 
   @VisibleForTesting
   void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable,
-      EnvironmentContext ec)
+      EnvironmentContext ec, long txnId, String validWriteIds)
       throws MetaException, InvalidObjectException {
     String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() :
         getDefaultCatalog(conf));
@@ -804,77 +804,65 @@ public class HiveAlterHandler implements AlterHandler {
     String tableName = normalizeIdentifier(oldTable.getTableName());
     String newDbName = newTable.getDbName().toLowerCase();
     String newTableName = normalizeIdentifier(newTable.getTableName());
-    long txnId = -1;
-    String validWriteIds = null;
-    if (ec != null && ec.isSetProperties() && ec.getProperties().containsKey(
-        StatsSetupConst.VALID_WRITE_IDS)) {
-      txnId = Long.parseLong(ec.getProperties().get(StatsSetupConst.TXN_ID));
-      validWriteIds = ec.getProperties().get(StatsSetupConst.VALID_WRITE_IDS);
-    }
 
     try {
       List<FieldSchema> oldCols = oldTable.getSd().getCols();
       List<FieldSchema> newCols = newTable.getSd().getCols();
       List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
       ColumnStatistics colStats = null;
-      boolean updateColumnStats = true;
-
-      // Nothing to update if everything is the same
-        if (newDbName.equals(dbName) &&
-            newTableName.equals(tableName) &&
-            MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
-          updateColumnStats = false;
+      boolean updateColumnStats = !newDbName.equals(dbName) || !newTableName.equals(tableName)
+          || !MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols);
+      if (updateColumnStats) {
+        List<String> oldColNames = new ArrayList<>(oldCols.size());
+        for (FieldSchema oldCol : oldCols) {
+          oldColNames.add(oldCol.getName());
         }
 
-        if (updateColumnStats) {
-          List<String> oldColNames = new ArrayList<>(oldCols.size());
-          for (FieldSchema oldCol : oldCols) {
-            oldColNames.add(oldCol.getName());
-          }
-
-          // Collect column stats which need to be rewritten and remove old stats.
-          colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
-          if (colStats == null) {
-            updateColumnStats = false;
-          } else {
-            List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
-            if (statsObjs != null) {
-              List<String> deletedCols = new ArrayList<>();
-              for (ColumnStatisticsObj statsObj : statsObjs) {
-                boolean found = false;
-                for (FieldSchema newCol : newCols) {
-                  if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
-                      && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
-                    found = true;
-                    break;
-                  }
+        // NOTE: this doesn't check stats being compliant, but the alterTable call below does.
+        //       The worst we can do is delete the stats.
+        // Collect column stats which need to be rewritten and remove old stats.
+        colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
+        if (colStats == null) {
+          updateColumnStats = false;
+        } else {
+          List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+          if (statsObjs != null) {
+            List<String> deletedCols = new ArrayList<>();
+            for (ColumnStatisticsObj statsObj : statsObjs) {
+              boolean found = false;
+              for (FieldSchema newCol : newCols) {
+                if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                    && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+                  found = true;
+                  break;
                 }
+              }
 
-                if (found) {
-                  if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
-                    msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
-                    newStatsObjs.add(statsObj);
-                    deletedCols.add(statsObj.getColName());
-                  }
-                } else {
+              if (found) {
+                if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
                   msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
+                  newStatsObjs.add(statsObj);
                   deletedCols.add(statsObj.getColName());
                 }
+              } else {
+                msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
+                deletedCols.add(statsObj.getColName());
               }
-              StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
             }
+            StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
           }
         }
+      }
 
-        // Change to new table and append stats for the new table
-        msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
-        if (updateColumnStats && !newStatsObjs.isEmpty()) {
-          ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
-          statsDesc.setDbName(newDbName);
-          statsDesc.setTableName(newTableName);
-          colStats.setStatsObj(newStatsObjs);
-          msdb.updateTableColumnStatistics(colStats);
-        }
+      // Change to new table and append stats for the new table
+      msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
+      if (updateColumnStats && !newStatsObjs.isEmpty()) {
+        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+        statsDesc.setDbName(newDbName);
+        statsDesc.setTableName(newTableName);
+        colStats.setStatsObj(newStatsObjs);
+        msdb.updateTableColumnStatistics(colStats, txnId, validWriteIds, newTable.getWriteId());
+      }
     } catch (NoSuchObjectException nsoe) {
       LOG.debug("Could not find db entry." + nsoe);
     } catch (InvalidInputException e) {
@@ -907,7 +895,7 @@ public class HiveAlterHandler implements AlterHandler {
         oldColNames.add(oldCol.getName());
       }
       List<String> oldPartNames = Lists.newArrayList(oldPartName);
-      // Note: doesn't take txn stats into account. This method can only remove stats.
+      // TODO: doesn't take txn stats into account. This method can only remove stats.
       List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
           oldPartNames, oldColNames);
       assert (partsColStats.size() <= 1);

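The net effect of the HiveAlterHandler changes is that every stats write during an alter or rename now carries the writer's txnId and validWriteIdList plus the table's writeId, so the store can later decide whether those stats are visible to a given reader. The underlying validity rule, illustrated with Hive's ValidReaderWriteIdList (a conceptual sketch, not code from this patch):

    import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

    class StatsVisibilitySketch {
      // Table-level stats written under statsWriteId are usable by a reader only
      // if that write ID is committed within the reader's snapshot.
      static boolean statsUsable(long statsWriteId, String readerSnapshot) {
        ValidReaderWriteIdList snapshot = new ValidReaderWriteIdList(readerSnapshot);
        return snapshot.isWriteIdValid(statsWriteId);
      }
    }
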
http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index a46d2f9..091e5de 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2692,12 +2692,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return;
     }
 
-    private void alterPartitionForTruncate(final RawStore ms,
-                                           final String catName,
-                                           final String dbName,
-                                           final String tableName,
-                                           final Table table,
-                                           final Partition partition) throws Exception {
+    private void alterPartitionForTruncate(RawStore ms, String catName, String dbName, String tableName,
+        Table table, Partition partition, long txnId, String validWriteIds, long writeId) throws Exception {
       EnvironmentContext environmentContext = new EnvironmentContext();
       updateStatsForTruncate(partition.getParameters(), environmentContext);
 
@@ -2713,20 +2709,21 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                 new AlterPartitionEvent(partition, partition, table, true, true, this));
       }
 
+      if (writeId > 0) {
+        partition.setWriteId(writeId);
+      }
       alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition,
-          environmentContext, this);
+          environmentContext, this, txnId, validWriteIds);
     }
 
-    private void alterTableStatsForTruncate(final RawStore ms,
-                                            final String catName,
-                                            final String dbName,
-                                            final String tableName,
-                                            final Table table,
-                                            final List<String> partNames) throws Exception {
+    private void alterTableStatsForTruncate(RawStore ms, String catName, String dbName,
+        String tableName, Table table, List<String> partNames,
+        long txnId, String validWriteIds, long writeId) throws Exception {
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
           for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
-            alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition);
+            alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
+                txnId, validWriteIds, writeId);
           }
         } else {
           EnvironmentContext environmentContext = new EnvironmentContext();
@@ -2744,11 +2741,17 @@ public class HiveMetaStore extends ThriftHiveMetastore {
                     new AlterTableEvent(table, table, true, true, this));
           }
 
-          alterHandler.alterTable(ms, wh, catName, dbName, tableName, table, environmentContext, this);
+          // TODO: this should actually pass thru and set writeId for txn stats.
+          if (writeId > 0) {
+            table.setWriteId(writeId);
+          }
+          alterHandler.alterTable(ms, wh, catName, dbName, tableName, table,
+              environmentContext, this, txnId, validWriteIds);
         }
       } else {
         for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
-          alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition);
+          alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
+              txnId, validWriteIds, writeId);
         }
       }
       return;
@@ -2786,6 +2789,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public void truncate_table(final String dbName, final String tableName, List<String> partNames)
       throws NoSuchObjectException, MetaException {
+      // Deprecated path, won't work for txn tables.
+      truncateTableInternal(dbName, tableName, partNames, -1, null, -1);
+    }
+
+    @Override
+    public TruncateTableResponse truncate_table_req(TruncateTableRequest req)
+        throws MetaException, TException {
+      truncateTableInternal(req.getDbName(), req.getTableName(), req.getPartNames(),
+          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+      return new TruncateTableResponse();
+    }
+
+    private void truncateTableInternal(String dbName, String tableName, List<String> partNames,
+        long txnId, String validWriteIds, long writeId) throws MetaException, NoSuchObjectException {
       try {
         String[] parsedDbName = parseDbName(dbName, conf);
         Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
@@ -2817,7 +2834,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // Alter the table/partition stats and also notify truncate table event
         alterTableStatsForTruncate(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tableName, tbl, partNames);
+            tableName, tbl, partNames, txnId, validWriteIds, writeId);
       } catch (IOException e) {
         throw new MetaException(e.getMessage());
       } catch (MetaException | NoSuchObjectException e) {
@@ -4804,6 +4821,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final EnvironmentContext envContext)
         throws TException {
       String[] parsedDbName = parseDbName(dbName, conf);
+      // TODO: this method name is confusing; it actually does a full alter (sort of).
       rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition,
           envContext);
     }
@@ -4855,7 +4873,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name,
-            part_vals, new_part, envContext, this);
+            part_vals, new_part, envContext, this, -1, null);
 
         // Only fetch the table if we actually have a listener
         Table table = null;
@@ -4890,15 +4908,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public void alter_partitions(final String db_name, final String tbl_name,
         final List<Partition> new_parts)
         throws TException {
-      alter_partitions_with_environment_context(
-          db_name, tbl_name, new_parts, null, -1, null, -1);
+      String[] o = parseDbName(db_name, conf);
+      alter_partitions_with_environment_context(o[0], o[1],
+          tbl_name, new_parts, null, -1, null, -1);
     }
 
     @Override
-    public AlterPartitionsResponse alter_partitions_with_environment_context_req(
-            AlterPartitionsRequest req)
-            throws TException {
-      alter_partitions_with_environment_context(
+    public AlterPartitionsResponse alter_partitions_req(AlterPartitionsRequest req) throws TException {
+      alter_partitions_with_environment_context(req.getCatName(),
           req.getDbName(), req.getTableName(), req.getPartitions(), req.getEnvironmentContext(),
           req.isSetTxnId() ? req.getTxnId() : -1,
           req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null,
@@ -4912,17 +4929,23 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
         final List<Partition> new_parts, EnvironmentContext environmentContext)
             throws TException {
-      alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environmentContext,
+      String[] o = parseDbName(db_name, conf);
+      alter_partitions_with_environment_context(o[0], o[1], tbl_name, new_parts, environmentContext,
           -1, null, -1);
     }
 
-    private void alter_partitions_with_environment_context(final String db_name, final String tbl_name,
+    private void alter_partitions_with_environment_context(String catName, String db_name, final String tbl_name,
         final List<Partition> new_parts, EnvironmentContext environmentContext,
         long txnId, String writeIdList, long writeId)
         throws TException {
+      if (environmentContext == null) {
+        environmentContext = new EnvironmentContext();
+      }
+      if (catName == null) {
+        catName = MetaStoreUtils.getDefaultCatalog(conf);
+      }
 
-      String[] parsedDbName = parseDbName(db_name, conf);
-      startTableFunction("alter_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name);
+      startTableFunction("alter_partitions", catName, db_name, tbl_name);
 
       if (LOG.isInfoEnabled()) {
         for (Partition tmpPart : new_parts) {
@@ -4939,10 +4962,10 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (!tmpPart.isSetCatName()) {
             tmpPart.setCatName(getDefaultCatalog(conf));
           }
-          firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this));
+          firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
         }
-        oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME],
-            parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, txnId, writeIdList, writeId, this);
+        oldParts = alterHandler.alterPartitions(getMS(), wh,
+            catName, db_name, tbl_name, new_parts, environmentContext, txnId, writeIdList, writeId, this);
         Iterator<Partition> olditr = oldParts.iterator();
         // Only fetch the table if we have a listener that needs it.
         Table table = null;
@@ -4956,8 +4979,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
 
           if (table == null) {
-            table = getMS().getTable(
-                parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name,  -1, null);
+            table = getMS().getTable(catName, db_name, tbl_name,  -1, null);
           }
 
           if (!listeners.isEmpty()) {
@@ -4995,7 +5017,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throws InvalidOperationException, MetaException {
       // Do not set an environment context.
       String[] parsedDbName = parseDbName(dbname, conf);
-      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, null);
+      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable,
+          null, -1, null);
     }
 
     @Override
@@ -5008,7 +5031,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
       }
       String[] parsedDbName = parseDbName(dbname, conf);
-      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext);
+      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable,
+          envContext, -1, null);
+    }
+
+    @Override
+    public AlterTableResponse alter_table_req(AlterTableRequest req)
+        throws InvalidOperationException, MetaException, TException {
+      alter_table_core(req.getCatName(), req.getDbName(), req.getTableName(),
+          req.getTable(), req.getEnvironmentContext(), req.getTxnId(), req.getValidWriteIdList());
+      return new AlterTableResponse();
     }
 
     @Override
@@ -5017,14 +5049,21 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final EnvironmentContext envContext)
         throws InvalidOperationException, MetaException {
       String[] parsedDbName = parseDbName(dbname, conf);
-      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext);
+      alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
+          name, newTable, envContext, -1, null);
     }
 
-    private void alter_table_core(final String catName, final String dbname, final String name,
-                                  final Table newTable, final EnvironmentContext envContext)
+    private void alter_table_core(String catName, String dbname, String name, Table newTable,
+        EnvironmentContext envContext, long txnId, String validWriteIdList)
         throws InvalidOperationException, MetaException {
       startFunction("alter_table", ": " + TableName.getQualified(catName, dbname, name)
           + " newtbl=" + newTable.getTableName());
+      if (envContext == null) {
+        envContext = new EnvironmentContext();
+      }
+      if (catName == null) {
+        catName = MetaStoreUtils.getDefaultCatalog(conf);
+      }
 
       // Update the time if it hasn't been specified.
       if (newTable.getParameters() == null ||
@@ -5052,7 +5091,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         Table oldt = get_table_core(catName, dbname, name);
         firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
         alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable,
-                envContext, this);
+                envContext, this, txnId, validWriteIdList);
         success = true;
       } catch (NoSuchObjectException e) {
         // thrown when the table to be altered does not exist
@@ -5600,6 +5639,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private String lowerCaseConvertPartName(String partName) throws MetaException {
+      if (partName == null) return partName;
       boolean isFirst = true;
       Map<String, String> partSpec = Warehouse.makeEscSpecFromName(partName);
       String convertedPartName = new String();
@@ -5618,6 +5658,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       return convertedPartName;
     }
 
+    @Deprecated
     @Override
     public ColumnStatistics get_table_column_statistics(String dbName, String tableName,
       String colName) throws TException {
@@ -5660,7 +5701,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         ColumnStatistics cs = getMS().getTableColumnStatistics(
             catName, dbName, tblName, lowerCaseColNames,
             request.getTxnId(), request.getValidWriteIdList());
-        result = new TableStatsResult((cs == null || cs.getStatsObj() == null)
+        // Note: stats compliance is not propagated to the client; instead, we just return nothing
+        //       if stats are not compliant for now. This won't work for stats merging, but that
+        //       is currently only done on metastore side (see set_aggr...).
+        //       For some optimizations we might make use of incorrect stats that are "better than
+        //       nothing", so this may change in future.
+        result = new TableStatsResult((cs == null || cs.getStatsObj() == null
+            || (cs.isSetIsStatsCompliant() && !cs.isIsStatsCompliant()))
             ? Lists.newArrayList() : cs.getStatsObj());
       } finally {
         endFunction("get_table_statistics_req", result == null, null, tblName);
@@ -5725,8 +5772,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             request.isSetTxnId() ? request.getTxnId() : -1,
             request.isSetValidWriteIdList() ? request.getValidWriteIdList() : null);
         Map<String, List<ColumnStatisticsObj>> map = new HashMap<>();
-        for (ColumnStatistics stat : stats) {
-          map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj());
+        if (stats != null) {
+          for (ColumnStatistics stat : stats) {
+            // Note: stats compliance is not propagated to the client; instead, we just return nothing
+            //       if stats are not compliant for now. This won't work for stats merging, but that
+            //       is currently only done on the metastore side (see set_aggr...).
+            //       For some optimizations we might make use of incorrect stats that are "better than
+            //       nothing", so this may change in the future.
+            if (stat.isSetIsStatsCompliant() && !stat.isIsStatsCompliant()) continue;
+            map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj());
+          }
         }
         result = new PartitionsStatsResult(map);
       } finally {
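
A minimal sketch of the rule both stats-fetch paths above apply, for readers tracing the compliance handling (illustrative helper only, not part of this patch; the name usableStatsOrEmpty is made up):

    import java.util.Collections;
    import java.util.List;

    // Hypothetical helper mirroring the checks above: stats that exist but are
    // flagged non-compliant for the caller's txn snapshot are hidden entirely.
    static List<ColumnStatisticsObj> usableStatsOrEmpty(ColumnStatistics cs) {
      if (cs == null || cs.getStatsObj() == null
          || (cs.isSetIsStatsCompliant() && !cs.isIsStatsCompliant())) {
        return Collections.emptyList();
      }
      return cs.getStatsObj();
    }
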
@@ -5737,79 +5792,73 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException {
-      String catName;
-      String dbName;
-      String tableName;
-      String colName;
-      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
-      catName = statsDesc.isSetCatName() ? statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf);
-      dbName = statsDesc.getDbName().toLowerCase();
-      tableName = statsDesc.getTableName().toLowerCase();
-
-      statsDesc.setCatName(catName);
-      statsDesc.setDbName(dbName);
-      statsDesc.setTableName(tableName);
-      long time = System.currentTimeMillis() / 1000;
-      statsDesc.setLastAnalyzed(time);
-
-      List<ColumnStatisticsObj> statsObjs =  colStats.getStatsObj();
+      // Deprecated API, won't work for transactional tables
+      return updateTableColumnStatsInternal(colStats, -1, null, -1);
+    }
 
-      startFunction("write_column_statistics", ":  table=" +
-          TableName.getQualified(catName, dbName, tableName));
-      for (ColumnStatisticsObj statsObj:statsObjs) {
-        colName = statsObj.getColName().toLowerCase();
-        statsObj.setColName(colName);
-        statsObj.setColType(statsObj.getColType().toLowerCase());
+    @Override
+    public SetPartitionsStatsResponse update_table_column_statistics_req(
+        SetPartitionsStatsRequest req) throws NoSuchObjectException,
+        InvalidObjectException, MetaException, InvalidInputException,
+        TException {
+      if (req.getColStatsSize() != 1) {
+        throw new InvalidInputException("Only one stats object expected");
+      }
+      if (req.isNeedMerge()) {
+        throw new InvalidInputException("Merge is not supported for non-aggregate stats");
       }
+      ColumnStatistics colStats = req.getColStatsIterator().next();
+      boolean ret = updateTableColumnStatsInternal(colStats,
+          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+      return new SetPartitionsStatsResponse(ret);
+    }
 
-     colStats.setStatsDesc(statsDesc);
-     colStats.setStatsObj(statsObjs);
+    private boolean updateTableColumnStatsInternal(ColumnStatistics colStats,
+        long txnId, String validWriteIds, long writeId)
+        throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+      normalizeColStatsInput(colStats);
 
-     boolean ret = false;
+      startFunction("write_column_statistics", ":  table=" + TableName.getQualified(
+          colStats.getStatsDesc().getCatName(), colStats.getStatsDesc().getDbName(),
+          colStats.getStatsDesc().getTableName()));
 
+      boolean ret = false;
       try {
-        ret = getMS().updateTableColumnStatistics(colStats);
-        return ret;
+        ret = getMS().updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
       } finally {
-        endFunction("write_column_statistics", ret != false, null, tableName);
+        endFunction("write_column_statistics", ret != false, null,
+            colStats.getStatsDesc().getTableName());
       }
+      return ret;
     }
 
-    private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats)
-        throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
-      String catName;
-      String dbName;
-      String tableName;
-      String partName;
-      String colName;
-
+    private void normalizeColStatsInput(ColumnStatistics colStats) throws MetaException {
+      // TODO: is this really needed? This code is propagated from HIVE-1362, but most of it is useless.
       ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
-      catName = statsDesc.isSetCatName() ? statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf);
-      dbName = statsDesc.getDbName().toLowerCase();
-      tableName = statsDesc.getTableName().toLowerCase();
-      partName = lowerCaseConvertPartName(statsDesc.getPartName());
-
-      statsDesc.setCatName(catName);
-      statsDesc.setDbName(dbName);
-      statsDesc.setTableName(tableName);
-      statsDesc.setPartName(partName);
-
+      statsDesc.setCatName(statsDesc.isSetCatName() ? statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf));
+      statsDesc.setDbName(statsDesc.getDbName().toLowerCase());
+      statsDesc.setTableName(statsDesc.getTableName().toLowerCase());
+      statsDesc.setPartName(lowerCaseConvertPartName(statsDesc.getPartName()));
       long time = System.currentTimeMillis() / 1000;
       statsDesc.setLastAnalyzed(time);
 
-      List<ColumnStatisticsObj> statsObjs =  colStats.getStatsObj();
-
-      startFunction("write_partition_column_statistics",
-          ":  db=" + dbName + " table=" + tableName
-              + " part=" + partName);
-      for (ColumnStatisticsObj statsObj:statsObjs) {
-        colName = statsObj.getColName().toLowerCase();
-        statsObj.setColName(colName);
+      for (ColumnStatisticsObj statsObj : colStats.getStatsObj()) {
+        statsObj.setColName(statsObj.getColName().toLowerCase());
         statsObj.setColType(statsObj.getColType().toLowerCase());
       }
-
       colStats.setStatsDesc(statsDesc);
-      colStats.setStatsObj(statsObjs);
+      colStats.setStatsObj(colStats.getStatsObj());
+    }
+
+    private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colStats,
+        long txnId, String validWriteIds, long writeId)
+        throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+      normalizeColStatsInput(colStats);
+
+      ColumnStatisticsDesc csd = colStats.getStatsDesc();
+      String catName = csd.getCatName(), dbName = csd.getDbName(), tableName = csd.getTableName();
+      startFunction("write_partition_column_statistics", ":  db=" + dbName  + " table=" + tableName
+              + " part=" + csd.getPartName());
 
       boolean ret = false;
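
The request-based endpoints above have a narrow contract; a hedged caller-side sketch (the handler reference and the txn values are assumed, not shown in this patch):

    // Sketch only: the endpoint throws InvalidInputException for more than one
    // stats object or for needMerge=true, per the checks above.
    SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
    req.addToColStats(colStats);        // exactly one ColumnStatistics
    req.setNeedMerge(false);            // merge is rejected on this endpoint
    req.setTxnId(txnId);                // txn context for transactional tables
    req.setValidWriteIdList(validWriteIdList);
    req.setWriteId(writeId);
    boolean ok = handler.update_table_column_statistics_req(req).isResult();
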
 
@@ -5817,9 +5866,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (tbl == null) {
           tbl = getTable(catName, dbName, tableName);
         }
-        List<String> partVals = getPartValsFromName(tbl, partName);
-        ret = getMS().updatePartitionColumnStatistics(colStats, partVals);
-        return ret;
+        List<String> partVals = getPartValsFromName(tbl, csd.getPartName());
+        return getMS().updatePartitionColumnStatistics(
+            colStats, partVals, txnId, validWriteIds, writeId);
       } finally {
         endFunction("write_partition_column_statistics", ret != false, null, tableName);
       }
@@ -5827,7 +5876,26 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     @Override
     public boolean update_partition_column_statistics(ColumnStatistics colStats) throws TException {
-      return updatePartitonColStats(null, colStats);
+      // Deprecated API.
+      return updatePartitonColStatsInternal(null, colStats, -1, null, -1);
+    }
+
+
+    public SetPartitionsStatsResponse update_partition_column_statistics_req(
+        SetPartitionsStatsRequest req) throws NoSuchObjectException,
+        InvalidObjectException, MetaException, InvalidInputException,
+        TException {
+      if (req.getColStatsSize() != 1) {
+        throw new InvalidInputException("Only one stats object expected");
+      }
+      if (req.isNeedMerge()) {
+        throw new InvalidInputException("Merge is not supported for non-aggregate stats");
+      }
+      ColumnStatistics colStats = req.getColStatsIterator().next();
+      boolean ret = updatePartitonColStatsInternal(null, colStats,
+          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+      return new SetPartitionsStatsResponse(ret);
     }
 
     @Override
@@ -5843,13 +5911,25 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       startFunction("delete_column_statistics_by_partition",": table=" +
           TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) +
           " partition=" + convertedPartName + " column=" + colName);
-      boolean ret = false;
+      boolean ret = false, committed = false;
 
+      getMS().openTransaction();
       try {
         List<String> partVals = getPartValsFromName(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName);
+        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        // This API looks unused; if it were used we'd need to update stats state and write ID.
+        // We cannot just randomly nuke some txn stats.
+        if (TxnUtils.isTransactionalTable(table)) {
+          throw new MetaException("Cannot delete stats via this API for a transactional table");
+        }
+
         ret = getMS().deletePartitionColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName,
                                                       convertedPartName, partVals, colName);
+        committed = getMS().commitTransaction();
       } finally {
+        if (!committed) {
+          getMS().rollbackTransaction();
+        }
         endFunction("delete_column_statistics_by_partition", ret != false, null, tableName);
       }
       return ret;
@@ -5870,10 +5950,23 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) + " column=" +
           colName);
 
-      boolean ret = false;
+      boolean ret = false, committed = false;
+      getMS().openTransaction();
       try {
+        Table table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
+        // This API looks unused; if it were used we'd need to update stats state and write ID.
+        // We cannot just randomly nuke some txn stats.
+        if (TxnUtils.isTransactionalTable(table)) {
+          throw new MetaException("Cannot delete stats via this API for a transactional table");
+        }
+
         ret = getMS().deleteTableColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, colName);
+        committed = getMS().commitTransaction();
       } finally {
+        if (!committed) {
+          getMS().rollbackTransaction();
+        }
         endFunction("delete_column_statistics_by_table", ret != false, null, tableName);
       }
       return ret;
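
Both delete paths now run the transactional-table check and the delete inside a single RawStore transaction; a minimal sketch of the shared pattern (doWork is a placeholder for the body):

    RawStore ms = getMS();
    boolean committed = false;
    ms.openTransaction();
    try {
      doWork(ms);                       // reads plus the delete, in one txn
      committed = ms.commitTransaction();
    } finally {
      if (!committed) {
        ms.rollbackTransaction();       // nothing partial survives a failure
      }
    }
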
@@ -7489,31 +7582,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           throw new MetaException(
               "Expecting only 1 ColumnStatistics for table's column stats, but find "
                   + request.getColStatsSize());
+        }
+        if (request.isSetNeedMerge() && request.isNeedMerge()) {
+          return updateTableColumnStatsWithMerge(catName, dbName, tableName, colNames, request);
         } else {
-          if (request.isSetNeedMerge() && request.isNeedMerge()) {
-            // one single call to get all column stats
-            ColumnStatistics csOld =
-                getMS().getTableColumnStatistics(
-                    catName, dbName, tableName, colNames,
-                    request.getTxnId(), request.getValidWriteIdList());
-            Table t = getTable(catName, dbName, tableName);
-            // we first use t.getParameters() to prune the stats
-            MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
-            // we merge those that can be merged
-            if (csOld != null && csOld.getStatsObjSize() != 0
-                && !firstColStats.getStatsObj().isEmpty()) {
-              MetaStoreUtils.mergeColStats(firstColStats, csOld);
-            }
-            if (!firstColStats.getStatsObj().isEmpty()) {
-              return update_table_column_statistics(firstColStats);
-            } else {
-              LOG.debug("All the column stats are not accurate to merge.");
-              return true;
-            }
-          } else {
-            // This is the overwrite case, we do not care about the accuracy.
-            return update_table_column_statistics(firstColStats);
-          }
+          // This is the overwrite case, we do not care about the accuracy.
+          return updateTableColumnStatsInternal(firstColStats, request.getTxnId(),
+              request.getValidWriteIdList(), request.getWriteId());
         }
       } else {
         // partition level column stats merging
@@ -7529,54 +7604,151 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           newStatsMap.put(partName, csNew);
         }
 
-        Map<String, ColumnStatistics> oldStatsMap = new HashMap<>();
-        Map<String, Partition> mapToPart = new HashMap<>();
         if (request.isSetNeedMerge() && request.isNeedMerge()) {
-          // a single call to get all column stats for all partitions
-          List<String> partitionNames = new ArrayList<>();
-          partitionNames.addAll(newStatsMap.keySet());
-          List<ColumnStatistics> csOlds =
-              getMS().getPartitionColumnStatistics(
-                  catName, dbName, tableName, partitionNames, colNames,
-                  request.getTxnId(), request.getValidWriteIdList());
-          if (newStatsMap.values().size() != csOlds.size()) {
-            // some of the partitions miss stats.
-            LOG.debug("Some of the partitions miss stats.");
-          }
-          for (ColumnStatistics csOld : csOlds) {
-            oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld);
+          ret = updatePartColumnStatsWithMerge(catName, dbName, tableName,
+              colNames, newStatsMap, request);
+        } else { // No merge.
+          Table t = getTable(catName, dbName, tableName);
+          for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
+            // We don't short-circuit on errors here anymore; short-circuiting can leave ACID stats invalid.
+            ret = updatePartitonColStatsInternal(t, entry.getValue(), request.getTxnId(),
+                request.getValidWriteIdList(), request.getWriteId()) && ret;
           }
+        }
+      }
+      return ret;
+    }
 
-          // another single call to get all the partition objects
-          partitions = getMS().getPartitionsByNames(catName, dbName, tableName, partitionNames);
-          for (int index = 0; index < partitionNames.size(); index++) {
-            mapToPart.put(partitionNames.get(index), partitions.get(index));
-          }
+    private boolean updatePartColumnStatsWithMerge(String catName, String dbName, String tableName,
+      List<String> colNames, Map<String, ColumnStatistics> newStatsMap, SetPartitionsStatsRequest request)
+          throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+      RawStore ms = getMS();
+      ms.openTransaction();
+      boolean isCommitted = false, result = true; // result is &&-accumulated below, so it must start true
+      try {
+        // a single call to get all column stats for all partitions
+        List<String> partitionNames = new ArrayList<>();
+        partitionNames.addAll(newStatsMap.keySet());
+        List<ColumnStatistics> csOlds = ms.getPartitionColumnStatistics(catName, dbName, tableName,
+            partitionNames, colNames, request.getTxnId(), request.getValidWriteIdList());
+        if (newStatsMap.values().size() != csOlds.size()) {
+          // Some of the partitions are missing stats.
+          LOG.debug("Some of the partitions are missing stats.");
         }
-        Table t = getTable(catName, dbName, tableName,
-            request.getTxnId(), request.getValidWriteIdList());
+        Map<String, ColumnStatistics> oldStatsMap = new HashMap<>();
+        for (ColumnStatistics csOld : csOlds) {
+          oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld);
+        }
+
+        // another single call to get all the partition objects
+        List<Partition> partitions = ms.getPartitionsByNames(catName, dbName, tableName, partitionNames);
+        Map<String, Partition> mapToPart = new HashMap<>();
+        for (int index = 0; index < partitionNames.size(); index++) {
+          mapToPart.put(partitionNames.get(index), partitions.get(index));
+        }
+
+        Table t = getTable(catName, dbName, tableName);
         for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
           ColumnStatistics csNew = entry.getValue();
           ColumnStatistics csOld = oldStatsMap.get(entry.getKey());
-          if (request.isSetNeedMerge() && request.isNeedMerge()) {
+          boolean isInvalidTxnStats = csOld != null
+              && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant();
+          Partition part = mapToPart.get(entry.getKey());
+          if (isInvalidTxnStats) {
+            // No columns can be merged; a shortcut for getMergableCols.
+            csNew.setStatsObj(Lists.newArrayList());
+          } else {
             // we first use getParameters() to prune the stats
-            MetaStoreUtils.getMergableCols(csNew, mapToPart.get(entry.getKey()).getParameters());
+            MetaStoreUtils.getMergableCols(csNew, part.getParameters());
             // we merge those that can be merged
             if (csOld != null && csOld.getStatsObjSize() != 0 && !csNew.getStatsObj().isEmpty()) {
               MetaStoreUtils.mergeColStats(csNew, csOld);
             }
-            if (!csNew.getStatsObj().isEmpty()) {
-              ret = ret && updatePartitonColStats(t, csNew);
-            } else {
-              LOG.debug("All the column stats " + csNew.getStatsDesc().getPartName()
-                  + " are not accurate to merge.");
-            }
+          }
+
+          if (!csNew.getStatsObj().isEmpty()) {
+            // We don't short-circuit on errors here anymore; short-circuiting can leave ACID stats invalid.
+            result = updatePartitonColStatsInternal(t, csNew, request.getTxnId(),
+                request.getValidWriteIdList(), request.getWriteId()) && result;
+          } else if (isInvalidTxnStats) {
+            // For now, because of the way stats state is tracked, we will invalidate everything.
+            // Overall the semantics here are not clear - we could invalidate only some columns, but does
+            // that make any physical sense? Could a query affect some columns but not others?
+            part.setWriteId(request.getWriteId());
+            StatsSetupConst.clearColumnStatsState(part.getParameters());
+            StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
+            ms.alterPartition(catName, dbName, tableName, part.getValues(), part,
+                request.getTxnId(), request.getValidWriteIdList());
+            result = false;
           } else {
-            ret = ret && updatePartitonColStats(t, csNew);
+            // TODO: why doesn't the original call for non-ACID tables invalidate the stats?
+            LOG.debug("All the column stats for " + csNew.getStatsDesc().getPartName()
+                + " are not accurate enough to merge.");
           }
         }
+        ms.commitTransaction();
+        isCommitted = true;
+      } finally {
+        if (!isCommitted) {
+          ms.rollbackTransaction();
+        }
       }
-      return ret;
+      return result;
+    }
+
+    private boolean updateTableColumnStatsWithMerge(String catName, String dbName, String tableName,
+        List<String> colNames, SetPartitionsStatsRequest request) throws MetaException,
+        NoSuchObjectException, InvalidObjectException, InvalidInputException {
+      ColumnStatistics firstColStats = request.getColStats().get(0);
+      RawStore ms = getMS();
+      ms.openTransaction();
+      boolean isCommitted = false, result = false;
+      try {
+        ColumnStatistics csOld = ms.getTableColumnStatistics(catName, dbName, tableName, colNames,
+            request.getTxnId(), request.getValidWriteIdList());
+        // We first use the old stats' validity to prune what can be merged.
+        boolean isInvalidTxnStats = csOld != null
+            && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant();
+        if (isInvalidTxnStats) {
+          // No columns can be merged; a shortcut for getMergableCols.
+          firstColStats.setStatsObj(Lists.newArrayList());
+        } else {
+          Table t = getTable(catName, dbName, tableName);
+          MetaStoreUtils.getMergableCols(firstColStats, t.getParameters());
+
+          // we merge those that can be merged
+          if (csOld != null && csOld.getStatsObjSize() != 0 && !firstColStats.getStatsObj().isEmpty()) {
+            MetaStoreUtils.mergeColStats(firstColStats, csOld);
+          }
+        }
+
+        if (!firstColStats.getStatsObj().isEmpty()) {
+          result = updateTableColumnStatsInternal(firstColStats, request.getTxnId(),
+              request.getValidWriteIdList(), request.getWriteId());
+        } else if (isInvalidTxnStats) {
+          // For now, because of the way stats state is tracked, we will invalidate everything.
+          // Overall the semantics here are not clear - we could invalidate only some columns, but does
+          // that make any physical sense? Could a query affect some columns but not others?
+          Table t = getTable(catName, dbName, tableName);
+          t.setWriteId(request.getWriteId());
+          StatsSetupConst.clearColumnStatsState(t.getParameters());
+          StatsSetupConst.setBasicStatsState(t.getParameters(), StatsSetupConst.FALSE);
+          ms.alterTable(catName, dbName, tableName, t, request.getTxnId(), request.getValidWriteIdList());
+        } else {
+          // TODO: why doesn't the original call for non-ACID tables invalidate the stats?
+          LOG.debug("All the column stats are not accurate enough to merge.");
+          result = true;
+        }
+
+        ms.commitTransaction();
+        isCommitted = true;
+      } finally {
+        if (!isCommitted) {
+          ms.rollbackTransaction();
+        }
+      }
+      return result;
     }
 
     private Table getTable(String catName, String dbName, String tableName)
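
The merge paths rewritten above are reached through set_aggr_stats_for with needMerge set; a hedged sketch of driving them from the request side (txn values assumed):

    // Sketch: merge these stats into whatever the metastore already holds,
    // under the caller's txn snapshot. If the old txn stats are non-compliant,
    // the code above invalidates stats state instead of merging.
    SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
    req.addToColStats(newColStats);     // table level: exactly one entry
    req.setNeedMerge(true);
    req.setTxnId(txnId);
    req.setValidWriteIdList(validWriteIdList);
    req.setWriteId(writeId);
    boolean allMerged = handler.set_aggr_stats_for(req);
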

http://git-wip-us.apache.org/repos/asf/hive/blob/1c9947f3/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index cc417ea..38327d7 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -130,7 +130,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   //copied from ErrorMsg.java
   private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store.";
-  
+
   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
 
   public HiveMetaStoreClient(Configuration conf) throws MetaException {
@@ -404,15 +404,36 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (hook != null) {
       hook.preAlterTable(new_tbl, envContext);
     }
-    client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf),
-        tbl_name, new_tbl, envContext);
+    AlterTableRequest req = new AlterTableRequest(dbname, tbl_name, new_tbl);
+    req.setCatName(MetaStoreUtils.getDefaultCatalog(conf));
+    req.setEnvironmentContext(envContext);
+    client.alter_table_req(req);
   }
 
   @Override
   public void alter_table(String catName, String dbName, String tblName, Table newTable,
                          EnvironmentContext envContext) throws TException {
-    client.alter_table_with_environment_context(prependCatalogToDbName(catName,
-        dbName, conf), tblName, newTable, envContext);
+    // This never used to call the hook. Why? There's overload madness in metastore...
+    AlterTableRequest req = new AlterTableRequest(dbName, tblName, newTable);
+    req.setCatName(catName);
+    req.setEnvironmentContext(envContext);
+    client.alter_table_req(req);
+  }
+
+  @Override
+  public void alter_table(String catName, String dbName, String tbl_name, Table new_tbl,
+      EnvironmentContext envContext, long txnId, String validWriteIds)
+          throws InvalidOperationException, MetaException, TException {
+    HiveMetaHook hook = getHook(new_tbl);
+    if (hook != null) {
+      hook.preAlterTable(new_tbl, envContext);
+    }
+    AlterTableRequest req = new AlterTableRequest(dbName, tbl_name, new_tbl);
+    req.setCatName(catName);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(validWriteIds);
+    req.setEnvironmentContext(envContext);
+    client.alter_table_req(req);
   }
 
   @Override
@@ -1339,14 +1360,33 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   }
 
   @Override
+  public void truncateTable(String dbName, String tableName, List<String> partNames,
+      long txnId, String validWriteIds, long writeId) throws TException {
+    truncateTableInternal(getDefaultCatalog(conf),
+        dbName, tableName, partNames, txnId, validWriteIds, writeId);
+  }
+
+  @Override
   public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
-    truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames);
+    truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, -1, null, -1);
   }
 
   @Override
   public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
       throws TException {
-    client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames);
+    truncateTableInternal(catName, dbName, tableName, partNames, -1, null, -1);
+  }
+
+  private void truncateTableInternal(String catName, String dbName, String tableName,
+      List<String> partNames, long txnId, String validWriteIds, long writeId)
+          throws MetaException, TException {
+    TruncateTableRequest req = new TruncateTableRequest(
+        prependCatalogToDbName(catName, dbName, conf), tableName);
+    req.setPartNames(partNames);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(validWriteIds);
+    req.setWriteId(writeId);
+    client.truncate_table_req(req);
   }
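
A minimal caller-side sketch of the new txn-aware truncate overload (client, names, and txn values are illustrative):

    // Truncate two partitions of an ACID table under an open transaction, so
    // the metastore can keep stats and the table write ID consistent.
    IMetaStoreClient msc = new HiveMetaStoreClient(conf);
    msc.truncateTable("db1", "tbl1",
        Lists.newArrayList("p=1", "p=2"), // partition names; null truncates the whole table
        txnId, validWriteIdList, writeId);
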
 
   /**
@@ -1870,7 +1910,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   }
 
   @Override
-  public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+  public void alter_partition(String dbName, String tblName, Partition newPart,
+      EnvironmentContext environmentContext)
       throws InvalidOperationException, MetaException, TException {
     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext);
   }
@@ -1878,11 +1919,26 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   @Override
   public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
                               EnvironmentContext environmentContext) throws TException {
-    client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName,
-        newPart, environmentContext);
+    AlterPartitionsRequest req = new AlterPartitionsRequest(dbName, tblName, Lists.newArrayList(newPart));
+    req.setCatName(catName);
+    req.setEnvironmentContext(environmentContext);
+    client.alter_partitions_req(req);
   }
 
   @Override
+  public void alter_partition(String dbName, String tblName, Partition newPart,
+      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      throws InvalidOperationException, MetaException, TException {
+    AlterPartitionsRequest req = new AlterPartitionsRequest(
+        dbName, tblName, Lists.newArrayList(newPart));
+    req.setEnvironmentContext(environmentContext);
+    req.setTxnId(txnId);
+    req.setValidWriteIdList(writeIdList);
+    client.alter_partitions_req(req);
+  }
+
+  @Deprecated
+  @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
       throws TException {
     alter_partitions(
@@ -1901,8 +1957,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
                                EnvironmentContext environmentContext,
                                long txnId, String writeIdList, long writeId)
       throws InvalidOperationException, MetaException, TException {
-    //client.alter_partition_with_environment_context(getDefaultCatalog(conf),
-      //  dbName, tblName, newParts, environmentContext);
     alter_partitions(getDefaultCatalog(conf),
         dbName, tblName, newParts, environmentContext, txnId, writeIdList, writeId);
 
@@ -1914,14 +1968,15 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
                                EnvironmentContext environmentContext,
                                long txnId, String writeIdList, long writeId) throws TException {
     AlterPartitionsRequest req = new AlterPartitionsRequest();
-    req.setDbName(prependCatalogToDbName(catName, dbName, conf));
+    req.setCatName(catName);
+    req.setDbName(dbName);
     req.setTableName(tblName);
     req.setPartitions(newParts);
     req.setEnvironmentContext(environmentContext);
     req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     req.setWriteId(writeId);
-    client.alter_partitions_with_environment_context_req(req);
+    client.alter_partitions_req(req);
   }
 
   @Override
@@ -2005,7 +2060,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (!statsObj.getStatsDesc().isSetCatName()) {
       statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf));
     }
-    return client.update_table_column_statistics(statsObj);
+    // Note: currently this method doesn't set txn properties and thus won't work on txn tables.
+    SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
+    req.addToColStats(statsObj);
+    req.setNeedMerge(false);
+    return client.update_table_column_statistics_req(req).isResult();
   }
 
   @Override
@@ -2013,7 +2072,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     if (!statsObj.getStatsDesc().isSetCatName()) {
       statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf));
     }
-    return client.update_partition_column_statistics(statsObj);
+    // Note: currently this method doesn't set txn properties and thus won't work on txn tables.
+    SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();
+    req.addToColStats(statsObj);
+    req.setNeedMerge(false);
+    return client.update_partition_column_statistics_req(req).isResult();
   }
 
   @Override
@@ -3431,5 +3494,4 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     req.setMaxCreateTime(maxCreateTime);
     return client.get_runtime_stats(req);
   }
-
 }
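
Finally, a hedged end-to-end sketch of the txn-aware alter_table overload added above ("hive" is the default catalog; names and txn values are assumed):

    // Alter table metadata while supplying the txn snapshot, so the server-side
    // alterTable can keep column stats state and write IDs consistent.
    HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
    Table t = msc.getTable("hive", "db1", "tbl1");
    t.getParameters().put("comment", "updated under a txn");
    msc.alter_table("hive", "db1", "tbl1", t, null, txnId, validWriteIdList);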