Posted to commits@hive.apache.org by ha...@apache.org on 2020/05/27 00:35:51 UTC

[hive] branch master updated: HIVE-23281 : ObjectStore::convertToStorageDescriptor can be optimised to reduce calls to DB for ACID tables (Ramesh Kumar, Rajesh Balamohan via Ashutosh Chauhan)

This is an automated email from the ASF dual-hosted git repository.

hashutosh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 6aeb63a  HIVE-23281 : ObjectStore::convertToStorageDescriptor can be optimised to reduce calls to DB for ACID tables (Ramesh Kumar, Rajesh Balamohan via Ashutosh Chauhan)
6aeb63a is described below

commit 6aeb63a2becec1d2eeae8e144da1cbaf84546d72
Author: Ashutosh Chauhan <ha...@apache.org>
AuthorDate: Sun May 24 23:14:55 2020 -0700

    HIVE-23281 : ObjectStore::convertToStorageDescriptor can be optimised to reduce calls to DB for ACID tables (Ramesh Kumar, Rajesh Balamohan via Ashutosh Chauhan)
    
    Signed-off-by: Ashutosh Chauhan <ha...@apache.org>
---
 .../org/apache/hadoop/hive/ql/metadata/Table.java  |  2 +
 .../hive/ql/parse/ImportSemanticAnalyzer.java      |  9 ++-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java     | 19 ++---
 .../exim_09_nonpart_noncompat_serdeparam.q.out     |  2 +-
 .../clientpositive/llap/acid_bucket_pruning.q.out  |  2 -
 .../clientpositive/llap/acid_no_buckets.q.out      |  4 -
 .../clientpositive/llap/acid_nullscan.q.out        |  2 -
 .../results/clientpositive/llap/acid_stats2.q.out  | 18 -----
 .../results/clientpositive/llap/acid_stats5.q.out  | 12 ---
 .../clientpositive/llap/acid_table_stats.q.out     | 12 ---
 .../clientpositive/llap/autoColumnStats_4.q.out    |  6 --
 .../clientpositive/llap/check_constraint.q.out     | 18 -----
 .../llap/create_transactional_full_acid.q.out      |  2 -
 .../llap/create_transactional_insert_only.q.out    |  2 -
 .../clientpositive/llap/default_constraint.q.out   | 24 ------
 .../insert_values_orig_table_use_metadata.q.out    | 10 ---
 .../test/results/clientpositive/llap/mm_all.q.out  |  6 --
 .../test/results/clientpositive/llap/mm_bhif.q.out | 44 +++++++----
 .../results/clientpositive/llap/mm_default.q.out   | 12 ---
 .../test/results/clientpositive/llap/mm_exim.q.out |  4 -
 .../llap/murmur_hash_migration2.q.out              |  2 -
 .../clientpositive/llap/sqlmerge_stats.q.out       | 10 ---
 .../clientpositive/llap/stats_nonpart.q.out        |  4 -
 .../results/clientpositive/llap/stats_part.q.out   | 10 ---
 .../results/clientpositive/llap/stats_part2.q.out  | 30 --------
 .../clientpositive/llap/stats_sizebug.q.out        |  4 -
 .../hadoop/hive/metastore/MetaStoreDirectSql.java  | 43 +++++++----
 .../apache/hadoop/hive/metastore/ObjectStore.java  | 87 ++++++++++++----------
 28 files changed, 127 insertions(+), 273 deletions(-)
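
The change in one sentence: for transactional tables, ObjectStore and
MetaStoreDirectSql stop issuing the per-storage-descriptor side queries
(SD params, sort columns, skewed columns, serde params), since the patch
treats an ACID table's storage descriptor as fixed (ORC serde, no skew, no
list bucketing). The pivot is a single table-level metadata check; a minimal
sketch, assuming only the TxnUtils helper the patch itself calls (the class
and method name below are illustrative, not part of the commit):

    import java.util.Map;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    // Sketch only: one table-level check decides whether the per-SD side
    // queries can be skipped.  The real call sites are in the ObjectStore
    // and MetaStoreDirectSql hunks further down.
    final class TxnCheckSketch {
      static boolean canSkipSdSideQueries(Map<String, String> tableParams) {
        return TxnUtils.isTransactionalTable(tableParams);
      }
    }

One visible side effect, reflected in most of the q.out churn below: ACID
tables no longer report a "serialization.format 1" entry under Storage Desc
Params in describe/explain output.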

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 8805eee..61b9fb8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -209,6 +209,8 @@ public class Table implements Serializable {
       // We have to use MetadataTypedColumnsetSerDe because LazySimpleSerDe does
       // not support a table with no columns.
       sd.getSerdeInfo().setSerializationLib(MetadataTypedColumnsetSerDe.class.getName());
+      //TODO setting serializaton format here is hacky. Only lazy simple serde needs it
+      // so should be set by serde only. Setting it here sets it unconditionally.
       sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
       sd.setInputFormat(SequenceFileInputFormat.class.getName());
       sd.setOutputFormat(HiveSequenceFileOutputFormat.class.getName());
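
The new TODO above suggests making the property serde-specific rather than
unconditional. A hedged sketch of that follow-up (not part of this commit;
the helper name is hypothetical, the imports are existing Hive classes):

    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.serde.serdeConstants;
    import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;

    final class SerdeFormatSketch {
      // Hypothetical follow-up to the TODO: only set serialization.format
      // for serdes that actually consume it, instead of unconditionally.
      static void setSerializationFormatIfNeeded(StorageDescriptor sd) {
        if (LazySimpleSerDe.class.getName().equals(
            sd.getSerdeInfo().getSerializationLib())) {
          sd.getSerdeInfo().getParameters().put(
              serdeConstants.SERIALIZATION_FORMAT, "1");
        }
      }
    }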
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index c21c6f1..cd92247 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -934,15 +934,18 @@ public class ImportSemanticAnalyzer extends BaseSemanticAnalyzer {
           .getSerdeParam(serdeConstants.SERIALIZATION_FORMAT);
       String importedSerdeFormat = tableDesc.getSerdeProps().get(
           serdeConstants.SERIALIZATION_FORMAT);
-      /*
+
+      /* TODO : Remove this weirdity. See notes in Table.getEmptyTable()
        * If Imported SerdeFormat is null, then set it to "1" just as
        * metadata.Table.getEmptyTable
        */
       importedSerdeFormat = importedSerdeFormat == null ? "1" : importedSerdeFormat;
-      if (!ObjectUtils.equals(existingSerdeFormat, importedSerdeFormat)) {
+      if (!TxnUtils.isTransactionalTable(table.getParameters()) &&
+          !ObjectUtils.equals(existingSerdeFormat, importedSerdeFormat)) {
         throw new SemanticException(
             ErrorMsg.INCOMPATIBLE_SCHEMA
-                .getMsg(" Table Serde format does not match"));
+                .getMsg(" Table Serde format does not match. Imported :"
+                    + " "+importedSerdeFormat + " existing: " + existingSerdeFormat));
       }
     }
     {
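
Why the new guard: the metastore changes below stop materializing
serialization.format for transactional tables, so without it an import would
compare the defaulted "1" against a missing existing value and fail
spuriously. A minimal, runnable sketch of the skip, using the same TxnUtils
overload the hunk calls (the class and method here are illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    class ImportGuardSketch {
      // Transactional tables bypass the serde-format compatibility check.
      static boolean shouldCheckSerdeFormat(Map<String, String> tableParams) {
        return !TxnUtils.isTransactionalTable(tableParams);
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("transactional", "true");
        System.out.println(shouldCheckSerdeFormat(params)); // prints: false
      }
    }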
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index aa8d84e..8238a2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -343,7 +343,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
    * that describes percentage and number.
    */
   private final Map<String, SplitSample> nameToSplitSample;
-  private Map<GroupByOperator, Set<String>> groupOpToInputTables;
+  private final Map<GroupByOperator, Set<String>> groupOpToInputTables;
   protected Map<String, PrunedPartitionList> prunedPartitions;
   protected List<FieldSchema> resultSchema;
   protected CreateViewDesc createVwDesc;
@@ -7294,17 +7294,16 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       idToTableNameMap.put(String.valueOf(destTableId), destinationTable.getTableName());
       currentTableId = destTableId;
       destTableId++;
-
-      lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(),
-          destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(),
-          destinationTable.isStoredAsSubDirectories());
-
       // Create the work for moving the table
       // NOTE: specify Dynamic partitions in dest_tab for WriteEntity
       if (!isNonNativeTable) {
         if (destTableIsTransactional) {
           acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
           checkAcidConstraints();
+        } else {
+          lbCtx = constructListBucketingCtx(destinationTable.getSkewedColNames(),
+              destinationTable.getSkewedColValues(), destinationTable.getSkewedColValueLocationMaps(),
+              destinationTable.isStoredAsSubDirectories());
         }
         try {
           if (ctx.getExplainConfig() != null) {
@@ -7440,12 +7439,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       currentTableId = destTableId;
       destTableId++;
 
-      lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(),
-          destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(),
-          destinationPartition.isStoredAsSubDirectories());
       if (destTableIsTransactional) {
         acidOp = getAcidType(tableDescriptor.getOutputFileFormatClass(), dest, isMmTable);
         checkAcidConstraints();
+      } else {
+        // Acid tables can't be list bucketed or have skewed cols
+        lbCtx = constructListBucketingCtx(destinationPartition.getSkewedColNames(),
+            destinationPartition.getSkewedColValues(), destinationPartition.getSkewedColValueLocationMaps(),
+            destinationPartition.isStoredAsSubDirectories());
       }
       try {
         if (ctx.getExplainConfig() != null) {
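
Both hunks in this file apply the same reordering: the list-bucketing context
is only built for non-transactional destinations, because ACID tables can't
be list bucketed or have skewed columns. A toy sketch of the resulting
control flow (names are illustrative stand-ins for the analyzer's fields):

    // Toy model: transactional destinations take the ACID branch and never
    // build a list-bucketing context; everything else still does.
    class WriteBranchSketch {
      static String plan(boolean destIsTransactional) {
        if (destIsTransactional) {
          return "acid";   // stands in for getAcidType()/checkAcidConstraints()
        }
        return "lbCtx";    // stands in for constructListBucketingCtx(...)
      }

      public static void main(String[] args) {
        System.out.println(plan(true));   // acid
        System.out.println(plan(false));  // lbCtx
      }
    }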
diff --git a/ql/src/test/results/clientnegative/exim_09_nonpart_noncompat_serdeparam.q.out b/ql/src/test/results/clientnegative/exim_09_nonpart_noncompat_serdeparam.q.out
index d076829..d9bc742 100644
--- a/ql/src/test/results/clientnegative/exim_09_nonpart_noncompat_serdeparam.q.out
+++ b/ql/src/test/results/clientnegative/exim_09_nonpart_noncompat_serdeparam.q.out
@@ -69,4 +69,4 @@ POSTHOOK: query: create table exim_department ( dep_id int comment "department i
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:importer
 POSTHOOK: Output: importer@exim_department
-FAILED: SemanticException [Error 10120]: The existing table is not compatible with the Export/Import spec.   Table Serde format does not match
+FAILED: SemanticException [Error 10120]: The existing table is not compatible with the Export/Import spec.   Table Serde format does not match. Imported : 1 existing: 0
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index 0e8f3af..6d51027 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -106,7 +106,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                     location hdfs://### HDFS PATH ###
                     name default.acidtbldefault
-                    serialization.format 1
                     serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
                     transactional true
                     transactional_properties default
@@ -125,7 +124,6 @@ STAGE PLANS:
 #### A masked pattern was here ####
                       location hdfs://### HDFS PATH ###
                       name default.acidtbldefault
-                      serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
                       transactional true
                       transactional_properties default
diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
index aeeffa5..a925fc9 100644
--- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
@@ -299,8 +299,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: describe formatted srcpart_acid key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@srcpart_acid
@@ -411,8 +409,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: describe formatted srcpart_acid key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@srcpart_acid
diff --git a/ql/src/test/results/clientpositive/llap/acid_nullscan.q.out b/ql/src/test/results/clientpositive/llap/acid_nullscan.q.out
index b941555..e7bda2c 100644
--- a/ql/src/test/results/clientpositive/llap/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_nullscan.q.out
@@ -97,7 +97,6 @@ STAGE PLANS:
                     columns.types int:string
 #### A masked pattern was here ####
                     name default.acid_vectorized_n1
-                    serialization.format 1
                     serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
                     transactional true
                     transactional_properties default
@@ -115,7 +114,6 @@ STAGE PLANS:
                       columns.types int:string
 #### A masked pattern was here ####
                       name default.acid_vectorized_n1
-                      serialization.format 1
                       serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
                       transactional true
                       transactional_properties default
diff --git a/ql/src/test/results/clientpositive/llap/acid_stats2.q.out b/ql/src/test/results/clientpositive/llap/acid_stats2.q.out
index dfacb61..f692c2a 100644
--- a/ql/src/test/results/clientpositive/llap/acid_stats2.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_stats2.q.out
@@ -324,8 +324,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='tomorrow')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -361,8 +359,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='today')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -398,8 +394,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats4
@@ -477,8 +471,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='tomorrow')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -514,8 +506,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='today')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -551,8 +541,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats4
@@ -657,8 +645,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='tomorrow')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -694,8 +680,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats4 partition(ds='today')
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats4
@@ -731,8 +715,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats4
diff --git a/ql/src/test/results/clientpositive/llap/acid_stats5.q.out b/ql/src/test/results/clientpositive/llap/acid_stats5.q.out
index 8a891eb..3c6f3fb 100644
--- a/ql/src/test/results/clientpositive/llap/acid_stats5.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_stats5.q.out
@@ -106,8 +106,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
@@ -316,8 +314,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
@@ -464,8 +460,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
@@ -562,8 +556,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
@@ -729,8 +721,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
@@ -877,8 +867,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats2 key
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats2
diff --git a/ql/src/test/results/clientpositive/llap/acid_table_stats.q.out b/ql/src/test/results/clientpositive/llap/acid_table_stats.q.out
index 4daf7e3..0ee584f 100644
--- a/ql/src/test/results/clientpositive/llap/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_table_stats.q.out
@@ -50,8 +50,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table acid partition(ds)  select key,value,ds from srcpart
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -108,8 +106,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select count(*) from acid where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
@@ -195,8 +191,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics for columns
 PREHOOK: type: ANALYZE_TABLE
 PREHOOK: Input: default@acid
@@ -246,8 +240,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select count(*) from acid where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
@@ -342,8 +334,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: analyze table acid partition(ds='2008-04-08') compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
@@ -391,8 +381,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from acid where ds='2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid
diff --git a/ql/src/test/results/clientpositive/llap/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/llap/autoColumnStats_4.q.out
index 5e34ef3..b1a84e0 100644
--- a/ql/src/test/results/clientpositive/llap/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/autoColumnStats_4.q.out
@@ -41,8 +41,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
@@ -226,8 +224,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: delete from acid_dtt where b = '0ruyd6Y50JpdGRf6HqD' or b = '2uLyD28144vklju213J1mr'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_dtt
@@ -271,5 +267,3 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
diff --git a/ql/src/test/results/clientpositive/llap/check_constraint.q.out b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
index e896b69..553937f 100644
--- a/ql/src/test/results/clientpositive/llap/check_constraint.q.out
+++ b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
@@ -52,8 +52,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[i]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -1330,8 +1328,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[i]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2293,8 +2289,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[value]             	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3124,8 +3118,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3174,8 +3166,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3329,8 +3319,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3722,8 +3710,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3778,8 +3764,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -3908,8 +3892,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
diff --git a/ql/src/test/results/clientpositive/llap/create_transactional_full_acid.q.out b/ql/src/test/results/clientpositive/llap/create_transactional_full_acid.q.out
index 04b16a0..e8279b1 100644
--- a/ql/src/test/results/clientpositive/llap/create_transactional_full_acid.q.out
+++ b/ql/src/test/results/clientpositive/llap/create_transactional_full_acid.q.out
@@ -58,8 +58,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table transactional_table_test partition(ds)  select key,value,ds from srcpart
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
diff --git a/ql/src/test/results/clientpositive/llap/create_transactional_insert_only.q.out b/ql/src/test/results/clientpositive/llap/create_transactional_insert_only.q.out
index 1c6e8f5..47cb3ca 100644
--- a/ql/src/test/results/clientpositive/llap/create_transactional_insert_only.q.out
+++ b/ql/src/test/results/clientpositive/llap/create_transactional_insert_only.q.out
@@ -50,8 +50,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[key]               	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table transactional_insert_only_table partition(ds)  select key,value,ds from srcpart
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
diff --git a/ql/src/test/results/clientpositive/llap/default_constraint.q.out b/ql/src/test/results/clientpositive/llap/default_constraint.q.out
index 858dd5a..4074082 100644
--- a/ql/src/test/results/clientpositive/llap/default_constraint.q.out
+++ b/ql/src/test/results/clientpositive/llap/default_constraint.q.out
@@ -44,8 +44,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -565,8 +563,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[i]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -836,8 +832,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -1406,8 +1400,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -1569,8 +1561,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -1745,8 +1735,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -1825,8 +1813,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2012,8 +1998,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2091,8 +2075,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2171,8 +2153,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2608,8 +2588,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
@@ -2771,8 +2749,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[b]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
diff --git a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
index dba4201..475d56d 100644
--- a/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
+++ b/ql/src/test/results/clientpositive/llap/insert_values_orig_table_use_metadata.q.out
@@ -119,8 +119,6 @@ Compressed:         	No
 Num Buckets:        	1                   	 
 Bucket Columns:     	[cint]              	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into acid_ivot select * from acid_ivot_stage
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivot_stage
@@ -186,8 +184,6 @@ Compressed:         	No
 Num Buckets:        	1                   	 
 Bucket Columns:     	[cint]              	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from acid_ivot
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivot
@@ -357,8 +353,6 @@ Compressed:         	No
 Num Buckets:        	1                   	 
 Bucket Columns:     	[cint]              	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from acid_ivot
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivot
@@ -455,8 +449,6 @@ Compressed:         	No
 Num Buckets:        	1                   	 
 Bucket Columns:     	[cint]              	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from acid_ivot
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivot
@@ -549,8 +541,6 @@ Compressed:         	No
 Num Buckets:        	1                   	 
 Bucket Columns:     	[cint]              	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from acid_ivot
 PREHOOK: type: QUERY
 PREHOOK: Input: default@acid_ivot
diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out
index fd28d39..1930217 100644
--- a/ql/src/test/results/clientpositive/llap/mm_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out
@@ -1861,8 +1861,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table stats_mm  select key from intermediate_n0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@intermediate_n0
@@ -1912,8 +1910,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: drop table stats_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@stats_mm
@@ -1971,8 +1967,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: drop table stats2_mm
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@stats2_mm
diff --git a/ql/src/test/results/clientpositive/llap/mm_bhif.q.out b/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
index cd90865..225f08d 100644
--- a/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_bhif.q.out
@@ -66,7 +66,8 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -80,26 +81,41 @@ STAGE PLANS:
                     Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
                     Group By Operator
                       keys: key (type: string)
-                      mode: final
+                      minReductionHashAggr: 0.5
+                      mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 5 Data size: 425 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        aggregations: count(_col0)
-                        minReductionHashAggr: 0.8
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          null sort order: 
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: bigint)
+                      Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: z
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(_col0)
+                  minReductionHashAggr: 0.6666666
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    null sort order: 
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
diff --git a/ql/src/test/results/clientpositive/llap/mm_default.q.out b/ql/src/test/results/clientpositive/llap/mm_default.q.out
index 29516e2..2bc6a01 100644
--- a/ql/src/test/results/clientpositive/llap/mm_default.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_default.q.out
@@ -128,8 +128,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted mm2
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@mm2
@@ -164,8 +162,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted mm3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@mm3
@@ -200,8 +196,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted mm4
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@mm4
@@ -236,8 +230,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted non_mm1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@non_mm1
@@ -306,8 +298,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted acid2_n0
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@acid2_n0
@@ -342,8 +332,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: drop table non_mm0
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@non_mm0
diff --git a/ql/src/test/results/clientpositive/llap/mm_exim.q.out b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
index 37d3952..b66099d 100644
--- a/ql/src/test/results/clientpositive/llap/mm_exim.q.out
+++ b/ql/src/test/results/clientpositive/llap/mm_exim.q.out
@@ -326,8 +326,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select * from import2_mm order by key, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@import2_mm
@@ -404,8 +402,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select * from import3_mm order by key, p
 PREHOOK: type: QUERY
 PREHOOK: Input: default@import3_mm
diff --git a/ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out b/ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out
index cc74705..b1a419f 100644
--- a/ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out
+++ b/ql/src/test/results/clientpositive/llap/murmur_hash_migration2.q.out
@@ -130,7 +130,6 @@ STAGE PLANS:
                         name default.acid_ptn_bucket1
                         partition_columns ds
                         partition_columns.types string
-                        serialization.format 1
                         serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
                         transactional true
                         transactional_properties default
@@ -166,7 +165,6 @@ STAGE PLANS:
                 name default.acid_ptn_bucket1
                 partition_columns ds
                 partition_columns.types string
-                serialization.format 1
                 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
                 transactional true
                 transactional_properties default
diff --git a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out
index b857f36..d40aa94 100644
--- a/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out
+++ b/ql/src/test/results/clientpositive/llap/sqlmerge_stats.q.out
@@ -49,8 +49,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into t values (1,1)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -106,8 +104,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain merge into t as t using upd_t as u ON t.a = u.a 
 WHEN MATCHED THEN UPDATE SET b = 99
 WHEN NOT MATCHED THEN INSERT VALUES(u.a, u.b)
@@ -455,8 +451,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: merge into t as t using upd_t as u ON t.a = u.a 
 WHEN MATCHED THEN DELETE
 WHEN NOT MATCHED THEN INSERT (a, b) VALUES(u.a, u.b)
@@ -521,8 +515,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: create table t2(a int, b int, c int default 1) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -599,8 +591,6 @@ Compressed:         	No
 Num Buckets:        	2                   	 
 Bucket Columns:     	[a]                 	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 	 	 
 # Constraints	 	 
 	 	 
diff --git a/ql/src/test/results/clientpositive/llap/stats_nonpart.q.out b/ql/src/test/results/clientpositive/llap/stats_nonpart.q.out
index 29eab63..b54c262 100644
--- a/ql/src/test/results/clientpositive/llap/stats_nonpart.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_nonpart.q.out
@@ -104,8 +104,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain insert into table stats_nonpartitioned select * from mysource where p == 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mysource
@@ -250,8 +248,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats_nonpartitioned
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_nonpartitioned
diff --git a/ql/src/test/results/clientpositive/llap/stats_part.q.out b/ql/src/test/results/clientpositive/llap/stats_part.q.out
index 1c1d248..5f6fd3b 100644
--- a/ql/src/test/results/clientpositive/llap/stats_part.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_part.q.out
@@ -170,8 +170,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mysource
@@ -242,8 +240,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table mysource values (103,20,200), (103,83,832), (103,53,530)
 PREHOOK: type: QUERY
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -305,8 +301,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: show partitions stats_part
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@stats_part
@@ -464,8 +458,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select max(key) from stats_part where p > 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
@@ -551,8 +543,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats_part where p = 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
diff --git a/ql/src/test/results/clientpositive/llap/stats_part2.q.out b/ql/src/test/results/clientpositive/llap/stats_part2.q.out
index 0fbd42d..d925428 100644
--- a/ql/src/test/results/clientpositive/llap/stats_part2.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_part2.q.out
@@ -218,8 +218,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mysource
@@ -290,8 +288,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(key) from stats_part where p > 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
@@ -389,8 +385,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: show partitions stats_part
 PREHOOK: type: SHOWPARTITIONS
 PREHOOK: Input: default@stats_part
@@ -543,8 +537,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 101)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -580,8 +572,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 102)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -617,8 +607,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: update stats_part set key = key + 100 where key in(-50,40) and p > 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
@@ -750,8 +738,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 101)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -787,8 +773,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 102)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -824,8 +808,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select count(value) from stats_part
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
@@ -890,8 +872,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 101)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -927,8 +907,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 102)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -964,8 +942,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: select count(value) from stats_part
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
@@ -1034,8 +1010,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 101)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -1071,8 +1045,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: desc formatted stats_part partition(p = 102)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@stats_part
@@ -1108,8 +1080,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: explain select count(*) from stats_part where p = 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@stats_part
diff --git a/ql/src/test/results/clientpositive/llap/stats_sizebug.q.out b/ql/src/test/results/clientpositive/llap/stats_sizebug.q.out
index 4c78c23..c160d08 100644
--- a/ql/src/test/results/clientpositive/llap/stats_sizebug.q.out
+++ b/ql/src/test/results/clientpositive/llap/stats_sizebug.q.out
@@ -186,8 +186,6 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
 PREHOOK: query: analyze table mysource compute statistics for columns p, key
 PREHOOK: type: ANALYZE_TABLE
 PREHOOK: Input: default@mysource
@@ -233,5 +231,3 @@ Compressed:         	No
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
 Sort Columns:       	[]                  	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 1be7e77..a0021f6 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -658,7 +659,7 @@ class MetaStoreDirectSql {
    * @return List of partitions.
    */
   public List<Partition> getPartitionsViaSqlFilter(String catName, String dbName, String tableName,
-      SqlFilterForPushdown filter, Integer max) throws MetaException {
+      SqlFilterForPushdown filter, Integer max, boolean isTxnTable) throws MetaException {
     List<Long> partitionIds = getPartitionIdsViaSqlFilter(catName,
         dbName, tableName, filter.filter, filter.params,
         filter.joins, max);
@@ -669,7 +670,7 @@ class MetaStoreDirectSql {
       @Override
       public List<Partition> run(List<Long> input) throws MetaException {
         return getPartitionsFromPartitionIds(catName, dbName,
-            tableName, null, input, Collections.emptyList());
+            tableName, null, input, Collections.emptyList(), isTxnTable);
       }
     });
   }
@@ -914,6 +915,13 @@ class MetaStoreDirectSql {
   /** Should be called with the list short enough to not trip up Oracle/etc. */
   private List<Partition> getPartitionsFromPartitionIds(String catName, String dbName, String tblName,
       Boolean isView, List<Long> partIdList, List<String> projectionFields) throws MetaException {
+    return getPartitionsFromPartitionIds(catName, dbName, tblName, isView, partIdList, projectionFields, false);
+  }
+
+  /** Should be called with the list short enough to not trip up Oracle/etc. */
+  private List<Partition> getPartitionsFromPartitionIds(String catName, String dbName, String tblName,
+      Boolean isView, List<Long> partIdList, List<String> projectionFields,
+      boolean isTxnTable) throws MetaException {
 
     boolean doTrace = LOG.isDebugEnabled();
 
@@ -1049,16 +1057,22 @@ class MetaStoreDirectSql {
     String serdeIds = trimCommaList(serdeSb);
     String colIds = trimCommaList(colsSb);
 
-    // Get all the stuff for SD. Don't do empty-list check - we expect partitions to have SDs.
-    MetastoreDirectSqlUtils.setSDParameters(SD_PARAMS, convertMapNullsToEmptyStrings, pm, sds, sdIds);
+    if (!isTxnTable) {
+      // Get all the stuff for SD. Don't do empty-list check - we expect partitions to have SDs.
+      MetastoreDirectSqlUtils.setSDParameters(SD_PARAMS, convertMapNullsToEmptyStrings, pm, sds, sdIds);
+    }
 
-    MetastoreDirectSqlUtils.setSDSortCols(SORT_COLS, pm, sds, sdIds);
+    boolean hasSkewedColumns = false;
+    if (!isTxnTable) {
+      MetastoreDirectSqlUtils.setSDSortCols(SORT_COLS, pm, sds, sdIds);
+    }
 
     MetastoreDirectSqlUtils.setSDBucketCols(BUCKETING_COLS, pm, sds, sdIds);
 
-    // Skewed columns stuff.
-    boolean hasSkewedColumns = MetastoreDirectSqlUtils
-        .setSkewedColNames(SKEWED_COL_NAMES, pm, sds, sdIds);
+    if (!isTxnTable) {
+      // Skewed columns stuff.
+      hasSkewedColumns = MetastoreDirectSqlUtils.setSkewedColNames(SKEWED_COL_NAMES, pm, sds, sdIds);
+    }
 
     // Assume we don't need to fetch the rest of the skewed column data if we have no columns.
     if (hasSkewedColumns) {
@@ -1078,8 +1092,9 @@ class MetaStoreDirectSql {
     }
 
     // Finally, get all the stuff for serdes - just the params.
-    MetastoreDirectSqlUtils
-        .setSerdeParams(SERDE_PARAMS, convertMapNullsToEmptyStrings, pm, serdes, serdeIds);
+    if (!isTxnTable) {
+      MetastoreDirectSqlUtils.setSerdeParams(SERDE_PARAMS, convertMapNullsToEmptyStrings, pm, serdes, serdeIds);
+    }
 
     return orderedResult;
   }
@@ -1125,10 +1140,10 @@ class MetaStoreDirectSql {
   }
 
   private static class PartitionFilterGenerator extends TreeVisitor {
-    private String catName;
-    private String dbName;
-    private String tableName;
-    private List<FieldSchema> partitionKeys;
+    private final String catName;
+    private final String dbName;
+    private final String tableName;
+    private final List<FieldSchema> partitionKeys;
     private final FilterBuilder filterBuffer;
     private final List<Object> params;
     private final List<String> joins;
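
Taken together, the MetaStoreDirectSql hunks above gate four of the per-batch side-table queries (SD_PARAMS, SORT_COLS, SKEWED_COL_NAMES with its follow-up skewed-value fetches, and SERDE_PARAMS) behind the new isTxnTable flag, while BUCKETING_COLS is still fetched for every table. A self-contained sketch of that gating, using hypothetical Runnable stand-ins for the MetastoreDirectSqlUtils.setXxx calls:

    public class SdSideTableGate {
      // Mirrors the order of fetches in getPartitionsFromPartitionIds; the
      // Runnable arguments are illustrative stand-ins, not Hive APIs.
      static void fetchSideTables(boolean isTxnTable, Runnable sdParams,
          Runnable sortCols, Runnable bucketCols, Runnable skewedCols,
          Runnable serdeParams) {
        if (!isTxnTable) {
          sdParams.run();    // SD_PARAMS
          sortCols.run();    // SORT_COLS
        }
        bucketCols.run();    // BUCKETING_COLS: unconditional, as in the patch
        if (!isTxnTable) {
          skewedCols.run();  // SKEWED_COL_NAMES (values/locations only if present)
          serdeParams.run(); // SERDE_PARAMS
        }
      }
    }

For an ACID table this saves four round trips to the backing RDBMS per batch of partition ids, which is the optimisation HIVE-23281 is after.
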
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index d1ad6b7..3922282 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1939,10 +1939,12 @@ public class ObjectStore implements RawStore, Configurable {
         tableType = TableType.MANAGED_TABLE.toString();
       }
     }
+    Map<String, String> parameters = convertMap(mtbl.getParameters());
+    boolean isTxnTable = TxnUtils.isTransactionalTable(parameters);
     final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
         .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
-        .getRetention(), convertToStorageDescriptor(mtbl.getSd()),
-        convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
+        .getRetention(), convertToStorageDescriptor(mtbl.getSd(), false, isTxnTable),
+        convertToFieldSchemas(mtbl.getPartitionKeys()), parameters,
         mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
 
     if (Strings.isNullOrEmpty(mtbl.getOwnerType())) {
@@ -2095,34 +2097,37 @@ public class ObjectStore implements RawStore, Configurable {
     return new MColumnDescriptor(cols);
   }
 
-  // MSD and SD should be same objects. Not sure how to make then same right now
-  // MSerdeInfo *& SerdeInfo should be same as well
   private StorageDescriptor convertToStorageDescriptor(
-      MStorageDescriptor msd,
-      boolean noFS) throws MetaException {
+      MStorageDescriptor msd, boolean noFS, boolean isTxnTable) throws MetaException {
     if (msd == null) {
       return null;
     }
     List<MFieldSchema> mFieldSchemas = msd.getCD() == null ? null : msd.getCD().getCols();
 
+    List<Order> orderList = (isTxnTable) ? Collections.emptyList() : convertToOrders(msd.getSortCols());
+    List<String> bucList = convertList(msd.getBucketCols());
+    SkewedInfo skewedInfo = null;
+
+    Map<String, String> sdParams = isTxnTable ? Collections.emptyMap() : convertMap(msd.getParameters());
     StorageDescriptor sd = new StorageDescriptor(noFS ? null : convertToFieldSchemas(mFieldSchemas),
         msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd
-        .isCompressed(), msd.getNumBuckets(), convertToSerDeInfo(msd
-        .getSerDeInfo(), true), convertList(msd.getBucketCols()), convertToOrders(msd
-        .getSortCols()), convertMap(msd.getParameters()));
-    SkewedInfo skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
-        convertToSkewedValues(msd.getSkewedColValues()),
-        covertToSkewedMap(msd.getSkewedColValueLocationMaps()));
+        .isCompressed(), msd.getNumBuckets(),
+        (!isTxnTable) ? convertToSerDeInfo(msd.getSerDeInfo(), true)
+            : new SerDeInfo(msd.getSerDeInfo().getName(), msd.getSerDeInfo().getSerializationLib(), Collections.emptyMap()),
+        bucList, orderList, sdParams);
+    if (!isTxnTable) {
+      skewedInfo = new SkewedInfo(convertList(msd.getSkewedColNames()),
+          convertToSkewedValues(msd.getSkewedColValues()),
+          covertToSkewedMap(msd.getSkewedColValueLocationMaps()));
+    } else {
+      skewedInfo = new SkewedInfo(Collections.emptyList(), Collections.emptyList(),
+          Collections.emptyMap());
+    }
     sd.setSkewedInfo(skewedInfo);
     sd.setStoredAsSubDirectories(msd.isStoredAsSubDirectories());
     return sd;
   }
 
-  private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd)
-      throws MetaException {
-    return convertToStorageDescriptor(msd, false);
-  }
-
   /**
    * Convert a list of MStringList to a list of lists of strings
    */
@@ -2453,7 +2458,7 @@ public class ObjectStore implements RawStore, Configurable {
       openTransaction();
       MTable table = this.getMTable(catName, dbName, tableName);
       MPartition mpart = getMPartition(catName, dbName, tableName, part_vals);
-      part = convertToPart(mpart);
+      part = convertToPart(mpart, false);
       committed = commitTransaction();
       if (part == null) {
         throw new NoSuchObjectException("partition values="
@@ -2616,7 +2621,7 @@ public class ObjectStore implements RawStore, Configurable {
         msd, part.getParameters());
   }
 
-  private Partition convertToPart(MPartition mpart) throws MetaException {
+  private Partition convertToPart(MPartition mpart, boolean isTxnTable) throws MetaException {
     if (mpart == null) {
       return null;
     }
@@ -2627,22 +2632,25 @@ public class ObjectStore implements RawStore, Configurable {
     String tableName = table == null ? null : table.getTableName();
     String catName = table == null ? null :
         table.getDatabase() == null ? null : table.getDatabase().getCatalogName();
+    Map<String, String> params = convertMap(mpart.getParameters());
     Partition p = new Partition(convertList(mpart.getValues()), dbName, tableName, mpart.getCreateTime(),
-        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()),
-        convertMap(mpart.getParameters()));
+        mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), false, isTxnTable),
+        params);
     p.setCatName(catName);
     p.setWriteId(mpart.getWriteId());
     return p;
   }
 
-  private Partition convertToPart(String catName, String dbName, String tblName, MPartition mpart)
+  private Partition convertToPart(String catName, String dbName, String tblName,
+      MPartition mpart, boolean isTxnTable)
       throws MetaException {
     if (mpart == null) {
       return null;
     }
+    Map<String, String> params = convertMap(mpart.getParameters());
     Partition p = new Partition(convertList(mpart.getValues()), dbName, tblName,
         mpart.getCreateTime(), mpart.getLastAccessTime(),
-        convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters()));
+        convertToStorageDescriptor(mpart.getSd(), false, isTxnTable), params);
     p.setCatName(catName);
     p.setWriteId(mpart.getWriteId());
     return p;
@@ -2873,7 +2881,7 @@ public class ObjectStore implements RawStore, Configurable {
       if (CollectionUtils.isNotEmpty(mparts)) {
         for (MPartition mpart : mparts) {
           MTable mtbl = mpart.getTable();
-          Partition part = convertToPart(mpart);
+          Partition part = convertToPart(mpart, false);
           parts.add(part);
 
           if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
@@ -2907,7 +2915,7 @@ public class ObjectStore implements RawStore, Configurable {
       }
       Partition part = null;
       MTable mtbl = mpart.getTable();
-      part = convertToPart(mpart);
+      part = convertToPart(mpart, false);
       if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
         String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl
             .getPartitionKeys()), partVals);
@@ -2938,17 +2946,18 @@ public class ObjectStore implements RawStore, Configurable {
       dest = new ArrayList<>(src.size());
     }
     for (MPartition mp : src) {
-      dest.add(convertToPart(mp));
+      dest.add(convertToPart(mp, false));
       Deadline.checkTimeout();
     }
     return dest;
   }
 
-  private List<Partition> convertToParts(String catName, String dbName, String tblName, List<MPartition> mparts)
+  private List<Partition> convertToParts(String catName, String dbName, String tblName,
+      List<MPartition> mparts, boolean isTxnTable)
       throws MetaException {
     List<Partition> parts = new ArrayList<>(mparts.size());
     for (MPartition mp : mparts) {
-      parts.add(convertToPart(catName, dbName, tblName, mp));
+      parts.add(convertToPart(catName, dbName, tblName, mp, isTxnTable));
       Deadline.checkTimeout();
     }
     return parts;
@@ -3411,7 +3420,7 @@ public class ObjectStore implements RawStore, Configurable {
           part_vals, max_parts, null, queryWrapper);
       MTable mtbl = getMTable(catName, db_name, tbl_name);
       for (Object o : parts) {
-        Partition part = convertToPart((MPartition) o);
+        Partition part = convertToPart((MPartition) o, false);
         //set auth privileges
         if (null != userName && null != groupNames &&
             "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
@@ -3552,7 +3561,7 @@ public class ObjectStore implements RawStore, Configurable {
       @Override
       protected List<Partition> getJdoResult(
           GetHelper<List<Partition>> ctx) throws MetaException, NoSuchObjectException {
-        return getPartitionsViaOrmFilter(catName, dbName, tblName, partNames);
+        return getPartitionsViaOrmFilter(catName, dbName, tblName, partNames, false);
       }
     }.run(false);
   }
@@ -3579,6 +3588,7 @@ public class ObjectStore implements RawStore, Configurable {
 
     MTable mTable = ensureGetMTable(catName, dbName, tblName);
     List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
+    boolean isTxnTbl = TxnUtils.isTransactionalTable(convertMap(mTable.getParameters()));
     result.addAll(new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
       @Override
       protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
@@ -3588,7 +3598,7 @@ public class ObjectStore implements RawStore, Configurable {
           if (directSql.generateSqlFilterForPushdown(catName, dbName, tblName, partitionKeys,
               exprTree, defaultPartitionName, filter)) {
             String catalogName = (catName != null) ? catName : DEFAULT_CATALOG_NAME;
-            return directSql.getPartitionsViaSqlFilter(catalogName, dbName, tblName, filter, null);
+            return directSql.getPartitionsViaSqlFilter(catalogName, dbName, tblName, filter, null, isTxnTbl);
           }
         }
         // We couldn't do SQL filter pushdown. Get names via normal means.
@@ -3611,7 +3621,7 @@ public class ObjectStore implements RawStore, Configurable {
           List<String> partNames = new ArrayList<>();
           hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
                   catName, dbName, tblName, partitionKeys, expr, defaultPartitionName, maxParts, partNames));
-          result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames);
+          result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames, isTxnTbl);
         }
         return result;
       }
@@ -3720,7 +3730,7 @@ public class ObjectStore implements RawStore, Configurable {
    * @return Resulting partitions.
    */
   private List<Partition> getPartitionsViaOrmFilter(String catName, String dbName, String tblName,
-      List<String> partNames) throws MetaException {
+      List<String> partNames, boolean isTxnTable) throws MetaException {
 
     if (partNames.isEmpty()) {
       return Collections.emptyList();
@@ -3737,7 +3747,7 @@ public class ObjectStore implements RawStore, Configurable {
         query.setOrdering("partitionName ascending");
 
         List<MPartition> mparts = (List<MPartition>) query.executeWithMap(queryWithParams.getRight());
-        List<Partition> partitions = convertToParts(catName, dbName, tblName, mparts);
+        List<Partition> partitions = convertToParts(catName, dbName, tblName, mparts, isTxnTable);
         query.closeAll();
 
         return partitions;
@@ -4088,7 +4098,7 @@ public class ObjectStore implements RawStore, Configurable {
     tblName = normalizeIdentifier(tblName);
     MTable mTable = ensureGetMTable(catName, dbName, tblName);
     List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
-
+    Map<String, String> parameters = mTable.getParameters();
     return new GetHelper<Integer>(catName, dbName, tblName, true, true) {
       private final SqlFilterForPushdown filter = new SqlFilterForPushdown();
 
@@ -4179,6 +4189,7 @@ public class ObjectStore implements RawStore, Configurable {
 
     MTable mTable = ensureGetMTable(catName, dbName, tblName);
     List<FieldSchema> partitionKeys = convertToFieldSchemas(mTable.getPartitionKeys());
+    boolean isTxnTable = TxnUtils.isTransactionalTable(convertMap(mTable.getParameters()));
     final ExpressionTree tree = (filter != null && !filter.isEmpty())
         ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE;
     return new GetListHelper<Partition>(catName, dbName, tblName, allowSql, allowJdo) {
@@ -4191,7 +4202,7 @@ public class ObjectStore implements RawStore, Configurable {
 
       @Override
       protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
-        return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, filter, (maxParts < 0) ? null : (int)maxParts);
+        return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, filter, (maxParts < 0) ? null : (int)maxParts, isTxnTable);
       }
 
       @Override
@@ -4693,7 +4704,7 @@ public class ObjectStore implements RawStore, Configurable {
     }
 
     oldCd.t = oldCD;
-    return convertToPart(oldp);
+    return convertToPart(oldp, false);
   }
 
   @Override
@@ -8928,7 +8939,7 @@ public class ObjectStore implements RawStore, Configurable {
       String catName = statsDesc.isSetCatName() ? statsDesc.getCatName() : getDefaultCatalog(conf);
       Table table = ensureGetTable(catName, statsDesc.getDbName(), statsDesc.getTableName());
       Partition partition = convertToPart(getMPartition(
-          catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals));
+          catName, statsDesc.getDbName(), statsDesc.getTableName(), partVals), false);
       List<String> colNames = new ArrayList<>();
 
       for(ColumnStatisticsObj statsObj : statsObjs) {
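
All of the ObjectStore call sites above follow the same pattern: the flag is computed once per table from its parameters, via TxnUtils.isTransactionalTable(convertMap(mTable.getParameters())), and then threaded through convertToPart/convertToParts/convertToStorageDescriptor rather than being re-derived for every partition. A minimal sketch of what that check amounts to, assuming the standard "transactional" table property is what marks an ACID table (the real helper lives in org.apache.hadoop.hive.metastore.txn.TxnUtils):

    import java.util.Map;

    final class TxnCheckSketch {
      // Hypothetical re-implementation for illustration only.
      static boolean isTransactionalTable(Map<String, String> parameters) {
        return parameters != null
            && "true".equalsIgnoreCase(parameters.get("transactional"));
      }
    }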