Posted to commits@hive.apache.org by sa...@apache.org on 2018/03/06 05:50:32 UTC

[21/23] hive git commit: HIVE-18749: Need to replace transactionId with writeId in RecordIdentifier and other relevant contexts (Sankar Hariappan, reviewed by Eugene Koifman)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
index 38358f2..a4df509 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java
@@ -89,13 +89,13 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
     /**the insert creates 2 output files (presumably because there are 2 input files)
      * The number in the file name is writerId.  This is the number encoded in ROW__ID.bucketId -
      * see {@link org.apache.hadoop.hive.ql.io.BucketCodec}*/
-    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t"));
+    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t0\t"));
     Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000"));
-    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
+    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
     Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000"));
-    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t"));
+    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t1\t1\t1\t"));
     Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001"));
-    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
+    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
     Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001"));
 
     runStatementOnDriver("update nobuckets set c3 = 17 where c3 in(0,1)");
@@ -104,14 +104,14 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
     for(String s : rs) {
       LOG.warn(s);
     }
-    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
+    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
     Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00000"));
-    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
+    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
     Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/delta_0000001_0000001_0000/bucket_00001"));
     //so update has 1 writer which creates bucket0 where both new rows land
-    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t"));
+    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t"));
     Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000"));
-    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t"));
+    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t"));
     Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/delta_0000002_0000002_0000/bucket_00000"));
 
     Set<String> expectedFiles = new HashSet<>();
@@ -144,13 +144,13 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
 └── delta_0000002_0000002_0000
     └── bucket_00000
     */
-    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
+    Assert.assertTrue(rs.get(0), rs.get(0).startsWith("{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t3\t3\t"));
     Assert.assertTrue(rs.get(0), rs.get(0).endsWith("nobuckets/base_0000002/bucket_00000"));
-    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t"));
+    Assert.assertTrue(rs.get(1), rs.get(1).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t0\t17\t"));
     Assert.assertTrue(rs.get(1), rs.get(1).endsWith("nobuckets/base_0000002/bucket_00000"));
-    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t"));
+    Assert.assertTrue(rs.get(2), rs.get(2).startsWith("{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t17\t"));
     Assert.assertTrue(rs.get(2), rs.get(2).endsWith("nobuckets/base_0000002/bucket_00000"));
-    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
+    Assert.assertTrue(rs.get(3), rs.get(3).startsWith("{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t2\t2\t2\t"));
     Assert.assertTrue(rs.get(3), rs.get(3).endsWith("nobuckets/base_0000002/bucket_00001"));
 
     expectedFiles.clear();
@@ -185,8 +185,8 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
       "'='true', 'transactional_properties'='default') as select a, b from " + Table.NONACIDORCTBL);
     List<String> rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas order by ROW__ID");
     String expected[][] = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas/delta_0000001_0000001_0000/bucket_00000"},
     };
     checkExpected(rs, expected, "Unexpected row count after ctas from non acid table");
 
@@ -195,8 +195,8 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
       "'='true', 'transactional_properties'='default') as select a, b from " + Table.ACIDTBL);//todo: try this with acid default - it seem makeing table acid in listener is too late
     rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas2 order by ROW__ID");
     String expected2[][] = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas2/delta_0000001_0000001_0000/bucket_00000"}
     };
     checkExpected(rs, expected2, "Unexpected row count after ctas from acid table");
 
@@ -205,10 +205,10 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
       " union all select a, b from " + Table.ACIDTBL);
     rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas3 order by ROW__ID");
     String expected3[][] = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"},
-        {"{\"transactionid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":0}\t3\t4", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"},
+        {"{\"writeid\":1,\"bucketid\":536936448,\"rowid\":1}\t1\t2", "warehouse/myctas3/delta_0000001_0000001_0000/bucket_00001"},
     };
     checkExpected(rs, expected3, "Unexpected row count after ctas from union all query");
 
@@ -217,8 +217,8 @@ public class TestTxnNoBuckets extends TxnCommandsBaseForTests {
       " union distinct select a, b from " + Table.ACIDTBL);
     rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from myctas4 order by ROW__ID");
     String expected4[][] = {
-      {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0000/bucket_00000"},
-      {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0000/bucket_00000"},
+      {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0000/bucket_00000"},
+      {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0000/bucket_00000"},
     };
     checkExpected(rs, expected4, "Unexpected row count after ctas from union distinct query");
   }
@@ -268,11 +268,11 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     List<String> rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by ROW__ID");
 
     String expected[][] = {
-        {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0003/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":0}\t1\t2", "/delta_0000001_0000001_0001/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870913,\"rowid\":1}\t3\t4", "/delta_0000001_0000001_0001/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":0}\t7\t8", "/delta_0000001_0000001_0002/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870914,\"rowid\":1}\t5\t6", "/delta_0000001_0000001_0002/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870915,\"rowid\":0}\t9\t10", "/delta_0000001_0000001_0003/bucket_00000"},
     };
     checkExpected(rs, expected, "Unexpected row count after ctas");
   }
@@ -347,16 +347,16 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
      logical bucket (tranche)
      */
     String expected2[][] = {
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2",  "warehouse/t/000002_0"},
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4",  "warehouse/t/000002_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6",  "warehouse/t/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":0}\t6\t8",  "warehouse/t/000001_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/000000_0_copy_1"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536936448,\"rowid\":1}\t60\t80", "warehouse/t/HIVE_UNION_SUBDIR_16/000001_0"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2",  "warehouse/t/000002_0"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4",  "warehouse/t/000002_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6",  "warehouse/t/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536936448,\"rowid\":0}\t6\t8",  "warehouse/t/000001_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/000000_0_copy_1"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536936448,\"rowid\":1}\t60\t80", "warehouse/t/HIVE_UNION_SUBDIR_16/000001_0"},
     };
     checkExpected(rs, expected2,"after converting to acid (no compaction)");
     Assert.assertEquals(0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912));
@@ -368,15 +368,15 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     assertVectorized(shouldVectorize(), "delete from T where b = 8");
     runStatementOnDriver("delete from T where b = 8");
     String expected3[][] = {
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2",  "warehouse/t/000002_0"},
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4",  "warehouse/t/000002_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6",  "warehouse/t/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/000000_0_copy_1"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2",  "warehouse/t/000002_0"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4",  "warehouse/t/000002_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6",  "warehouse/t/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/000000_0_copy_1"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/HIVE_UNION_SUBDIR_15/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/HIVE_UNION_SUBDIR_16/000000_0"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/delta_0000001_0000001_0000/bucket_00000"},
     };
     rs = runStatementOnDriver("select ROW__ID, a, b, INPUT__FILE__NAME from T order by a, b, INPUT__FILE__NAME");
     checkExpected(rs, expected3,"after converting to acid (no compaction with updates)");
@@ -388,15 +388,15 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
 
     /*Compaction preserves location of rows wrt buckets/tranches (for now)*/
     String expected4[][] = {
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000002/bucket_00002"},
-        {"{\"transactionid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000002/bucket_00002"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/base_0000002/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":0}\t1\t2", "warehouse/t/base_0000002/bucket_00002"},
+        {"{\"writeid\":0,\"bucketid\":537001984,\"rowid\":1}\t2\t4", "warehouse/t/base_0000002/bucket_00002"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t5\t6", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t9\t10", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t10\t20", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t12\t12", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t20\t40", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t50\t60", "warehouse/t/base_0000002/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t60\t88", "warehouse/t/base_0000002/bucket_00000"},
     };
     checkExpected(rs, expected4,"after major compact");
   }
@@ -468,15 +468,15 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
      * Also check the file name (only) after compaction for completeness
      */
     String[][] expected = {
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t0\t13",  "bucket_00000", "000000_0_copy_1"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"},
-        {"{\"transactionid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2",   "bucket_00000", "000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t1\t4",   "bucket_00000", "000000_0_copy_1"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":5}\t1\t5",   "bucket_00000", "000000_0_copy_1"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":6}\t1\t6",   "bucket_00000", "000000_0_copy_2"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"}
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t0\t13",  "bucket_00000", "000000_0_copy_1"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t0\t15", "bucket_00000", "bucket_00000"},
+        {"{\"writeid\":3,\"bucketid\":536870912,\"rowid\":0}\t0\t17", "bucket_00000", "bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t0\t120", "bucket_00000", "bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}\t1\t2",   "bucket_00000", "000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t1\t4",   "bucket_00000", "000000_0_copy_1"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":5}\t1\t5",   "bucket_00000", "000000_0_copy_1"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":6}\t1\t6",   "bucket_00000", "000000_0_copy_2"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t16", "bucket_00000", "bucket_00000"}
     };
     Assert.assertEquals("Unexpected row count before compaction", expected.length, rs.size());
     for(int i = 0; i < expected.length; i++) {
@@ -569,8 +569,8 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     query = "select ROW__ID, a from T where b > 6 order by a";
     rs = runStatementOnDriver(query);
     String[][] expected1 = {
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}", "6"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}", "9"}
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}", "6"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}", "9"}
     };
     checkExpected(rs, expected1, "After conversion with VC1");
     assertVectorized(shouldVectorize(), query);
@@ -579,11 +579,11 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     query = "select ROW__ID, a from T where b > 0 order by a";
     rs = runStatementOnDriver(query);
     String[][] expected2 = {
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":0}", "1"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}", "2"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}", "5"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}", "6"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}", "9"}
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":0}", "1"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}", "2"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}", "5"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}", "6"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}", "9"}
     };
     checkExpected(rs, expected2, "After conversion with VC2");
     assertVectorized(shouldVectorize(), query);
@@ -593,8 +593,8 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     rs = runStatementOnDriver(query);
     Assert.assertEquals("", 2, rs.size());
     String[][] expected3 = {
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6", "warehouse/t/000000_0"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9", "warehouse/t/000000_0"}
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t6", "warehouse/t/000000_0"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t9", "warehouse/t/000000_0"}
     };
     checkExpected(rs, expected3, "After non-vectorized read");
     Assert.assertEquals(0, BucketCodec.determineVersion(536870912).decodeWriterId(536870912));
@@ -606,11 +606,11 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     query = "select ROW__ID, b from T where b > 0 order by a";
     rs = runStatementOnDriver(query);
     String[][] expected4 = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}","17"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}","4"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}","6"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}","8"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}","10"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}","17"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}","4"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}","6"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}","8"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}","10"}
     };
     checkExpected(rs, expected4, "After conversion with VC4");
     assertVectorized(shouldVectorize(), query);
@@ -627,11 +627,11 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     query = "select ROW__ID, a, b, INPUT__FILE__NAME from T where b > 0 order by a, b";
     rs = runStatementOnDriver(query);
     String[][] expected5 = {//the row__ids are the same after compaction
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000001/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4",   "warehouse/t/base_0000001/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6",   "warehouse/t/base_0000001/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8",   "warehouse/t/base_0000001/bucket_00000"},
-        {"{\"transactionid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10",  "warehouse/t/base_0000001/bucket_00000"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t17", "warehouse/t/base_0000001/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":1}\t2\t4",   "warehouse/t/base_0000001/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":2}\t5\t6",   "warehouse/t/base_0000001/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":3}\t6\t8",   "warehouse/t/base_0000001/bucket_00000"},
+        {"{\"writeid\":0,\"bucketid\":536870912,\"rowid\":4}\t9\t10",  "warehouse/t/base_0000001/bucket_00000"}
     };
     checkExpected(rs, expected5, "After major compaction");
     //vectorized because there is INPUT__FILE__NAME
@@ -671,14 +671,14 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     String query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b";
     List<String> rs = runStatementOnDriver(query);
     String[][] expected = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/delta_0000002_0000002_0000/bucket_00000"}
     };
     checkExpected(rs, expected, "insert data");
 
@@ -689,14 +689,14 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     query = "select ROW__ID, p, q, a, b, INPUT__FILE__NAME from T order by p, q, a, b";
     rs = runStatementOnDriver(query);
     String[][] expected2 = {
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000002/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000002/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000002/bucket_00000"},
-        {"{\"transactionid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000002/bucket_00000"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t4\t1", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t4\t3", "t/p=1/q=1/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t1\t5\t1", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t1\t5\t3", "t/p=1/q=1/delta_0000002_0000002_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t4\t2", "t/p=1/q=2/base_0000002/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t4\t4", "t/p=1/q=2/base_0000002/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":0}\t1\t2\t5\t2", "t/p=1/q=2/base_0000002/bucket_00000"},
+        {"{\"writeid\":2,\"bucketid\":536870912,\"rowid\":1}\t1\t2\t5\t4", "t/p=1/q=2/base_0000002/bucket_00000"}
     };
     checkExpected(rs, expected2, "after major compaction");
 
@@ -721,8 +721,8 @@ ekoifman:apache-hive-3.0.0-SNAPSHOT-bin ekoifman$ tree /Users/ekoifman/dev/hiver
     List<String> rs = runStatementOnDriver(query);
     String[][] expected = {
         //this proves data is written in Acid layout so T was made Acid
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"},
-        {"{\"transactionid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"}
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":0}\t1\t2", "t/delta_0000001_0000001_0000/bucket_00000"},
+        {"{\"writeid\":1,\"bucketid\":536870912,\"rowid\":1}\t3\t4", "t/delta_0000001_0000001_0000/bucket_00000"}
     };
     checkExpected(rs, expected, "insert data");
   }
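
For readers following the bucketid values asserted above: below is a minimal standalone sketch (not Hive's org.apache.hadoop.hive.ql.io.BucketCodec itself) of how those integers decode, assuming the V1 layout of 3 version bits at the top, a 12-bit writer id starting at bit 16, and the statement id in the low 12 bits. The decoded values line up with the bucket file names (bucket_00000, bucket_00001) and the delta suffixes (_0001, _0002, _0003) in the expected rows.

    // Standalone sketch only; the field widths are an assumption inferred from the
    // values asserted in TestTxnNoBuckets above, not a copy of BucketCodec.
    public class BucketIdSketch {
      static int version(int bucketId)     { return bucketId >>> 29; }           // top 3 bits
      static int writerId(int bucketId)    { return (bucketId >>> 16) & 0xFFF; } // 12 bits at bit 16
      static int statementId(int bucketId) { return bucketId & 0xFFF; }          // low 12 bits

      public static void main(String[] args) {
        int[] ids = {536870912, 536936448, 537001984, 536870913, 536870914, 536870915};
        for (int id : ids) {
          System.out.printf("%d -> version=%d writerId=%d statementId=%d%n",
              id, version(id), writerId(id), statementId(id));
        }
        // 536870912 -> writerId 0 (rows in bucket_00000), 536936448 -> writerId 1
        // (bucket_00001), 537001984 -> writerId 2, and 536870913/14/15 -> writerId 0
        // with statementId 1/2/3, matching delta_..._0001/_0002/_0003 above. This is
        // consistent with the test's Assert.assertEquals(0,
        //   BucketCodec.determineVersion(536870912).decodeWriterId(536870912)).
      }
    }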

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
index d673be4..7f6077c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
@@ -264,7 +264,7 @@ public class TestFileSinkOperator {
 
   private FileSinkOperator getFileSink(AcidUtils.Operation writeType,
                                        boolean dynamic,
-                                       long txnId) throws IOException, HiveException {
+                                       long writeId) throws IOException, HiveException {
     TableDesc tableDesc = null;
     switch (writeType) {
       case DELETE:
@@ -292,8 +292,8 @@ public class TestFileSinkOperator {
     }
     desc.setWriteType(writeType);
     desc.setGatherStats(true);
-    if (txnId > 0) {
-      desc.setTableWriteId(txnId);
+    if (writeId > 0) {
+      desc.setTableWriteId(writeId);
     }
     if (writeType != AcidUtils.Operation.NOT_ACID) {
       desc.setTableWriteId(1L);

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 073b072..e534c9e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -2504,7 +2504,7 @@ public class TestInputOutputFormat {
     List<OrcProto.Type> types = new ArrayList<OrcProto.Type>();
     OrcProto.Type.Builder builder = OrcProto.Type.newBuilder();
     builder.setKind(OrcProto.Type.Kind.STRUCT)
-        .addAllFieldNames(Arrays.asList("op", "otid", "bucket", "rowid", "ctid",
+        .addAllFieldNames(Arrays.asList("op", "owid", "bucket", "rowid", "cwid",
             "row"))
         .addAllSubtypes(Arrays.asList(1,2,3,4,5,6));
     types.add(builder.build());
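
A note on the renamed struct fields: the ACID event schema built here is (op, owid, bucket, rowid, cwid, row), i.e. the original/current transaction ids become original/current write ids. A small illustrative sketch of the column positions this implies follows; the names and indexes are taken from the addAllFieldNames/addAllSubtypes calls above, while the constant names themselves are illustrative rather than Hive's.

    // Illustrative only: column positions of the ACID event struct as listed above.
    final class AcidEventColumnsSketch {
      static final int OPERATION         = 0; // "op"    : insert/update/delete marker
      static final int ORIGINAL_WRITE_ID = 1; // "owid"  : write id that first created the row (was "otid")
      static final int BUCKET            = 2; // "bucket": BucketCodec-encoded writer/statement id
      static final int ROW_ID            = 3; // "rowid" : row id within the original write
      static final int CURRENT_WRITE_ID  = 4; // "cwid"  : write id of this event (was "ctid")
      static final int ROW               = 5; // "row"   : the user row itself
      private AcidEventColumnsSketch() {}
    }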

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
index 6f41d69..d8a7af8 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
@@ -508,7 +508,7 @@ public class TestOrcRawRecordMerger {
       return "col1,ROW__ID";
     }
     static String getColumnTypesProperty() {
-      return "string:struct<transactionId:bigint,bucketId:int,rowId:bigint>";
+      return "string:struct<writeId:bigint,bucketId:int,rowId:bigint>";
     }
 
   }
@@ -1050,7 +1050,7 @@ public class TestOrcRawRecordMerger {
       return "myint,mylong,mytext,myfloat,mydouble,ROW__ID";
     }
     static String getColumnTypesProperty() {
-      return "int:bigint:string:float:double:struct<transactionId:bigint,bucketId:int,rowId:bigint>";
+      return "int:bigint:string:float:double:struct<writeId:bigint,bucketId:int,rowId:bigint>";
     }
   }
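
For context, the two property strings above are the pair a reader is handed for this schema. A small sketch of how they line up, assuming the usual "columns" / "columns.types" property keys (an assumption, not shown in this hunk); the point is that the ROW__ID virtual column is now typed with writeId rather than transactionId.

    // Sketch only; the property keys are assumed to be the standard "columns"/"columns.types".
    import java.util.Properties;

    public class RowIdSchemaSketch {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("columns", "myint,mylong,mytext,myfloat,mydouble,ROW__ID");
        props.setProperty("columns.types",
            "int:bigint:string:float:double:struct<writeId:bigint,bucketId:int,rowId:bigint>");
        String[] cols = props.getProperty("columns").split(",");
        // Limit the split so the colons inside struct<...> stay with the last type.
        String[] types = props.getProperty("columns.types").split(":", cols.length);
        for (int i = 0; i < cols.length; i++) {
          System.out.println(cols[i] + " : " + types[i]);
        }
      }
    }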
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 709f021..3acc085 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -57,8 +57,8 @@ import org.junit.Test;
  */
 public class TestVectorizedOrcAcidRowBatchReader {
 
-  private static final long NUM_ROWID_PER_OTID = 15000L;
-  private static final long NUM_OTID = 10L;
+  private static final long NUM_ROWID_PER_OWID = 15000L;
+  private static final long NUM_OWID = 10L;
   private JobConf conf;
   private FileSystem fs;
   private Path root;
@@ -118,16 +118,16 @@ public class TestVectorizedOrcAcidRowBatchReader {
         .bucket(bucket)
         .writingBase(false)
         .minimumWriteId(1)
-        .maximumWriteId(NUM_OTID)
+        .maximumWriteId(NUM_OWID)
         .inspector(inspector)
         .reporter(Reporter.NULL)
         .recordIdColumn(1)
         .finalDestination(root);
     RecordUpdater updater = new OrcRecordUpdater(root, options);
     // Create a single insert delta with 150,000 rows, with 15000 rowIds per original transaction id.
-    for (long i = 1; i <= NUM_OTID; ++i) {
-      for (long j = 0; j < NUM_ROWID_PER_OTID; ++j) {
-        long payload = (i-1) * NUM_ROWID_PER_OTID + j;
+    for (long i = 1; i <= NUM_OWID; ++i) {
+      for (long j = 0; j < NUM_ROWID_PER_OWID; ++j) {
+        long payload = (i-1) * NUM_ROWID_PER_OWID + j;
         updater.insert(i, new DummyRow(payload, j, i, bucket));
       }
     }
@@ -140,11 +140,11 @@ public class TestVectorizedOrcAcidRowBatchReader {
 
     // Create a delete delta that has rowIds divisible by 2 but not by 3. This will produce
     // a delete delta file with 50,000 delete events.
-    long currTxnId = NUM_OTID + 1;
+    long currTxnId = NUM_OWID + 1;
     options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
     updater = new OrcRecordUpdater(root, options);
-    for (long i = 1; i <= NUM_OTID; ++i) {
-      for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
+    for (long i = 1; i <= NUM_OWID; ++i) {
+      for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) {
         if (j % 2 == 0 && j % 3 != 0) {
           updater.delete(currTxnId, new DummyRow(-1, j, i, bucket));
         }
@@ -153,11 +153,11 @@ public class TestVectorizedOrcAcidRowBatchReader {
     updater.close(false);
     // Now, create a delete delta that has rowIds divisible by 3 but not by 2. This will produce
     // a delete delta file with 25,000 delete events.
-    currTxnId = NUM_OTID + 2;
+    currTxnId = NUM_OWID + 2;
     options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
     updater = new OrcRecordUpdater(root, options);
-    for (long i = 1; i <= NUM_OTID; ++i) {
-      for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
+    for (long i = 1; i <= NUM_OWID; ++i) {
+      for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) {
         if (j % 2 != 0 && j % 3 == 0) {
           updater.delete(currTxnId, new DummyRow(-1, j, i, bucket));
         }
@@ -166,11 +166,11 @@ public class TestVectorizedOrcAcidRowBatchReader {
     updater.close(false);
     // Now, create a delete delta that has rowIds divisible by both 3 and 2. This will produce
     // a delete delta file with 25,000 delete events.
-    currTxnId = NUM_OTID + 3;
+    currTxnId = NUM_OWID + 3;
     options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
     updater = new OrcRecordUpdater(root, options);
-    for (long i = 1; i <= NUM_OTID; ++i) {
-      for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
+    for (long i = 1; i <= NUM_OWID; ++i) {
+      for (long j = 0; j < NUM_ROWID_PER_OWID; j += 1) {
         if (j % 2 == 0 && j % 3 == 0) {
           updater.delete(currTxnId, new DummyRow(-1, j, i, bucket));
         }
@@ -235,10 +235,10 @@ public class TestVectorizedOrcAcidRowBatchReader {
       for (int i = 0; i < vectorizedRowBatch.size; ++i) {
         int idx = vectorizedRowBatch.selected[i];
         long payload = col.vector[idx];
-        long otid = (payload / NUM_ROWID_PER_OTID) + 1;
-        long rowId = payload % NUM_ROWID_PER_OTID;
+        long owid = (payload / NUM_ROWID_PER_OWID) + 1;
+        long rowId = payload % NUM_ROWID_PER_OWID;
         assertFalse(rowId % 2 == 0 || rowId % 3 == 0);
-        assertTrue(otid != 5); // Check that txn#5 has been excluded.
+        assertTrue(owid != 5); // Check that writeid#5 has been excluded.
         assertTrue(payload > previousPayload); // Check that the data is in sorted order.
         previousPayload = payload;
       }
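
The arithmetic in this test packs (owid, rowId) into a single long payload on the insert side and the read-side assertions unpack it. Below is a quick standalone sketch of that round trip, and of why every surviving row satisfies the assertFalse(rowId % 2 == 0 || rowId % 3 == 0) check; the constants mirror NUM_OWID / NUM_ROWID_PER_OWID above.

    // Standalone sketch of the payload encoding used in the test above.
    public class PayloadRoundTripSketch {
      static final long NUM_ROWID_PER_OWID = 15000L;
      static final long NUM_OWID = 10L;

      public static void main(String[] args) {
        for (long owid = 1; owid <= NUM_OWID; owid++) {
          for (long rowId = 0; rowId < NUM_ROWID_PER_OWID; rowId++) {
            long payload = (owid - 1) * NUM_ROWID_PER_OWID + rowId; // encode (insert side)
            long decodedOwid  = payload / NUM_ROWID_PER_OWID + 1;   // decode (assert side)
            long decodedRowId = payload % NUM_ROWID_PER_OWID;
            if (decodedOwid != owid || decodedRowId != rowId) {
              throw new AssertionError("round trip broke at payload " + payload);
            }
          }
        }
        // The three delete deltas drop rowIds divisible by 2 or by 3, so every row that
        // survives has rowId % 2 != 0 && rowId % 3 != 0 -- exactly what the reader asserts.
        System.out.println("payload round trip OK for " + (NUM_OWID * NUM_ROWID_PER_OWID) + " rows");
      }
    }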

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/row__id.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/row__id.q b/ql/src/test/queries/clientpositive/row__id.q
index 6aaa40f..df2cb15 100644
--- a/ql/src/test/queries/clientpositive/row__id.q
+++ b/ql/src/test/queries/clientpositive/row__id.q
@@ -13,12 +13,12 @@ insert into hello_acid partition (load_date='2016-03-02') values (2, 2);
 insert into hello_acid partition (load_date='2016-03-03') values (3, 3);
 
 explain
-select tid from (select row__id.transactionid as tid from hello_acid) sub order by tid;
+select tid from (select row__id.writeid as tid from hello_acid) sub order by tid;
 
-select tid from (select row__id.transactionid as tid from hello_acid) sub order by tid;
+select tid from (select row__id.writeid as tid from hello_acid) sub order by tid;
 
 explain
-select tid from (select row__id.transactionid as tid from hello_acid) sub where tid = 3;
+select tid from (select row__id.writeid as tid from hello_acid) sub where tid = 3;
 
-select tid from (select row__id.transactionid as tid from hello_acid) sub where tid = 3;
+select tid from (select row__id.writeid as tid from hello_acid) sub where tid = 3;
 
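Since row__id.q is the user-visible face of this change, here is a hedged client-side sketch: the subquery is taken verbatim from the .q file above, while the JDBC URL, credentials and the existence of the hello_acid table are placeholders for illustration, not part of this patch.

    // Sketch only: reading the renamed ROW__ID sub-field from a JDBC client.
    // The connection URL and credentials below are placeholders.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class RowIdWriteIdQuerySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:hive2://localhost:10000/default", "hive", "");
             Statement stmt = conn.createStatement();
             // Before HIVE-18749 this sub-field was row__id.transactionid; it is now writeid.
             ResultSet rs = stmt.executeQuery(
                 "select tid from (select row__id.writeid as tid from hello_acid) sub order by tid")) {
          while (rs.next()) {
            System.out.println(rs.getLong(1));
          }
        }
      }
    }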

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
index 09d7050..e2e356e 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part.q
@@ -19,7 +19,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead just one explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
index a526f57..1e5f69b 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_llap_io.q
@@ -20,7 +20,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead just one explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q
index 17d461c..b58bb56 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update.q
@@ -19,7 +19,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q
index 1866fc8..2f82583 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_part_update_llap_io.q
@@ -20,7 +20,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q
index 23af572..19e7bc5 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table.q
@@ -18,7 +18,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead just one explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
index 66e6da4..71ab2e5 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_llap_io.q
@@ -19,7 +19,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead just one explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
index 4699cca..35c758a 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update.q
@@ -18,7 +18,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
index 16df15a..b72ded6 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acid_table_update_llap_io.q
@@ -19,7 +19,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
index 1ad8650..fb86392 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part.q
@@ -19,7 +19,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
index e676346..7509d3d 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_llap_io.q
@@ -20,7 +20,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q
index 9a6bcaa..aba65c9 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update.q
@@ -19,7 +19,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q
index 2b404ba..6191d34 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_part_update_llap_io.q
@@ -20,7 +20,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Non-Vectorized, MapWork, Partitioned
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q
index 7e23d7f..e26a329 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table.q
@@ -18,7 +18,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q
index b599ed5..7ca5459 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_llap_io.q
@@ -19,7 +19,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do regular EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do regular EXPLAINs on ACID files because the write id causes Q file statistics differences...
 -- Instead explain vectorization only detail
 --
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
index 8174e38..65e68a6 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update.q
@@ -18,7 +18,7 @@ set hive.llap.io.enabled=false;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update_llap_io.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update_llap_io.q b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update_llap_io.q
index 8e3ba40..f1d20e6 100644
--- a/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update_llap_io.q
+++ b/ql/src/test/queries/clientpositive/schema_evol_orc_acidvec_table_update_llap_io.q
@@ -19,7 +19,7 @@ set hive.llap.io.encode.enabled=true;
 --
 -- FILE VARIATION: ORC, ACID Vectorized, MapWork, Table
 -- *IMPORTANT NOTE* We set hive.exec.schema.evolution=false above since schema evolution is always used for ACID.
--- Also, we don't do EXPLAINs on ACID files because the transaction id causes Q file statistics differences...
+-- Also, we don't do EXPLAINs on ACID files because the write id causes Q file statistics differences...
 --
 
 CREATE TABLE schema_evolution_data(insert_num int, boolean1 boolean, tinyint1 tinyint, smallint1 smallint, int1 int, bigint1 bigint, decimal1 decimal(38,18), float1 float, double1 double, string1 string, string2 string, date1 date, timestamp1 timestamp, boolean_str string, tinyint_str string, smallint_str string, int_str string, bigint_str string, decimal_str string, float_str string, double_str string, date_str string, timestamp_str string, filler string)

http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out b/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
index acecbae..30dc6eb 100644
--- a/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
+++ b/ql/src/test/results/clientnegative/invalid_cast_from_binary_1.q.out
@@ -6,4 +6,4 @@ POSTHOOK: query: create table tbl (a binary)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tbl
-FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint)  _FUNC_(boolean)  _FUNC_(decimal(38,18))  _FUNC_(double)  _FUNC_(float)  _FUNC_(smallint)  _FUNC_(string)  _FUNC_(struct<transactionid:bigint,bucketid:int,rowid:bigint>)  _FUNC_(timestamp)  _FUNC_(tinyint)  _FUNC_(void)  
+FAILED: SemanticException Line 0:-1 Wrong arguments 'a': No matching method for class org.apache.hadoop.hive.ql.udf.UDFToInteger with (binary). Possible choices: _FUNC_(bigint)  _FUNC_(boolean)  _FUNC_(decimal(38,18))  _FUNC_(double)  _FUNC_(float)  _FUNC_(smallint)  _FUNC_(string)  _FUNC_(struct<writeid:bigint,bucketid:int,rowid:bigint>)  _FUNC_(timestamp)  _FUNC_(tinyint)  _FUNC_(void)  

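The only change in this golden file is the field name inside the struct overload that UDFToInteger advertises (transactionid becomes writeid); that struct overload corresponds to the ROW__ID/RecordIdentifier type used by ACID plans. The negative test itself presumably exercises a binary-to-int cast along the lines of the sketch below (the actual query text is not part of this diff):

  -- no UDFToInteger overload accepts binary, so this fails with the SemanticException shown above
  select cast(a as int) from tbl;
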
http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/results/clientpositive/acid_subquery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_subquery.q.out b/ql/src/test/results/clientpositive/acid_subquery.q.out
index 77bafe7..1dc1775 100644
--- a/ql/src/test/results/clientpositive/acid_subquery.q.out
+++ b/ql/src/test/results/clientpositive/acid_subquery.q.out
@@ -99,4 +99,4 @@ POSTHOOK: Output: default@target@p=1/q=3
 POSTHOOK: Output: default@target@p=1/q=3
 POSTHOOK: Output: default@target@p=2/q=2
 POSTHOOK: Output: default@target@p=2/q=2
-POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(target)t.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), (target)t.FieldSchema(name:p, type:int, comment:null), (target)t.FieldSchema(name:q, type:int, comment:null), ]
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(target)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (target)t.FieldSchema(name:p, type:int, comment:null), (target)t.FieldSchema(name:q, type:int, comment:null), ]

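The golden-file churn below follows the same pattern as the lineage line above: wherever the ROW__ID virtual column appears, in lineage output, operator trees, reduce-sink key expressions and vectorization schema columns, its type is now spelled struct<writeid:bigint,bucketid:int,rowid:bigint> rather than the old transactionid form. At the SQL level the renamed field is reachable by ordinary struct access; a small sketch on a hypothetical ACID table t (not part of this patch):

  -- ROW__ID is the hidden ACID row identifier; after HIVE-18749 its first field is the write id
  select ROW__ID.writeid, ROW__ID.bucketid, ROW__ID.rowid, t.* from t;
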
http://git-wip-us.apache.org/repos/asf/hive/blob/8f93ca0b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
index 629b05d..89b7169 100644
--- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
@@ -124,11 +124,11 @@ STAGE PLANS:
                     predicate: (UDFToInteger(key)) IN (413, 43) (type: boolean)
                     Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
-                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), concat(value, 'updated') (type: string), ds (type: string)
+                      expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), key (type: string), concat(value, 'updated') (type: string), ds (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
-                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                        key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
                         value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
@@ -138,7 +138,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), '11' (type: string)
+                expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), '11' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
@@ -306,11 +306,11 @@ STAGE PLANS:
                     predicate: (key) IN ('1001', '213', '43') (type: boolean)
                     Statistics: Num rows: 20 Data size: 9100 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
-                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: string)
+                      expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
-                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                        key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
                         value expressions: _col1 (type: string), _col2 (type: string)
@@ -320,7 +320,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string)
+                expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
@@ -451,7 +451,7 @@ POSTHOOK: Output: default@srcpart_acid@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acid@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acid@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@srcpart_acid@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acid)t.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acid)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acid)t.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acid)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acid)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acid)t.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: select count(*) from srcpart_acid where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acid
@@ -623,11 +623,11 @@ STAGE PLANS:
                     predicate: (UDFToInteger(key)) IN (413, 43) (type: boolean)
                     Statistics: Num rows: 500 Data size: 181000 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
-                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), key (type: string), concat(value, 'updated') (type: string), ds (type: string)
+                      expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), key (type: string), concat(value, 'updated') (type: string), ds (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
-                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                        key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                         Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
@@ -638,7 +638,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), '11' (type: string)
+                expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), '11' (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 500 Data size: 308500 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
@@ -806,11 +806,11 @@ STAGE PLANS:
                     predicate: (key) IN ('1001', '213', '43') (type: boolean)
                     Statistics: Num rows: 20 Data size: 9100 Basic stats: COMPLETE Column stats: PARTIAL
                     Select Operator
-                      expressions: ROW__ID (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: string)
+                      expressions: ROW__ID (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), ds (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
                       Reduce Output Operator
-                        key expressions: _col0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>)
+                        key expressions: _col0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>)
                         sort order: +
                         Map-reduce partition columns: UDFToInteger(_col0) (type: int)
                         Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
@@ -821,7 +821,7 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: struct<transactionid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string)
+                expressions: KEY.reducesinkkey0 (type: struct<writeid:bigint,bucketid:int,rowid:bigint>), VALUE._col0 (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 20 Data size: 8880 Basic stats: COMPLETE Column stats: PARTIAL
                 File Output Operator
@@ -952,7 +952,7 @@ POSTHOOK: Output: default@srcpart_acidb@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidb@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidb@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@srcpart_acidb@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidb)t.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidb)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidb)t.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidb)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidb)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidb)t.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: select count(*) from srcpart_acidb where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidb
@@ -1123,7 +1123,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
@@ -1170,7 +1170,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 4
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [string]
             Reduce Operator Tree:
@@ -1322,7 +1322,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
@@ -1368,7 +1368,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -1487,7 +1487,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
@@ -1528,7 +1528,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         keyColumnNums: [2, 3, 0, 1]
@@ -1626,7 +1626,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -1649,7 +1649,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 5
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -1736,7 +1736,7 @@ POSTHOOK: Output: default@srcpart_acidv@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidv@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidv@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@srcpart_acidv@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidv)t.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidv)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidv)t.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidv)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidv)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidv)t.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: select count(*) from srcpart_acidv where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidv
@@ -1907,7 +1907,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
@@ -1955,7 +1955,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 4
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [string]
             Reduce Operator Tree:
@@ -2107,7 +2107,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Filter Vectorization:
                         className: VectorFilterOperator
                         native: true
@@ -2154,7 +2154,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -2274,7 +2274,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
@@ -2315,7 +2315,7 @@ STAGE PLANS:
             Map Operator Tree:
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:string, 1:value:string, 2:ds:string, 3:hr:string, 4:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                     Reduce Sink Vectorization:
                         className: VectorReduceSinkObjectHashOperator
                         keyColumnNums: [2, 3, 0, 1]
@@ -2413,7 +2413,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 3
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -2436,7 +2436,7 @@ STAGE PLANS:
                 vectorized: true
                 rowBatchContext:
                     dataColumnCount: 5
-                    dataColumns: KEY.reducesinkkey0:struct<transactionid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:string
+                    dataColumns: KEY.reducesinkkey0:struct<writeid:bigint,bucketid:int,rowid:bigint>, VALUE._col0:string, VALUE._col1:string, VALUE._col2:string, VALUE._col3:string
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
             Reduce Operator Tree:
@@ -2531,7 +2531,7 @@ POSTHOOK: Output: default@srcpart_acidvb@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidvb@ds=2008-04-09/hr=11
 POSTHOOK: Output: default@srcpart_acidvb@ds=2008-04-09/hr=12
 POSTHOOK: Output: default@srcpart_acidvb@ds=2008-04-09/hr=12
-POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidvb)t.FieldSchema(name:ROW__ID, type:struct<transactionId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidvb)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidvb)t.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidvb)t.FieldSchema(name:ROW__ID, type:struct<writeId:bigint,bucketId:int,rowId:bigint>, comment:), (srcpart_acidvb)t.FieldSchema(name:ds, type:string, comment:null), (srcpart_acidvb)t.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: select count(*) from srcpart_acidvb where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidvb