You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by mb...@apache.org on 2021/11/17 13:19:25 UTC
[hive] branch master updated: HIVE-25690: Fix column reorder detection for Iceberg schema evolution (Marton Bod, reviewed by Adam Szita)
This is an automated email from the ASF dual-hosted git repository.
mbod pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 7c3899c HIVE-25690: Fix column reorder detection for Iceberg schema evolution (Marton Bod, reviewed by Adam Szita)
7c3899c is described below
commit 7c3899c51d87a31bcb49fc10750f0a23b67b4041
Author: Marton Bod <mb...@cloudera.com>
AuthorDate: Wed Nov 17 14:19:10 2021 +0100
HIVE-25690: Fix column reorder detection for Iceberg schema evolution (Marton Bod, reviewed by Adam Szita)
---
.../org/apache/iceberg/hive/HiveSchemaUtil.java | 58 +++++++++++++++++-----
.../iceberg/mr/hive/HiveIcebergMetaHook.java | 5 +-
.../mr/hive/TestHiveIcebergSchemaEvolution.java | 48 ++++++++++++++++++
.../src/test/queries/positive/llap_iceberg_read.q | 16 +++---
.../queries/positive/vectorized_iceberg_read.q | 4 +-
.../results/positive/llap/llap_iceberg_read.q.out | 32 ++++++------
.../positive/llap/vectorized_iceberg_read.q.out | 6 +--
.../results/positive/vectorized_iceberg_read.q.out | 6 +--
8 files changed, 128 insertions(+), 47 deletions(-)
diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
index 57123e1..cc9ad46 100644
--- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
+++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveSchemaUtil.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.util.Pair;
@@ -178,28 +179,61 @@ public final class HiveSchemaUtil {
}
/**
- * Compares a list of columns to another list, by name, to find an out of order column.
- * It iterates through updated one by one, and compares the name of the column to the name of the column in the old
- * list, in the same position. It returns the first mismatch it finds in updated, if any.
+ * Compares two lists of columns to each other to find the (singular) column that was moved. This works ideally for
+ * identifying the column that was moved by an ALTER TABLE ... CHANGE COLUMN command.
*
- * @param updated The list of the columns after some updates have taken place
+ * Note: This method is only suitable for finding a single reordered column.
+ * Consequently, this method is NOT suitable for handling scenarios where multiple column reorders are possible at the
+ * same time, such as ALTER TABLE ... REPLACE COLUMNS commands.
+ *
+ * @param updated The list of the columns after some updates have taken place (if any)
* @param old The list of the original columns
* @param renameMapping A map of name aliases for the updated columns (e.g. if a column rename occurred)
- * @return A pair consisting of the first out of order column name, and its preceding column name (if any).
+ * @return A pair consisting of the reordered column's name, and its preceding column's name (if any).
* Returns null in case there are no out of order columns.
*/
- public static Pair<String, Optional<String>> getFirstOutOfOrderColumn(List<FieldSchema> updated,
+ public static Pair<String, Optional<String>> getReorderedColumn(List<FieldSchema> updated,
List<FieldSchema> old,
Map<String, String> renameMapping) {
- for (int i = 0; i < updated.size() && i < old.size(); ++i) {
+ // first collect the updated index for each column
+ Map<String, Integer> nameToNewIndex = Maps.newHashMap();
+ for (int i = 0; i < updated.size(); ++i) {
String updatedCol = renameMapping.getOrDefault(updated.get(i).getName(), updated.get(i).getName());
- String oldCol = old.get(i).getName();
- if (!oldCol.equals(updatedCol)) {
- Optional<String> previousCol = i > 0 ? Optional.of(updated.get(i - 1).getName()) : Optional.empty();
- return Pair.of(updatedCol, previousCol);
+ nameToNewIndex.put(updatedCol, i);
+ }
+
+ // find the column which has the highest index difference between its position in the old vs the updated list
+ String reorderedColName = null;
+ int maxIndexDiff = 0;
+ for (int oldIndex = 0; oldIndex < old.size(); ++oldIndex) {
+ String oldName = old.get(oldIndex).getName();
+ Integer newIndex = nameToNewIndex.get(oldName);
+ if (newIndex != null) {
+ int indexDiff = Math.abs(newIndex - oldIndex);
+ if (maxIndexDiff < indexDiff) {
+ maxIndexDiff = indexDiff;
+ reorderedColName = oldName;
+ }
+ }
+ }
+
+ if (maxIndexDiff == 0) {
+ // if there are no changes in index, there were no reorders
+ return null;
+ } else {
+ int newIndex = nameToNewIndex.get(reorderedColName);
+ if (newIndex > 0) {
+ // if the newIndex > 0, that means the column was moved after another column:
+ // ALTER TABLE tbl CHANGE COLUMN reorderedColName reorderedColName type AFTER previousColName;
+ String previousColName = renameMapping.getOrDefault(
+ updated.get(newIndex - 1).getName(), updated.get(newIndex - 1).getName());
+ return Pair.of(reorderedColName, Optional.of(previousColName));
+ } else {
+ // if the newIndex is 0, that means the column was moved to the first position:
+ // ALTER TABLE tbl CHANGE COLUMN reorderedColName reorderedColName type FIRST;
+ return Pair.of(reorderedColName, Optional.empty());
}
}
- return null;
}
public static class SchemaDifference {
diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
index 8fcd866..f19b3b6 100644
--- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
+++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java
@@ -574,7 +574,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
schemaDifference.getMissingFromFirst().forEach(icebergCols::remove);
}
- Pair<String, Optional<String>> outOfOrder = HiveSchemaUtil.getFirstOutOfOrderColumn(
+ Pair<String, Optional<String>> outOfOrder = HiveSchemaUtil.getReorderedColumn(
hmsCols, icebergCols, ImmutableMap.of());
// limit the scope of this operation to only dropping columns
@@ -612,8 +612,7 @@ public class HiveIcebergMetaHook implements HiveMetaHook {
schemaDifference.getMissingFromSecond().get(0).getName(),
schemaDifference.getMissingFromFirst().get(0).getName());
}
- Pair<String, Optional<String>> outOfOrder = HiveSchemaUtil.getFirstOutOfOrderColumn(hmsCols, icebergCols,
- renameMapping);
+ Pair<String, Optional<String>> outOfOrder = HiveSchemaUtil.getReorderedColumn(hmsCols, icebergCols, renameMapping);
if (!schemaDifference.isEmpty() || outOfOrder != null) {
transaction = icebergTable.newTransaction();
diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergSchemaEvolution.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergSchemaEvolution.java
index c75d882..076d6af 100644
--- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergSchemaEvolution.java
+++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergSchemaEvolution.java
@@ -32,6 +32,7 @@ import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.hive.HiveSchemaUtil;
import org.apache.iceberg.mr.TestHelper;
+import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.types.Types;
import org.apache.thrift.TException;
import org.junit.Assert;
@@ -92,6 +93,53 @@ public class TestHiveIcebergSchemaEvolution extends HiveIcebergStorageHandlerWit
}
+ @Test
+ public void testColumnReorders() throws IOException {
+ Schema schema = new Schema(
+ required(1, "a", Types.LongType.get()),
+ required(2, "b", Types.StringType.get()),
+ required(3, "c", Types.StringType.get()),
+ required(4, "d", Types.IntegerType.get()),
+ required(5, "e", Types.IntegerType.get()),
+ required(6, "f", Types.StringType.get())
+ );
+ testTables.createTable(shell, "customers", schema, fileFormat, ImmutableList.of());
+ shell.executeStatement("INSERT INTO customers VALUES (1, 'foo', 'bar', 33, 44, 'baz'), " +
+ "(2, 'foo2', 'bar2', 55, 66, 'baz2')");
+
+ // move one position to the right
+ // a,b,c,d,e,f -> b,a,c,d,e,f
+ shell.executeStatement("ALTER TABLE customers CHANGE COLUMN a a bigint AFTER b");
+ List<Object[]> result = shell.executeStatement("SELECT * FROM customers ORDER BY a");
+ Assert.assertEquals(2, result.size());
+ Assert.assertArrayEquals(new Object[]{"foo", 1L, "bar", 33, 44, "baz"}, result.get(0));
+ Assert.assertArrayEquals(new Object[]{"foo2", 2L, "bar2", 55, 66, "baz2"}, result.get(1));
+
+ // move first to the last
+ // b,a,c,d,e,f -> a,c,d,e,f,b
+ shell.executeStatement("ALTER TABLE customers CHANGE COLUMN b b string AFTER f");
+ result = shell.executeStatement("SELECT * FROM customers ORDER BY a");
+ Assert.assertEquals(2, result.size());
+ Assert.assertArrayEquals(new Object[]{1L, "bar", 33, 44, "baz", "foo"}, result.get(0));
+ Assert.assertArrayEquals(new Object[]{2L, "bar2", 55, 66, "baz2", "foo2"}, result.get(1));
+
+ // move middle to the first
+ // a,c,d,e,f,b -> e,a,c,d,f,b
+ shell.executeStatement("ALTER TABLE customers CHANGE COLUMN e e int FIRST");
+ result = shell.executeStatement("SELECT * FROM customers ORDER BY a");
+ Assert.assertEquals(2, result.size());
+ Assert.assertArrayEquals(new Object[]{44, 1L, "bar", 33, "baz", "foo"}, result.get(0));
+ Assert.assertArrayEquals(new Object[]{66, 2L, "bar2", 55, "baz2", "foo2"}, result.get(1));
+
+ // move one position to the left
+ // e,a,c,d,f,b -> e,a,d,c,f,b
+ shell.executeStatement("ALTER TABLE customers CHANGE COLUMN d d int AFTER a");
+ result = shell.executeStatement("SELECT * FROM customers ORDER BY a");
+ Assert.assertEquals(2, result.size());
+ Assert.assertArrayEquals(new Object[]{44, 1L, 33, "bar", "baz", "foo"}, result.get(0));
+ Assert.assertArrayEquals(new Object[]{66, 2L, 55, "bar2", "baz2", "foo2"}, result.get(1));
+ }
+
// Tests CHANGE COLUMN feature similarly like above, but with a more complex schema, aimed to verify vectorized
// reads support the feature properly, also combining with other schema changes e.g. ADD COLUMN
@Test
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/llap_iceberg_read.q b/iceberg/iceberg-handler/src/test/queries/positive/llap_iceberg_read.q
index 3c10c1f..c692dd4 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/llap_iceberg_read.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/llap_iceberg_read.q
@@ -53,8 +53,8 @@ SELECT i.name, i.description, SUM(o.quantity) FROM llap_items i JOIN llap_orders
--adding a column
ALTER TABLE llap_items ADD COLUMNS (to60 float);
INSERT INTO llap_items VALUES
-(7, 'Model X', 93000, 'SUV', 'Long range', 3.8),
-(7, 'Model X', 113000, 'SUV', 'Plaid', 2.5);
+(7, 'Model X', 93000, 'Long range', 'SUV', 3.8),
+(7, 'Model X', 113000, 'Plaid', 'SUV', 2.5);
SELECT cat, min(to60) from llap_items group by cat;
--removing a column
@@ -70,7 +70,7 @@ SELECT name, min(to60), max(cost) FROM llap_items WHERE itemid > 3 GROUP BY name
ALTER TABLE llap_orders CHANGE tradets ordertime timestamp AFTER p2;
ALTER TABLE llap_orders CHANGE p1 region string;
INSERT INTO llap_orders VALUES
-(21, 21, 8, 'EU', timestamp('2000-01-04 19:55:46.129'), 'HU');
+(21, 21, 8, 'EU', 'HU', timestamp('2000-01-04 19:55:46.129'));
SELECT region, min(ordertime), sum(quantity) FROM llap_orders WHERE itemid > 5 GROUP BY region;
ALTER TABLE llap_orders CHANGE p2 state string;
@@ -79,25 +79,25 @@ SELECT region, state, min(ordertime), sum(quantity) FROM llap_orders WHERE itemi
--adding new column
ALTER TABLE llap_orders ADD COLUMNS (city string);
INSERT INTO llap_orders VALUES
-(22, 99, 9, 'EU', timestamp('2021-01-04 19:55:46.129'), 'DE', 'München');
+(22, 99, 9, 'EU', 'DE', timestamp('2021-01-04 19:55:46.129'), 'München');
SELECT state, max(city) from llap_orders WHERE region = 'EU' GROUP BY state;
--making it a partition column
ALTER TABLE llap_orders SET PARTITION SPEC (region, state, city);
INSERT INTO llap_orders VALUES
-(23, 89, 6, 'EU', timestamp('2021-02-04 19:55:46.129'), 'IT', 'Venezia');
+(23, 89, 6, 'EU', 'IT', timestamp('2021-02-04 19:55:46.129'), 'Venezia');
SELECT state, max(city), avg(itemid) from llap_orders WHERE region = 'EU' GROUP BY state;
--de-partitioning a column
ALTER TABLE llap_orders SET PARTITION SPEC (state, city);
INSERT INTO llap_orders VALUES
-(24, 88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'UK', 'London');
+(24, 88, 5, 'EU', 'UK', timestamp('2006-02-04 19:55:46.129'), 'London');
SELECT state, max(city), avg(itemid) from llap_orders WHERE region = 'EU' GROUP BY state;
--removing a column from schema
-ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', state string COMMENT 'from deserializer', city string);
+ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', state string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', city string);
INSERT INTO llap_orders VALUES
-(88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'FR', 'Paris');
+(88, 5, 'EU', 'FR', timestamp('2006-02-04 19:55:46.129'), 'Paris');
SELECT state, max(city), avg(itemid) from llap_orders WHERE region = 'EU' GROUP BY state;
diff --git a/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_read.q b/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_read.q
index 6227b17..073b679 100644
--- a/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_read.q
+++ b/iceberg/iceberg-handler/src/test/queries/positive/vectorized_iceberg_read.q
@@ -42,7 +42,7 @@ insert into tbl_ice_orc_parted values
-- query with projection of partition columns' subset
select p1, a, min(b) from tbl_ice_orc_parted group by p1, a;
--- required for reordering between differnt types
+-- required for reordering between different types
set hive.metastore.disallow.incompatible.col.type.changes=false;
-- move partition columns
@@ -59,7 +59,7 @@ describe tbl_ice_orc_parted;
-- should yield to the same result as previously
select p1, a, min(b) from tbl_ice_orc_parted group by p1, a;
-insert into tbl_ice_orc_parted values ('Europe', 3, 'cc', 'Austria');
+insert into tbl_ice_orc_parted values ('Europe', 'cc', 3, 'Austria');
-- projecting all columns
select p1, p2, a, min(b) from tbl_ice_orc_parted group by p1, p2, a;
diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read.q.out
index 185d912..23392e3 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/llap/llap_iceberg_read.q.out
@@ -144,14 +144,14 @@ POSTHOOK: type: ALTERTABLE_ADDCOLS
POSTHOOK: Input: default@llap_items
POSTHOOK: Output: default@llap_items
PREHOOK: query: INSERT INTO llap_items VALUES
-(7, 'Model X', 93000, 'SUV', 'Long range', 3.8),
-(7, 'Model X', 113000, 'SUV', 'Plaid', 2.5)
+(7, 'Model X', 93000, 'Long range', 'SUV', 3.8),
+(7, 'Model X', 113000, 'Plaid', 'SUV', 2.5)
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_items
POSTHOOK: query: INSERT INTO llap_items VALUES
-(7, 'Model X', 93000, 'SUV', 'Long range', 3.8),
-(7, 'Model X', 113000, 'SUV', 'Plaid', 2.5)
+(7, 'Model X', 93000, 'Long range', 'SUV', 3.8),
+(7, 'Model X', 113000, 'Plaid', 'SUV', 2.5)
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_items
@@ -216,12 +216,12 @@ POSTHOOK: type: ALTERTABLE_RENAMECOL
POSTHOOK: Input: default@llap_orders
POSTHOOK: Output: default@llap_orders
PREHOOK: query: INSERT INTO llap_orders VALUES
-(21, 21, 8, 'EU', timestamp('2000-01-04 19:55:46.129'), 'HU')
+(21, 21, 8, 'EU', 'HU', timestamp('2000-01-04 19:55:46.129'))
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_orders
POSTHOOK: query: INSERT INTO llap_orders VALUES
-(21, 21, 8, 'EU', timestamp('2000-01-04 19:55:46.129'), 'HU')
+(21, 21, 8, 'EU', 'HU', timestamp('2000-01-04 19:55:46.129'))
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_orders
@@ -264,12 +264,12 @@ POSTHOOK: type: ALTERTABLE_ADDCOLS
POSTHOOK: Input: default@llap_orders
POSTHOOK: Output: default@llap_orders
PREHOOK: query: INSERT INTO llap_orders VALUES
-(22, 99, 9, 'EU', timestamp('2021-01-04 19:55:46.129'), 'DE', 'München')
+(22, 99, 9, 'EU', 'DE', timestamp('2021-01-04 19:55:46.129'), 'München')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_orders
POSTHOOK: query: INSERT INTO llap_orders VALUES
-(22, 99, 9, 'EU', timestamp('2021-01-04 19:55:46.129'), 'DE', 'München')
+(22, 99, 9, 'EU', 'DE', timestamp('2021-01-04 19:55:46.129'), 'München')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_orders
@@ -295,12 +295,12 @@ POSTHOOK: type: ALTERTABLE_SETPARTSPEC
POSTHOOK: Input: default@llap_orders
POSTHOOK: Output: default@llap_orders
PREHOOK: query: INSERT INTO llap_orders VALUES
-(23, 89, 6, 'EU', timestamp('2021-02-04 19:55:46.129'), 'IT', 'Venezia')
+(23, 89, 6, 'EU', 'IT', timestamp('2021-02-04 19:55:46.129'), 'Venezia')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_orders
POSTHOOK: query: INSERT INTO llap_orders VALUES
-(23, 89, 6, 'EU', timestamp('2021-02-04 19:55:46.129'), 'IT', 'Venezia')
+(23, 89, 6, 'EU', 'IT', timestamp('2021-02-04 19:55:46.129'), 'Venezia')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_orders
@@ -326,12 +326,12 @@ POSTHOOK: type: ALTERTABLE_SETPARTSPEC
POSTHOOK: Input: default@llap_orders
POSTHOOK: Output: default@llap_orders
PREHOOK: query: INSERT INTO llap_orders VALUES
-(24, 88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'UK', 'London')
+(24, 88, 5, 'EU', 'UK', timestamp('2006-02-04 19:55:46.129'), 'London')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_orders
POSTHOOK: query: INSERT INTO llap_orders VALUES
-(24, 88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'UK', 'London')
+(24, 88, 5, 'EU', 'UK', timestamp('2006-02-04 19:55:46.129'), 'London')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_orders
@@ -349,21 +349,21 @@ FR NULL 2.75
HU NULL 7.0
IT Venezia 6.0
UK London 2.0
-PREHOOK: query: ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', state string COMMENT 'from deserializer', city string)
+PREHOOK: query: ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', state string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', city string)
PREHOOK: type: ALTERTABLE_REPLACECOLS
PREHOOK: Input: default@llap_orders
PREHOOK: Output: default@llap_orders
-POSTHOOK: query: ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', state string COMMENT 'from deserializer', city string)
+POSTHOOK: query: ALTER TABLE llap_orders REPLACE COLUMNS (quantity int, itemid int, region string COMMENT 'from deserializer', state string COMMENT 'from deserializer', ordertime timestamp COMMENT 'from deserializer', city string)
POSTHOOK: type: ALTERTABLE_REPLACECOLS
POSTHOOK: Input: default@llap_orders
POSTHOOK: Output: default@llap_orders
PREHOOK: query: INSERT INTO llap_orders VALUES
-(88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'FR', 'Paris')
+(88, 5, 'EU', 'FR', timestamp('2006-02-04 19:55:46.129'), 'Paris')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@llap_orders
POSTHOOK: query: INSERT INTO llap_orders VALUES
-(88, 5, 'EU', timestamp('2006-02-04 19:55:46.129'), 'FR', 'Paris')
+(88, 5, 'EU', 'FR', timestamp('2006-02-04 19:55:46.129'), 'Paris')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@llap_orders
diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/vectorized_iceberg_read.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/vectorized_iceberg_read.q.out
index cff315c..c5935a3 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/llap/vectorized_iceberg_read.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/llap/vectorized_iceberg_read.q.out
@@ -301,8 +301,8 @@ POSTHOOK: query: describe tbl_ice_orc_parted
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@tbl_ice_orc_parted
p1 string from deserializer
-a int from deserializer
b string from deserializer
+a int from deserializer
p2 string from deserializer
# Partition Transform Information
@@ -319,11 +319,11 @@ POSTHOOK: Input: default@tbl_ice_orc_parted
#### A masked pattern was here ####
America 2 aa
Europe 1 aa
-PREHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 3, 'cc', 'Austria')
+PREHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 'cc', 3, 'Austria')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@tbl_ice_orc_parted
-POSTHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 3, 'cc', 'Austria')
+POSTHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 'cc', 3, 'Austria')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tbl_ice_orc_parted
diff --git a/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_read.q.out b/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_read.q.out
index b0d5923..cfdef05 100644
--- a/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_read.q.out
+++ b/iceberg/iceberg-handler/src/test/results/positive/vectorized_iceberg_read.q.out
@@ -229,8 +229,8 @@ POSTHOOK: query: describe tbl_ice_orc_parted
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@tbl_ice_orc_parted
p1 string from deserializer
-a int from deserializer
b string from deserializer
+a int from deserializer
p2 string from deserializer
# Partition Transform Information
@@ -247,11 +247,11 @@ POSTHOOK: Input: default@tbl_ice_orc_parted
POSTHOOK: Output: hdfs://### HDFS PATH ###
America 2 aa
Europe 1 aa
-PREHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 3, 'cc', 'Austria')
+PREHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 'cc', 3, 'Austria')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@tbl_ice_orc_parted
-POSTHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 3, 'cc', 'Austria')
+POSTHOOK: query: insert into tbl_ice_orc_parted values ('Europe', 'cc', 3, 'Austria')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@tbl_ice_orc_parted