Posted to commits@doris.apache.org by mo...@apache.org on 2023/01/27 15:52:23 UTC

[doris] branch master updated: [fix](multi catalog)Support parquet and orc upper case column name (#16111)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 1589d453a3 [fix](multi catalog)Support parquet and orc upper case column name (#16111)
1589d453a3 is described below

commit 1589d453a3229aee1085c8b408e7e93e17cb8c34
Author: Jibing-Li <64...@users.noreply.github.com>
AuthorDate: Fri Jan 27 23:52:11 2023 +0800

    [fix](multi catalog)Support parquet and orc upper case column name (#16111)
    
    Column names of external HMS catalog tables in Doris are all lower case,
    while Iceberg tables, or Hive tables created by Spark SQL, may contain upper case
    column names, which led to empty query results. This PR fixes that bug:
    1. For Parquet files, convert all column names to lower case while parsing the Parquet metadata.
    2. For ORC files, store the original column names and the lower case column names in two vectors, and use whichever form each call site needs.
    3. On the FE side, map each column name back to its original Iceberg spelling when doing convertToIcebergExpr.
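    
    As a minimal standalone illustration of the mismatch (a sketch, not the
    reader code; the column name "ID" is invented for the example), a lookup
    keyed by the file's original spelling misses Doris' lower case name, while
    a lower-cased key hits:
    
        #include <algorithm>
        #include <cctype>
        #include <iostream>
        #include <string>
        #include <unordered_map>
        
        int main() {
            // "ID" is how e.g. a Spark SQL created file spells the column;
            // Doris always asks for the lower case "id".
            std::unordered_map<std::string, int> by_original = {{"ID", 0}};
            std::cout << (by_original.count("id") ? "found" : "missing") << "\n"; // missing
        
            // Lower-casing the key first makes the lookup succeed.
            std::string lower = "ID";
            std::transform(lower.begin(), lower.end(), lower.begin(),
                           [](unsigned char c) { return std::tolower(c); });
            std::unordered_map<std::string, int> by_lower = {{lower, 0}};
            std::cout << (by_lower.count("id") ? "found" : "missing") << "\n"; // found
            return 0;
        }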
---
 be/src/vec/exec/format/orc/vorc_reader.cpp         |  34 ++++---
 be/src/vec/exec/format/orc/vorc_reader.h           |   3 +
 be/src/vec/exec/format/parquet/schema_desc.cpp     |   8 +-
 be/src/vec/exec/format/table/iceberg_reader.cpp    |   4 +-
 be/src/vec/exec/format/table/iceberg_reader.h      |   4 +-
 .../org/apache/doris/catalog/IcebergTable.java     |   5 +
 .../doris/external/iceberg/util/IcebergUtils.java  |  14 +--
 .../planner/external/IcebergScanProvider.java      |   4 +-
 .../hive/test_upper_case_column_name.out           |  89 ++++++++++++++++++
 .../hive/test_upper_case_column_name.groovy        | 103 +++++++++++++++++++++
 10 files changed, 243 insertions(+), 25 deletions(-)

diff --git a/be/src/vec/exec/format/orc/vorc_reader.cpp b/be/src/vec/exec/format/orc/vorc_reader.cpp
index 339c150422..2b2e0c1029 100644
--- a/be/src/vec/exec/format/orc/vorc_reader.cpp
+++ b/be/src/vec/exec/format/orc/vorc_reader.cpp
@@ -193,7 +193,7 @@ Status OrcReader::init_reader(
     auto& selected_type = _row_reader->getSelectedType();
     _col_orc_type.resize(selected_type.getSubtypeCount());
     for (int i = 0; i < selected_type.getSubtypeCount(); ++i) {
-        _colname_to_idx[selected_type.getFieldName(i)] = i;
+        _colname_to_idx[_get_field_name_lower_case(&selected_type, i)] = i;
         _col_orc_type[i] = selected_type.getSubtype(i);
     }
     return Status::OK();
@@ -238,7 +238,7 @@ Status OrcReader::get_parsed_schema(std::vector<std::string>* col_names,
 
     auto& root_type = _reader->getType();
     for (int i = 0; i < root_type.getSubtypeCount(); ++i) {
-        col_names->emplace_back(root_type.getFieldName(i));
+        col_names->emplace_back(_get_field_name_lower_case(&root_type, i));
         col_types->emplace_back(_convert_to_doris_type(root_type.getSubtype(i)));
     }
     return Status::OK();
@@ -246,15 +246,20 @@ Status OrcReader::get_parsed_schema(std::vector<std::string>* col_names,
 
 Status OrcReader::_init_read_columns() {
     auto& root_type = _reader->getType();
-    std::unordered_set<std::string> orc_cols;
+    std::vector<std::string> orc_cols;
+    std::vector<std::string> orc_cols_lower_case;
     for (int i = 0; i < root_type.getSubtypeCount(); ++i) {
-        orc_cols.emplace(root_type.getFieldName(i));
+        orc_cols.emplace_back(root_type.getFieldName(i));
+        orc_cols_lower_case.emplace_back(_get_field_name_lower_case(&root_type, i));
     }
     for (auto& col_name : _column_names) {
-        if (orc_cols.find(col_name) == orc_cols.end()) {
+        auto iter = std::find(orc_cols_lower_case.begin(), orc_cols_lower_case.end(), col_name);
+        if (iter == orc_cols_lower_case.end()) {
             _missing_cols.emplace_back(col_name);
         } else {
-            _read_cols.emplace_back(col_name);
+            int pos = std::distance(orc_cols_lower_case.begin(), iter);
+            _read_cols.emplace_back(orc_cols[pos]);
+            _read_cols_lower_case.emplace_back(col_name);
         }
     }
     return Status::OK();
@@ -512,7 +517,7 @@ void OrcReader::_init_search_argument(
     auto& root_type = _reader->getType();
     std::unordered_map<std::string, const orc::Type*> type_map;
     for (int i = 0; i < root_type.getSubtypeCount(); ++i) {
-        type_map.emplace(root_type.getFieldName(i), root_type.getSubtype(i));
+        type_map.emplace(_get_field_name_lower_case(&root_type, i), root_type.getSubtype(i));
     }
     for (auto it = colname_to_value_range->begin(); it != colname_to_value_range->end(); ++it) {
         auto type_it = type_map.find(it->first);
@@ -592,7 +597,7 @@ TypeDescriptor OrcReader::_convert_to_doris_type(const orc::Type* orc_type) {
         TypeDescriptor struct_type(PrimitiveType::TYPE_STRUCT);
         for (int i = 0; i < orc_type->getSubtypeCount(); ++i) {
             struct_type.children.emplace_back(_convert_to_doris_type(orc_type->getSubtype(i)));
-            struct_type.field_names.emplace_back(orc_type->getFieldName(i));
+            struct_type.field_names.emplace_back(_get_field_name_lower_case(orc_type, i));
         }
         return struct_type;
     }
@@ -605,7 +610,8 @@ std::unordered_map<std::string, TypeDescriptor> OrcReader::get_name_to_type() {
     std::unordered_map<std::string, TypeDescriptor> map;
     auto& root_type = _reader->getType();
     for (int i = 0; i < root_type.getSubtypeCount(); ++i) {
-        map.emplace(root_type.getFieldName(i), _convert_to_doris_type(root_type.getSubtype(i)));
+        map.emplace(_get_field_name_lower_case(&root_type, i),
+                    _convert_to_doris_type(root_type.getSubtype(i)));
     }
     return map;
 }
@@ -614,7 +620,7 @@ Status OrcReader::get_columns(std::unordered_map<std::string, TypeDescriptor>* n
                               std::unordered_set<std::string>* missing_cols) {
     auto& root_type = _reader->getType();
     for (int i = 0; i < root_type.getSubtypeCount(); ++i) {
-        name_to_type->emplace(root_type.getFieldName(i),
+        name_to_type->emplace(_get_field_name_lower_case(&root_type, i),
                               _convert_to_doris_type(root_type.getSubtype(i)));
     }
     for (auto& col : _missing_cols) {
@@ -769,6 +775,12 @@ Status OrcReader::_orc_column_to_doris_column(const std::string& col_name,
     return Status::InternalError("Unsupported type for column '{}'", col_name);
 }
 
+std::string OrcReader::_get_field_name_lower_case(const orc::Type* orc_type, int pos) {
+    std::string name = orc_type->getFieldName(pos);
+    transform(name.begin(), name.end(), name.begin(), ::tolower);
+    return name;
+}
+
 Status OrcReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
     SCOPED_RAW_TIMER(&_statistics.column_read_time);
     {
@@ -780,7 +792,7 @@ Status OrcReader::get_next_block(Block* block, size_t* read_rows, bool* eof) {
         }
     }
     const auto& batch_vec = down_cast<orc::StructVectorBatch*>(_batch.get())->fields;
-    for (auto& col : _read_cols) {
+    for (auto& col : _read_cols_lower_case) {
         auto& column_with_type_and_name = block->get_by_name(col);
         auto& column_ptr = column_with_type_and_name.column;
         auto& column_type = column_with_type_and_name.type;
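
To make the two-vector lookup in _init_read_columns above concrete, here is a
condensed, self-contained sketch (illustrative column names, not the reader's
actual types). A match in the lower case vector indexes the original spelling,
which is what the ORC row reader must be given, while Doris keeps using the
lower case name; the lambda takes unsigned char so std::tolower stays well
defined for non-ASCII bytes.

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        // Names as stored in the ORC file, plus lower case twins kept at
        // the same indices so a match in one vector indexes the other.
        std::vector<std::string> orc_cols = {"ID", "Name"};
        std::vector<std::string> orc_cols_lower_case;
        for (const auto& c : orc_cols) {
            std::string lower = c;
            std::transform(lower.begin(), lower.end(), lower.begin(),
                           [](unsigned char ch) { return std::tolower(ch); });
            orc_cols_lower_case.push_back(lower);
        }

        // Doris asks for lower case names; resolve each back to the
        // original spelling for the file reader.
        for (const std::string& col_name : {std::string("id"), std::string("name")}) {
            auto iter = std::find(orc_cols_lower_case.begin(),
                                  orc_cols_lower_case.end(), col_name);
            if (iter != orc_cols_lower_case.end()) {
                int pos = std::distance(orc_cols_lower_case.begin(), iter);
                std::cout << col_name << " -> " << orc_cols[pos] << "\n"; // id -> ID
            }
        }
        return 0;
    }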
diff --git a/be/src/vec/exec/format/orc/vorc_reader.h b/be/src/vec/exec/format/orc/vorc_reader.h
index 74ef9977a4..daf2e8cd68 100644
--- a/be/src/vec/exec/format/orc/vorc_reader.h
+++ b/be/src/vec/exec/format/orc/vorc_reader.h
@@ -254,6 +254,8 @@ private:
                                      const MutableColumnPtr& data_column, orc::ListVectorBatch* lvb,
                                      size_t num_values, size_t* element_size);
 
+    std::string _get_field_name_lower_case(const orc::Type* orc_type, int pos);
+
     RuntimeProfile* _profile;
     const TFileScanRangeParams& _scan_params;
     const TFileRangeDesc& _scan_range;
@@ -265,6 +267,7 @@ private:
     cctz::time_zone _time_zone;
 
     std::list<std::string> _read_cols;
+    std::list<std::string> _read_cols_lower_case;
     std::list<std::string> _missing_cols;
     std::unordered_map<std::string, int> _colname_to_idx;
     std::vector<const orc::Type*> _col_orc_type;
diff --git a/be/src/vec/exec/format/parquet/schema_desc.cpp b/be/src/vec/exec/format/parquet/schema_desc.cpp
index b8b9b07184..e1a5225ff8 100644
--- a/be/src/vec/exec/format/parquet/schema_desc.cpp
+++ b/be/src/vec/exec/format/parquet/schema_desc.cpp
@@ -129,7 +129,9 @@ Status FieldDescriptor::parse_node_field(const std::vector<tparquet::SchemaEleme
         auto child = &node_field->children[0];
         parse_physical_field(t_schema, false, child);
 
-        node_field->name = t_schema.name;
+        std::string lower_case_name = t_schema.name;
+        transform(lower_case_name.begin(), lower_case_name.end(), lower_case_name.begin(), ::tolower);
+        node_field->name = lower_case_name;
         node_field->type.type = TYPE_ARRAY;
         node_field->is_nullable = false;
         _next_schema_pos = curr_pos + 1;
@@ -146,7 +148,9 @@ Status FieldDescriptor::parse_node_field(const std::vector<tparquet::SchemaEleme
 
 void FieldDescriptor::parse_physical_field(const tparquet::SchemaElement& physical_schema,
                                            bool is_nullable, FieldSchema* physical_field) {
-    physical_field->name = physical_schema.name;
+    std::string lower_case_name = physical_schema.name;
+    transform(lower_case_name.begin(), lower_case_name.end(), lower_case_name.begin(), ::tolower);
+    physical_field->name = lower_case_name;
     physical_field->parquet_schema = physical_schema;
     physical_field->is_nullable = is_nullable;
     physical_field->physical_type = physical_schema.type;
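
One caveat with the std::transform calls in the Parquet change above: writing
through dest.begin() is only safe if the destination already holds enough
characters, so either copy the source first and transform in place, or grow an
empty destination through std::back_inserter. A generic illustration of both
safe patterns (not code from this commit):

    #include <algorithm>
    #include <cctype>
    #include <iterator>
    #include <string>

    int main() {
        std::string src = "MixedCase";

        // Pattern 1: copy first, then transform in place over the copy.
        std::string a = src;
        std::transform(a.begin(), a.end(), a.begin(),
                       [](unsigned char c) { return std::tolower(c); });

        // Pattern 2: let back_inserter grow an empty destination.
        std::string b;
        std::transform(src.begin(), src.end(), std::back_inserter(b),
                       [](unsigned char c) { return std::tolower(c); });

        return a == b ? 0 : 1; // both yield "mixedcase"
    }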
diff --git a/be/src/vec/exec/format/table/iceberg_reader.cpp b/be/src/vec/exec/format/table/iceberg_reader.cpp
index 145c57672c..adc31e605b 100644
--- a/be/src/vec/exec/format/table/iceberg_reader.cpp
+++ b/be/src/vec/exec/format/table/iceberg_reader.cpp
@@ -60,8 +60,8 @@ IcebergTableReader::IcebergTableReader(GenericReader* file_format_reader, Runtim
 }
 
 Status IcebergTableReader::init_reader(
-        std::vector<std::string>& file_col_names,
-        std::unordered_map<int, std::string>& col_id_name_map,
+        const std::vector<std::string>& file_col_names,
+        const std::unordered_map<int, std::string>& col_id_name_map,
         std::unordered_map<std::string, ColumnValueRangeType>* colname_to_value_range,
         VExprContext* vconjunct_ctx) {
     ParquetReader* parquet_reader = static_cast<ParquetReader*>(_file_format_reader.get());
diff --git a/be/src/vec/exec/format/table/iceberg_reader.h b/be/src/vec/exec/format/table/iceberg_reader.h
index 767cecb726..93a6963c80 100644
--- a/be/src/vec/exec/format/table/iceberg_reader.h
+++ b/be/src/vec/exec/format/table/iceberg_reader.h
@@ -57,8 +57,8 @@ public:
                        std::unordered_set<std::string>* missing_cols) override;
 
     Status init_reader(
-            std::vector<std::string>& file_col_names,
-            std::unordered_map<int, std::string>& col_id_name_map,
+            const std::vector<std::string>& file_col_names,
+            const std::unordered_map<int, std::string>& col_id_name_map,
             std::unordered_map<std::string, ColumnValueRangeType>* colname_to_value_range,
             VExprContext* vconjunct_ctx);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/IcebergTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/IcebergTable.java
index 834f30fab0..8d6907ff6a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/IcebergTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/IcebergTable.java
@@ -34,6 +34,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.Schema;
 import org.apache.iceberg.TableProperties;
 import org.apache.iceberg.TableScan;
 import org.apache.iceberg.catalog.TableIdentifier;
@@ -171,6 +172,10 @@ public class IcebergTable extends Table {
         return fileFormat;
     }
 
+    public Schema getIcebergSchema() {
+        return icebergTable.schema();
+    }
+
     private org.apache.iceberg.Table getTable() throws Exception {
         if (isLoaded.get()) {
             Preconditions.checkNotNull(icebergTable);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/util/IcebergUtils.java b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/util/IcebergUtils.java
index 325541e79d..876842ea06 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/util/IcebergUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/iceberg/util/IcebergUtils.java
@@ -220,7 +220,7 @@ public class IcebergUtils {
         return DorisTypeVisitor.visit(type, new DorisTypeToType());
     }
 
-    public static Expression convertToIcebergExpr(Expr expr) {
+    public static Expression convertToIcebergExpr(Expr expr, Schema schema) {
         if (expr == null) {
             return null;
         }
@@ -241,23 +241,23 @@ public class IcebergUtils {
             CompoundPredicate compoundPredicate = (CompoundPredicate) expr;
             switch (compoundPredicate.getOp()) {
                 case AND: {
-                    Expression left = convertToIcebergExpr(compoundPredicate.getChild(0));
-                    Expression right = convertToIcebergExpr(compoundPredicate.getChild(1));
+                    Expression left = convertToIcebergExpr(compoundPredicate.getChild(0), schema);
+                    Expression right = convertToIcebergExpr(compoundPredicate.getChild(1), schema);
                     if (left != null && right != null) {
                         return Expressions.and(left, right);
                     }
                     return null;
                 }
                 case OR: {
-                    Expression left = convertToIcebergExpr(compoundPredicate.getChild(0));
-                    Expression right = convertToIcebergExpr(compoundPredicate.getChild(1));
+                    Expression left = convertToIcebergExpr(compoundPredicate.getChild(0), schema);
+                    Expression right = convertToIcebergExpr(compoundPredicate.getChild(1), schema);
                     if (left != null && right != null) {
                         return Expressions.or(left, right);
                     }
                     return null;
                 }
                 case NOT: {
-                    Expression child = convertToIcebergExpr(compoundPredicate.getChild(0));
+                    Expression child = convertToIcebergExpr(compoundPredicate.getChild(0), schema);
                     if (child != null) {
                         return Expressions.not(child);
                     }
@@ -290,6 +290,8 @@ public class IcebergUtils {
                     return null;
                 }
                 String colName = slotRef.getColumnName();
+                Types.NestedField nestedField = schema.caseInsensitiveFindField(colName);
+                colName = nestedField.name();
                 Object value = extractDorisLiteral(literalExpr);
                 if (value == null) {
                     if (opCode == TExprOpcode.EQ_FOR_NULL && literalExpr instanceof NullLiteral) {
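
The Java change above resolves the lower case Doris column name back to the
Iceberg field's original spelling via Schema.caseInsensitiveFindField, so the
pushed-down predicate binds against the real schema. A language-neutral sketch
of the same idea (the Field struct and the lookup helper are invented for the
example, not the Iceberg API):

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for an Iceberg schema: fields keep their original spelling.
    struct Field {
        std::string name;
    };

    static std::string to_lower(std::string s) {
        std::transform(s.begin(), s.end(), s.begin(),
                       [](unsigned char c) { return std::tolower(c); });
        return s;
    }

    // Case-insensitive lookup that returns the original field, mirroring
    // what caseInsensitiveFindField provides on the Java side.
    static const Field* find_field_case_insensitive(const std::vector<Field>& schema,
                                                    const std::string& col_name) {
        for (const auto& f : schema) {
            if (to_lower(f.name) == to_lower(col_name)) {
                return &f;
            }
        }
        return nullptr;
    }

    int main() {
        std::vector<Field> schema = {{"ID"}, {"Name"}};
        // The predicate arrives with Doris' lower case name...
        const Field* f = find_field_case_insensitive(schema, "id");
        // ...and is pushed down with the original spelling.
        if (f != nullptr) {
            std::cout << f->name << "\n"; // ID
        }
        return 0;
    }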
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java b/fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java
index 4b8bdbee98..24dc94fc0e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/external/IcebergScanProvider.java
@@ -129,14 +129,14 @@ public class IcebergScanProvider extends HiveScanProvider {
     @Override
     public List<InputSplit> getSplits(List<Expr> exprs) throws UserException {
         List<Expression> expressions = new ArrayList<>();
+        org.apache.iceberg.Table table = HiveMetaStoreClientHelper.getIcebergTable(hmsTable);
         for (Expr conjunct : exprs) {
-            Expression expression = IcebergUtils.convertToIcebergExpr(conjunct);
+            Expression expression = IcebergUtils.convertToIcebergExpr(conjunct, table.schema());
             if (expression != null) {
                 expressions.add(expression);
             }
         }
 
-        org.apache.iceberg.Table table = HiveMetaStoreClientHelper.getIcebergTable(hmsTable);
         TableScan scan = table.newScan();
         TableSnapshot tableSnapshot = desc.getRef().getTableSnapshot();
         if (tableSnapshot != null) {
diff --git a/regression-test/data/external_table_emr_p2/hive/test_upper_case_column_name.out b/regression-test/data/external_table_emr_p2/hive/test_upper_case_column_name.out
new file mode 100644
index 0000000000..1b39ef2771
--- /dev/null
+++ b/regression-test/data/external_table_emr_p2/hive/test_upper_case_column_name.out
@@ -0,0 +1,89 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !hiveParquet1 --
+1	name
+
+-- !hiveParquet2 --
+1	name
+
+-- !hiveParquet3 --
+
+-- !hiveParquet4 --
+1	name
+
+-- !hiveParquet5 --
+
+-- !hiveParquet6 --
+1
+
+-- !hiveParquet7 --
+name
+
+-- !hiveParquet8 --
+1	name
+
+-- !hiveOrc1 --
+1	name
+
+-- !hiveOrc2 --
+1	name
+
+-- !hiveOrc3 --
+
+-- !hiveOrc4 --
+1	name
+
+-- !hiveOrc5 --
+
+-- !hiveOrc6 --
+1
+
+-- !hiveOrc7 --
+name
+
+-- !hiveOrc8 --
+1	name
+
+-- !icebergParquet1 --
+1	name
+
+-- !icebergParquet2 --
+1	name
+
+-- !icebergParquet3 --
+
+-- !icebergParquet4 --
+1	name
+
+-- !icebergParquet5 --
+
+-- !icebergParquet6 --
+1
+
+-- !icebergParquet7 --
+name
+
+-- !icebergParquet8 --
+1	name
+
+-- !icebergOrc1 --
+1	name
+
+-- !icebergOrc2 --
+1	name
+
+-- !icebergOrc3 --
+
+-- !icebergOrc4 --
+1	name
+
+-- !icebergOrc5 --
+
+-- !icebergOrc6 --
+1
+
+-- !icebergOrc7 --
+name
+
+-- !icebergOrc8 --
+1	name
+
diff --git a/regression-test/suites/external_table_emr_p2/hive/test_upper_case_column_name.groovy b/regression-test/suites/external_table_emr_p2/hive/test_upper_case_column_name.groovy
new file mode 100644
index 0000000000..30e78a0512
--- /dev/null
+++ b/regression-test/suites/external_table_emr_p2/hive/test_upper_case_column_name.groovy
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("upper_case_column_name", "p2") {
+    def hiveParquet1 = """select * from hive_upper_case_parquet;"""
+    def hiveParquet2 = """select * from hive_upper_case_parquet where id=1;"""
+    def hiveParquet3 = """select * from hive_upper_case_parquet where id>1;"""
+    def hiveParquet4 = """select * from hive_upper_case_parquet where name='name';"""
+    def hiveParquet5 = """select * from hive_upper_case_parquet where name!='name';"""
+    def hiveParquet6 = """select id from hive_upper_case_parquet where id=1;"""
+    def hiveParquet7 = """select name from hive_upper_case_parquet where id=1;"""
+    def hiveParquet8 = """select id, name from hive_upper_case_parquet where id=1;"""
+    def hiveOrc1 = """select * from hive_upper_case_orc;"""
+    def hiveOrc2 = """select * from hive_upper_case_orc where id=1;"""
+    def hiveOrc3 = """select * from hive_upper_case_orc where id>1;"""
+    def hiveOrc4 = """select * from hive_upper_case_orc where name='name';"""
+    def hiveOrc5 = """select * from hive_upper_case_orc where name!='name';"""
+    def hiveOrc6 = """select id from hive_upper_case_orc where id=1;"""
+    def hiveOrc7 = """select name from hive_upper_case_orc where id=1;"""
+    def hiveOrc8 = """select id, name from hive_upper_case_orc where id=1;"""
+    def icebergParquet1 = """select * from iceberg_upper_case_parquet;"""
+    def icebergParquet2 = """select * from iceberg_upper_case_parquet where id=1;"""
+    def icebergParquet3 = """select * from iceberg_upper_case_parquet where id>1;"""
+    def icebergParquet4 = """select * from iceberg_upper_case_parquet where name='name';"""
+    def icebergParquet5 = """select * from iceberg_upper_case_parquet where name!='name';"""
+    def icebergParquet6 = """select id from iceberg_upper_case_parquet where id=1;"""
+    def icebergParquet7 = """select name from iceberg_upper_case_parquet where id=1;"""
+    def icebergParquet8 = """select id, name from iceberg_upper_case_parquet where id=1;"""
+    def icebergOrc1 = """select * from iceberg_upper_case_orc;"""
+    def icebergOrc2 = """select * from iceberg_upper_case_orc where id=1;"""
+    def icebergOrc3 = """select * from iceberg_upper_case_orc where id>1;"""
+    def icebergOrc4 = """select * from iceberg_upper_case_orc where name='name';"""
+    def icebergOrc5 = """select * from iceberg_upper_case_orc where name!='name';"""
+    def icebergOrc6 = """select id from iceberg_upper_case_orc where id=1;"""
+    def icebergOrc7 = """select name from iceberg_upper_case_orc where id=1;"""
+    def icebergOrc8 = """select id, name from iceberg_upper_case_orc where id=1;"""
+
+
+    String enabled = context.config.otherConfigs.get("enableExternalHiveTest")
+    if (enabled != null && enabled.equalsIgnoreCase("true")) {
+        String extHiveHmsHost = context.config.otherConfigs.get("extHiveHmsHost")
+        String extHiveHmsPort = context.config.otherConfigs.get("extHiveHmsPort")
+        String catalog_name = "upper_case"
+        sql """drop catalog if exists ${catalog_name};"""
+        sql """
+            create catalog if not exists ${catalog_name} properties (
+                'type'='hms',
+                'hive.metastore.uris' = 'thrift://${extHiveHmsHost}:${extHiveHmsPort}'
+            );
+        """
+        logger.info("catalog " + catalog_name + " created")
+        sql """switch ${catalog_name};"""
+        logger.info("switched to catalog " + catalog_name)
+        sql """use multi_catalog;"""
+        qt_hiveParquet1 hiveParquet1
+        qt_hiveParquet2 hiveParquet2
+        qt_hiveParquet3 hiveParquet3
+        qt_hiveParquet4 hiveParquet4
+        qt_hiveParquet5 hiveParquet5
+        qt_hiveParquet6 hiveParquet6
+        qt_hiveParquet7 hiveParquet7
+        qt_hiveParquet8 hiveParquet8
+        qt_hiveOrc1 hiveOrc1
+        qt_hiveOrc2 hiveOrc2
+        qt_hiveOrc3 hiveOrc3
+        qt_hiveOrc4 hiveOrc4
+        qt_hiveOrc5 hiveOrc5
+        qt_hiveOrc6 hiveOrc6
+        qt_hiveOrc7 hiveOrc7
+        qt_hiveOrc8 hiveOrc8
+        qt_icebergParquet1 icebergParquet1
+        qt_icebergParquet2 icebergParquet2
+        qt_icebergParquet3 icebergParquet3
+        qt_icebergParquet4 icebergParquet4
+        qt_icebergParquet5 icebergParquet5
+        qt_icebergParquet6 icebergParquet6
+        qt_icebergParquet7 icebergParquet7
+        qt_icebergParquet8 icebergParquet8
+        qt_icebergOrc1 icebergOrc1
+        qt_icebergOrc2 icebergOrc2
+        qt_icebergOrc3 icebergOrc3
+        qt_icebergOrc4 icebergOrc4
+        qt_icebergOrc5 icebergOrc5
+        qt_icebergOrc6 icebergOrc6
+        qt_icebergOrc7 icebergOrc7
+        qt_icebergOrc8 icebergOrc8
+    }
+}
+

