Posted to commits@doris.apache.org by ya...@apache.org on 2020/09/10 05:07:55 UTC

[incubator-doris] branch master updated: Add a session variable to show or hide hidden columns (#4510)

This is an automated email from the ASF dual-hosted git repository.

yangzhg pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new fe0260e  Add a session variable to show or hide hidden columns (#4510)
fe0260e is described below

commit fe0260e54f8dfa37260423cffcf42096de19ed1f
Author: Zhengguo Yang <ya...@gmail.com>
AuthorDate: Thu Sep 10 13:07:43 2020 +0800

    Add a session variable to show or hide hidden columns (#4510)
    
    * add session variable to show hidden columns
---
 docs/.vuepress/sidebar/en.js                       |   1 +
 docs/.vuepress/sidebar/zh-CN.js                    |   1 +
 .../load-data/batch-delete-manual.md               | 152 +++++++++++++++++++++
 .../load-data/batch-delete-manual.md               | 152 +++++++++++++++++++++
 .../java/org/apache/doris/analysis/SelectStmt.java |   2 +-
 .../java/org/apache/doris/catalog/Catalog.java     |   2 +-
 .../doris/catalog/MaterializedIndexMeta.java       |  11 +-
 .../java/org/apache/doris/catalog/OlapTable.java   |   4 +-
 .../java/org/apache/doris/catalog/SchemaTable.java |   2 +-
 .../main/java/org/apache/doris/catalog/Table.java  |   5 +-
 .../apache/doris/common/proc/IndexInfoProcDir.java |   2 +-
 .../java/org/apache/doris/common/util/Util.java    |   5 +
 .../org/apache/doris/planner/SchemaScanNode.java   |   2 +
 .../apache/doris/planner/SingleNodePlanner.java    |   3 +-
 .../java/org/apache/doris/qe/SessionVariable.java  |  14 +-
 .../apache/doris/service/FrontendServiceImpl.java  |   2 +-
 .../org/apache/doris/analysis/SelectStmtTest.java  |  12 +-
 .../org/apache/doris/qe/PartitionCacheTest.java    |  10 +-
 gensrc/thrift/FrontendService.thrift               |   1 +
 gensrc/thrift/PlanNodes.thrift                     |   1 +
 20 files changed, 370 insertions(+), 14 deletions(-)

diff --git a/docs/.vuepress/sidebar/en.js b/docs/.vuepress/sidebar/en.js
index 0531694..8c8dd9a 100644
--- a/docs/.vuepress/sidebar/en.js
+++ b/docs/.vuepress/sidebar/en.js
@@ -55,6 +55,7 @@ module.exports = [
           "insert-into-manual",
           "delete-manual",
           "load-json-format",
+          "batch-delete-manual",
         ],
         sidebarDepth: 2,
       },
diff --git a/docs/.vuepress/sidebar/zh-CN.js b/docs/.vuepress/sidebar/zh-CN.js
index 382c875..9bdd242 100644
--- a/docs/.vuepress/sidebar/zh-CN.js
+++ b/docs/.vuepress/sidebar/zh-CN.js
@@ -56,6 +56,7 @@ module.exports = [
           "spark-load-manual",
           "delete-manual",
           "load-json-format",
+          "batch-delete-manual",
         ],
         sidebarDepth: 2,
       },
diff --git a/docs/en/administrator-guide/load-data/batch-delete-manual.md b/docs/en/administrator-guide/load-data/batch-delete-manual.md
new file mode 100644
index 0000000..66bd90a
--- /dev/null
+++ b/docs/en/administrator-guide/load-data/batch-delete-manual.md
@@ -0,0 +1,152 @@
+---
+{
+    "title": "Batch Delete",
+    "language": "en"
+}
+---
+
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# Batch Delete
+Currently, Doris supports multiple load methods such as broker load, routine load, and stream load, but data can only be deleted with the DELETE statement. Each execution of DELETE generates a new data version, so frequent deletion seriously degrades query performance. Moreover, DELETE is implemented by generating an empty rowset that records the delete conditions, and every read must filter against those conditions, which also hurts performance when there are many of them. Compared with other systems, Greenplum's implementation is closer to a traditional database product, while Snowflake achieves this through its MERGE syntax.
+
+For scenarios such as loading CDC data, INSERT and DELETE rows are generally interleaved in the data stream. None of our current load methods can satisfy this: even if we could separate the inserts from the deletes, loading would work but deletion would remain unsolved. The batch delete feature addresses these scenarios.
+There are three merge modes for data loading:
+1. APPEND: all data is appended to the existing data
+2. DELETE: delete all rows whose key columns match the imported data
+3. MERGE: APPEND or DELETE per row, decided by the DELETE ON condition
+
+## Principle
+Batch delete is implemented by adding a hidden column `__DELETE_SIGN__`. Because batch delete only applies to the unique model, it suffices to add a hidden column whose type is bool and whose aggregate function is REPLACE. On the BE, all aggregation and write paths treat it like a normal column; for reads there are two schemes:
+
+When the FE expands `*` and similar wildcards, it removes `__DELETE_SIGN__` from the select list and adds the condition `__DELETE_SIGN__ != true` by default.
+When the BE reads, this column is added to the scan, and the condition determines whether a row counts as deleted.
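+
+A minimal sketch of the rewrite (assuming a unique-key table `tbl1` with key `k1` and value `v1`; the names are illustrative):
+
+```
+-- what the user writes:
+SELECT * FROM tbl1;
+-- what the FE effectively plans: the hidden column is stripped from the
+-- select list and the default filter is appended:
+SELECT k1, v1 FROM tbl1 WHERE __DELETE_SIGN__ != true;
+```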
+
+### Import
+
+During load, the FE sets the value of the hidden column to the value of the `DELETE ON` expression while parsing; all other aggregation behavior is the same as for a REPLACE aggregate column.
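+
+A sketch of the per-row derivation (illustrative; `label_c3` is the marker column used in the stream load example below):
+
+```
+-- during FE parsing, each imported row's hidden column is set to the
+-- value of the DELETE ON expression, e.g. for "delete: label_c3=1":
+--   __DELETE_SIGN__ = (label_c3 = 1)
+-- an APPEND load leaves __DELETE_SIGN__ false for every row;
+-- a DELETE load sets it to true for every row
+```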
+
+### Read
+
+At read time, the condition `__DELETE_SIGN__ != true` is added to every olapScanNode over a table that has the hidden column; the BE is unaware of this process and executes normally.
+
+### Cumulative Compaction
+
+During cumulative compaction, hidden columns are treated as normal columns and the compaction logic is unchanged.
+
+### Base Compaction
+
+During base compaction, rows marked as deleted are removed to reduce the space the data occupies.
+
+### Syntax
+The load syntax mainly adds a column mapping that designates the delete-marker column, and this column must be present in the imported data. Each load method is configured as follows.
+
+#### stream load
+
+For stream load, add a field designating the delete-marker column to the `columns` field in the header. Example:
+`-H "columns: k1, k2, label_c3" -H "merge_type: [MERGE|APPEND|DELETE]" -H "delete: label_c3=1"`
+
+#### broker load
+
+Specify the delete-marker column with the `DELETE ON` clause in the load statement:
+
+```
+LOAD LABEL db1.label1
+(
+    [MERGE|APPEND|DELETE] DATA INFILE("hdfs://abc.com:8888/user/palo/test/ml/file1")
+    INTO TABLE tbl1
+    COLUMNS TERMINATED BY ","
+    (tmp_c1, tmp_c2, label_c3)
+    SET
+    (
+        id=tmp_c2,
+        name=tmp_c1
+    )
+    [DELETE ON label=true]
+
+)
+WITH BROKER 'broker'
+(
+    "username"="user",
+    "password"="pass"
+)
+PROPERTIES
+(
+    "timeout" = "3600"
+    
+);
+
+```
+
+#### routine load
+
+Routine load adds the mapping in the `columns` field; the mapping method is the same as above. Example:
+
+```
+   CREATE ROUTINE LOAD example_db.test1 ON example_tbl
+    [WITH MERGE|APPEND|DELETE]
+    COLUMNS(k1, k2, k3, v1, v2, label),
+    WHERE k1 > 100 and k2 like "%doris%"
+    [DELETE ON label=true]
+    PROPERTIES
+    (
+        "desired_concurrent_number"="3",
+        "max_batch_interval" = "20",
+        "max_batch_rows" = "300000",
+        "max_batch_size" = "209715200",
+        "strict_mode" = "false"
+    )
+    FROM KAFKA
+    (
+        "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092",
+        "kafka_topic" = "my_topic",
+        "kafka_partitions" = "0,1,2,3",
+        "kafka_offsets" = "101,0,0,200"
+    );
+```
+
+## Enable batch delete support
+When creating a table, if the FE configuration item `enable_batch_delete_by_default` is true, every newly created table supports batch delete; if it is false, new tables do not support it by default.
+To use batch delete on a table that does not yet support it, run:
+`ALTER TABLE tablename ENABLE FEATURE "BATCH_DELETE"` to enable batch delete.
+To check whether a table supports batch delete, show hidden columns with the session variable `SET show_hidden_columns=true` and then run `desc tablename`; if the output contains a `__DELETE_SIGN__` column, batch delete is supported, otherwise it is not.
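+
+A minimal check (assuming a table named `tbl1`):
+
+```
+SET show_hidden_columns = true;
+desc tbl1;
+-- a __DELETE_SIGN__ column in the output means batch delete is enabled
+```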
+
+## Note
+1. Since load operations other than stream load may be executed out of order inside Doris, a `MERGE` load that is not a stream load must be used together with load sequence. For the exact syntax, refer to the documentation on the sequence column.
+2. A `DELETE ON` condition can only be used together with MERGE.
+
+## Usage example
+The following uses stream load as an example to show the usage.
+1. Load data normally:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: APPEND" -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+The APPEND parameter can be omitted; the effect is the same as the following statement:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+2. Delete all rows whose keys match the imported data:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: DELETE" -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+3. MERGE load: imported rows with `siteid=1` delete the table rows with matching keys, and the remaining rows are appended:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: MERGE" -H "delete: siteid=1" -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
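+
+To verify the effect of the MERGE load above (an illustrative check, using `table1` and `siteid` from the examples):
+```
+SELECT * FROM table1 WHERE siteid = 1;
+-- rows whose keys matched the imported siteid=1 rows are no longer returned
+```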
diff --git a/docs/zh-CN/administrator-guide/load-data/batch-delete-manual.md b/docs/zh-CN/administrator-guide/load-data/batch-delete-manual.md
new file mode 100644
index 0000000..25fefea
--- /dev/null
+++ b/docs/zh-CN/administrator-guide/load-data/batch-delete-manual.md
@@ -0,0 +1,152 @@
+---
+{
+    "title": "批量删除",
+    "language": "zh-CN"
+}
+---
+
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# Batch Delete
+Currently, Doris supports multiple load methods such as broker load, routine load, and stream load, but data can only be deleted with the DELETE statement. Each execution of DELETE generates a new data version, so frequent deletion seriously degrades query performance. Moreover, DELETE is implemented by generating an empty rowset that records the delete conditions, and every read must filter against those conditions, which also hurts performance when there are many of them. Compared with other systems, Greenplum's implementation is closer to a traditional database product, while Snowflake achieves this through its MERGE syntax.
+
+For scenarios such as loading CDC data, INSERT and DELETE rows are generally interleaved in the data stream. None of our current load methods can satisfy this: even if we could separate the inserts from the deletes, loading would work but deletion would remain unsolved. The batch delete feature addresses these scenarios.
+There are three merge modes for data loading:
+1. APPEND: all data is appended to the existing data
+2. DELETE: delete all rows whose key columns match the imported data
+3. MERGE: APPEND or DELETE per row, decided by the DELETE ON condition
+
+## Principle
+Batch delete is implemented by adding a hidden column `__DELETE_SIGN__`. Because batch delete only applies to the unique model, it suffices to add a hidden column whose type is bool and whose aggregate function is REPLACE. On the BE, all aggregation and write paths treat it like a normal column; for reads there are two schemes:
+
+When the FE expands `*` and similar wildcards, it removes `__DELETE_SIGN__` from the select list and adds the condition `__DELETE_SIGN__ != true` by default.
+When the BE reads, this column is added to the scan, and the condition determines whether a row counts as deleted.
+
+### Import
+
+During load, the FE sets the value of the hidden column to the value of the `DELETE ON` expression while parsing; all other aggregation behavior is the same as for a REPLACE aggregate column.
+
+### Read
+
+At read time, the condition `__DELETE_SIGN__ != true` is added to every olapScanNode over a table that has the hidden column; the BE is unaware of this process and executes normally.
+
+### Cumulative Compaction
+
+During cumulative compaction, hidden columns are treated as normal columns and the compaction logic is unchanged.
+
+### Base Compaction
+
+During base compaction, rows marked as deleted are removed to reduce the space the data occupies.
+
+### Syntax
+The load syntax mainly adds a column mapping that designates the delete-marker column, and this column must be present in the imported data. Each load method is configured as follows.
+
+#### stream load
+
+For stream load, add a field designating the delete-marker column to the `columns` field in the header. Example:
+`-H "columns: k1, k2, label_c3" -H "merge_type: [MERGE|APPEND|DELETE]" -H "delete: label_c3=1"`
+
+#### broker load
+
+Specify the delete-marker column with the `DELETE ON` clause in the load statement:
+
+```
+LOAD LABEL db1.label1
+(
+    [MERGE|APPEND|DELETE] DATA INFILE("hdfs://abc.com:8888/user/palo/test/ml/file1")
+    INTO TABLE tbl1
+    COLUMNS TERMINATED BY ","
+    (tmp_c1, tmp_c2, label_c3)
+    SET
+    (
+        id=tmp_c2,
+        name=tmp_c1
+    )
+    [DELETE ON label=true]
+
+)
+WITH BROKER 'broker'
+(
+    "username"="user",
+    "password"="pass"
+)
+PROPERTIES
+(
+    "timeout" = "3600"
+    
+);
+
+```
+
+#### routine load
+
+Routine load adds the mapping in the `columns` field; the mapping method is the same as above. Example:
+
+```
+   CREATE ROUTINE LOAD example_db.test1 ON example_tbl 
+    [WITH MERGE|APPEND|DELETE]
+    COLUMNS(k1, k2, k3, v1, v2, label),
+    WHERE k1 > 100 and k2 like "%doris%"
+    [DELETE ON label=true]
+    PROPERTIES
+    (
+        "desired_concurrent_number"="3",
+        "max_batch_interval" = "20",
+        "max_batch_rows" = "300000",
+        "max_batch_size" = "209715200",
+        "strict_mode" = "false"
+    )
+    FROM KAFKA
+    (
+        "kafka_broker_list" = "broker1:9092,broker2:9092,broker3:9092",
+        "kafka_topic" = "my_topic",
+        "kafka_partitions" = "0,1,2,3",
+        "kafka_offsets" = "101,0,0,200"
+    );
+```
+
+## Enable batch delete support
+When creating a table, if the FE configuration item `enable_batch_delete_by_default` is true, every newly created table supports batch delete; if it is false, new tables do not support it by default.
+To use batch delete on a table that does not yet support it, run:
+`ALTER TABLE tablename ENABLE FEATURE "BATCH_DELETE"` to enable batch delete.
+To check whether a table supports batch delete, show hidden columns with the session variable `SET show_hidden_columns=true` and then run `desc tablename`; if the output contains a `__DELETE_SIGN__` column, batch delete is supported, otherwise it is not.
+
+## Note
+1. Since load operations other than stream load may be executed out of order inside Doris, a `MERGE` load that is not a stream load must be used together with load sequence. For the exact syntax, refer to the documentation on the sequence column.
+2. A `DELETE ON` condition can only be used together with MERGE.
+
+## Usage example
+The following uses stream load as an example to show the usage.
+1. Load data normally:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: APPEND"  -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+The APPEND parameter can be omitted; the effect is the same as the following statement:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+2. Delete all rows whose keys match the imported data:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: DELETE"  -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
+3. MERGE load: imported rows with `siteid=1` delete the table rows with matching keys, and the remaining rows are appended:
+```
+curl --location-trusted -u root: -H "column_separator:," -H "columns: siteid, citycode, username, pv" -H "merge_type: MERGE" -H "delete: siteid=1"  -T ~/table1_data http://127.0.0.1:8130/api/test/table1/_stream_load
+```
\ No newline at end of file
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
index b9abe31..90c9a87 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
@@ -958,7 +958,7 @@ public class SelectStmt extends QueryStmt {
      * refs for each column to selectListExprs.
      */
     private void expandStar(TableName tblName, TupleDescriptor desc) {
-        for (Column col : desc.getTable().getBaseSchema(false)) {
+        for (Column col : desc.getTable().getBaseSchema()) {
             resultExprs.add(new SlotRef(tblName, col.getName()));
             colLabels.add(col.getName());
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
index 5e6bc76..58a864e 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -3947,7 +3947,7 @@ public class Catalog {
         sb.append("TABLE ");
         sb.append("`").append(table.getName()).append("` (\n");
         int idx = 0;
-        for (Column column : table.getBaseSchema(false)) {
+        for (Column column : table.getBaseSchema()) {
             if (idx++ != 0) {
                 sb.append(",\n");
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java
index 3d4e3f5..0e6712d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java
@@ -40,6 +40,7 @@ import java.io.IOException;
 import java.io.StringReader;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
 
 public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
     @SerializedName(value = "indexId")
@@ -93,7 +94,15 @@ public class MaterializedIndexMeta implements Writable, GsonPostProcessable {
     }
 
     public List<Column> getSchema() {
-        return schema;
+        return getSchema(true);
+    }
+
+    public List<Column> getSchema(boolean full) {
+        if (full) {
+            return schema;
+        } else {
+            return schema.stream().filter(column -> column.isVisible()).collect(Collectors.toList());
+        }
     }
 
     public int getSchemaHash() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index bb8deb4..806a6d4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -524,13 +524,13 @@ public class OlapTable extends Table {
     public Map<Long, List<Column>> getIndexIdToSchema() {
         Map<Long, List<Column>> result = Maps.newHashMap();
         for (Map.Entry<Long, MaterializedIndexMeta> entry : indexIdToMeta.entrySet()) {
-            result.put(entry.getKey(), entry.getValue().getSchema());
+            result.put(entry.getKey(), entry.getValue().getSchema(Util.showHiddenColumns()));
         }
         return result;
     }
 
     public List<Column> getSchemaByIndexId(Long indexId) {
-        return indexIdToMeta.get(indexId).getSchema();
+        return getSchemaByIndexId(indexId, Util.showHiddenColumns());
     }
 
     public List<Column> getSchemaByIndexId(Long indexId, boolean full) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
index a3788e2..b376ede 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
@@ -76,7 +76,7 @@ public class SchemaTable extends Table {
                             "tables",
                             TableType.SCHEMA,
                             builder()
-                                    .column("TABLE_CATALOG", ScalarType.createVarchar(FN_REFLEN))
+                                    .column(" TABLE_CATALOG", ScalarType.createVarchar(FN_REFLEN))
                                     .column("TABLE_SCHEMA", ScalarType.createVarchar(NAME_CHAR_LEN))
                                     .column("TABLE_NAME", ScalarType.createVarchar(NAME_CHAR_LEN))
                                     .column("TABLE_TYPE", ScalarType.createVarchar(NAME_CHAR_LEN))
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java
index 544ac2d..0e51fc9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Table.java
@@ -21,6 +21,7 @@ import org.apache.doris.analysis.CreateTableStmt;
 import org.apache.doris.common.FeMetaVersion;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.common.util.Util;
 import org.apache.doris.thrift.TTableDescriptor;
 
 import com.google.common.base.Preconditions;
@@ -141,12 +142,12 @@ public class Table extends MetaObject implements Writable {
     }
 
     public List<Column> getFullSchema() {
-        return fullSchema;
+        return getBaseSchema();
     }
 
     // should override in subclass if necessary
     public List<Column> getBaseSchema() {
-        return fullSchema;
+        return getBaseSchema(Util.showHiddenColumns());
     }
     public List<Column> getBaseSchema(boolean full) {
         if (full) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
index 4d29a88..5dc1f5d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
@@ -124,7 +124,7 @@ public class IndexInfoProcDir implements ProcDirInterface {
             Set<String> bfColumns = null;
             if (table.getType() == TableType.OLAP) {
                 OlapTable olapTable = (OlapTable) table;
-                schema = olapTable.getSchemaByIndexId(idxId, false);
+                schema = olapTable.getSchemaByIndexId(idxId);
                 if (schema == null) {
                     throw new AnalysisException("Index " + idxId + " does not exist");
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
index 673e3e3..9e4ac29 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
@@ -20,6 +20,7 @@ package org.apache.doris.common.util;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.PrimitiveType;
 import org.apache.doris.common.AnalysisException;
+import org.apache.doris.qe.ConnectContext;
 
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
@@ -477,5 +478,9 @@ public class Util {
         conn.setReadTimeout(readTimeoutMs);
         return conn.getInputStream();
     }
+
+    public static boolean showHiddenColumns() {
+        return ConnectContext.get() != null && ConnectContext.get().getSessionVariable().showHiddenColumns();
+    }
 }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java
index d54ad97..c5692c3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SchemaScanNode.java
@@ -22,6 +22,7 @@ import org.apache.doris.analysis.TupleDescriptor;
 import org.apache.doris.catalog.SchemaTable;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.UserException;
+import org.apache.doris.common.util.Util;
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.service.FrontendOptions;
 import org.apache.doris.thrift.TPlanNode;
@@ -91,6 +92,7 @@ public class SchemaScanNode extends ScanNode {
                 msg.schema_scan_node.setDb("SESSION");
             }
         }
+        msg.schema_scan_node.show_hidden_cloumns = Util.showHiddenColumns();
 
         if (schemaTable != null) {
             msg.schema_scan_node.setTable(schemaTable);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
index 76264a7..25557e7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
@@ -60,6 +60,7 @@ import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.FeConstants;
 import org.apache.doris.common.Reference;
 import org.apache.doris.common.UserException;
+import org.apache.doris.common.util.Util;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
@@ -1364,7 +1365,7 @@ public class SingleNodePlanner {
         switch (tblRef.getTable().getType()) {
             case OLAP:
                 OlapScanNode olapNode = new OlapScanNode(ctx_.getNextNodeId(), tblRef.getDesc(), "OlapScanNode");
-                if (((OlapTable) tblRef.getTable()).hasDeleteSign()) {
+                if (Util.showHiddenColumns() && ((OlapTable) tblRef.getTable()).hasDeleteSign()) {
                     Expr conjunct = new BinaryPredicate(BinaryPredicate.Operator.EQ,
                             new SlotRef(tblRef.getAliasAsName(), Column.DELETE_SIGN), new IntLiteral(0));
                     conjunct.analyze(analyzer);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
index 7f6831d..6b61e1f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/SessionVariable.java
@@ -85,6 +85,7 @@ public class SessionVariable implements Serializable, Writable {
     public static final String FORWARD_TO_MASTER = "forward_to_master";
     // user can set instance num after exchange, no need to be equal to nums of before exchange
     public static final String PARALLEL_EXCHANGE_INSTANCE_NUM = "parallel_exchange_instance_num";
+    public static final String SHOW_HIDDEN_COLUMNS = "show_hidden_columns";
     /*
      * configure the mem limit of load process on BE. 
      * Previously users used exec_mem_limit to set memory limits.
@@ -262,6 +263,8 @@ public class SessionVariable implements Serializable, Writable {
     private int maxScanKeyNum = -1;
     @VariableMgr.VarAttr(name = MAX_PUSHDOWN_CONDITIONS_PER_COLUMN)
     private int maxPushdownConditionsPerColumn = -1;
+    @VariableMgr.VarAttr(name = SHOW_HIDDEN_COLUMNS, flag = VariableMgr.SESSION_ONLY)
+    private boolean showHiddenColumns = false;
 
     public long getMaxExecMemByte() {
         return maxExecMemByte;
@@ -444,7 +447,7 @@ public class SessionVariable implements Serializable, Writable {
     public void setEnablePartitionCache(boolean enablePartitionCache) {
         this.enablePartitionCache = enablePartitionCache;
     }
-    
+
     // Serialize to thrift object
     public boolean getForwardToMaster() {
         return forwardToMaster;
@@ -509,6 +512,15 @@ public class SessionVariable implements Serializable, Writable {
         this.maxPushdownConditionsPerColumn = maxPushdownConditionsPerColumn;
     }
 
+    public boolean showHiddenColumns() {
+        return showHiddenColumns;
+    }
+
+    public void setShowHiddenColumns(boolean showHiddenColumns) {
+        this.showHiddenColumns = showHiddenColumns;
+    }
+
+
     // Serialize to thrift object
     // used for rest api
     public TQueryOptions toThrift() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
index b854934..57142cd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/service/FrontendServiceImpl.java
@@ -307,7 +307,7 @@ public class FrontendServiceImpl implements FrontendService.Iface {
             try {
                 Table table = db.getTable(params.getTableName());
                 if (table != null) {
-                    for (Column column : table.getBaseSchema()) {
+                    for (Column column : table.getBaseSchema(params.isShowHiddenColumns())) {
                         final TColumnDesc desc = new TColumnDesc(column.getName(), column.getDataType().toThrift());
                         final Integer precision = column.getOriginType().getPrecision();
                         if (precision != null) {
diff --git a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java b/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java
index 3df31d6..f6553f8 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/analysis/SelectStmtTest.java
@@ -19,8 +19,9 @@ package org.apache.doris.analysis;
 
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.Config;
-import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.common.util.Util;
 import org.apache.doris.planner.Planner;
+import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.qe.VariableMgr;
 import org.apache.doris.utframe.DorisAssert;
 import org.apache.doris.utframe.UtFrameUtils;
@@ -34,6 +35,9 @@ import org.junit.rules.ExpectedException;
 
 import java.util.UUID;
 
+import mockit.Mock;
+import mockit.MockUp;
+
 public class SelectStmtTest {
     private static String runningDir = "fe/mocked/DemoTest/" + UUID.randomUUID().toString() + "/";
     private static DorisAssert dorisAssert;
@@ -48,6 +52,12 @@ public class SelectStmtTest {
 
     @BeforeClass
     public static void setUp() throws Exception {
+        new MockUp<Util>() {
+            @Mock
+            public boolean showHiddenColumns() {
+                return true;
+            }
+        };
         Config.enable_batch_delete_by_default = true;
         UtFrameUtils.createMinDorisCluster(runningDir);
         String createTblStmtStr = "create table db1.tbl1(k1 varchar(32), k2 varchar(32), k3 varchar(32), k4 int) "
diff --git a/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java b/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java
index 5b3d297..d2f2333 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/qe/PartitionCacheTest.java
@@ -23,6 +23,7 @@ import org.apache.doris.common.util.SqlParserUtils;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.UserException;
+import org.apache.doris.common.util.Util;
 import org.apache.doris.thrift.TStorageType;
 
 import org.apache.doris.qe.ConnectScheduler;
@@ -77,6 +78,8 @@ import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.thrift.TUniqueId;
 
+import mockit.Mock;
+import mockit.MockUp;
 import mockit.Mocked;
 import mockit.Tested;
 import mockit.Injectable;
@@ -147,7 +150,12 @@ public class PartitionCacheTest {
     public void setUp() {
         MockedAuth.mockedAuth(auth);
         MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1");
-        
+        new MockUp<Util>() {
+            @Mock
+            public boolean showHiddenColumns() {
+                return true;
+            }
+        };
         db = new Database(1L, fullDbName);
             
         OlapTable tbl1 = createOrderTable();
diff --git a/gensrc/thrift/FrontendService.thrift b/gensrc/thrift/FrontendService.thrift
index 3aec2f7..69dccc5 100644
--- a/gensrc/thrift/FrontendService.thrift
+++ b/gensrc/thrift/FrontendService.thrift
@@ -66,6 +66,7 @@ struct TDescribeTableParams {
   3: optional string user   // deprecated
   4: optional string user_ip    // deprecated
   5: optional Types.TUserIdentity current_user_ident // to replace the user and user ip
+  6: optional bool show_hidden_columns = false
 }
 
 // Results of a call to describeTable()
diff --git a/gensrc/thrift/PlanNodes.thrift b/gensrc/thrift/PlanNodes.thrift
index f0af3a2..e680bf9 100644
--- a/gensrc/thrift/PlanNodes.thrift
+++ b/gensrc/thrift/PlanNodes.thrift
@@ -291,6 +291,7 @@ struct TSchemaScanNode {
   9: optional i64 thread_id
   10: optional string user_ip   // deprecated
   11: optional Types.TUserIdentity current_user_ident   // to replace the user and user_ip
+  12: optional bool show_hidden_cloumns = false
 }
 
 struct TMetaScanNode {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@doris.apache.org
For additional commands, e-mail: commits-help@doris.apache.org