Posted to commits@doris.apache.org by mo...@apache.org on 2020/03/01 13:30:43 UTC

[incubator-doris] branch master updated: [Temp Partition] Support add/drop/replace temp partitions (#2828)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new df56588  [Temp Partition] Support add/drop/replace temp partitions (#2828)
df56588 is described below

commit df56588bb5e9e8c7c74e77787246d82d997b9398
Author: Mingyu Chen <mo...@gmail.com>
AuthorDate: Sun Mar 1 21:30:34 2020 +0800

    [Temp Partition] Support add/drop/replace temp partitions (#2828)
    
    This CL implements 3 new operations:
    
    ```
    ALTER TABLE tbl ADD TEMPORARY PARTITION ...;
    ALTER TABLE tbl DROP TEMPORARY PARTITION ...;
    ALTER TABLE tbl REPLACE TEMPORARY PARTITION (p1, p2, ...);
    ```
    
    The user manual can be found in the document:
    `docs/documentation/cn/administrator-guide/alter-table/alter-table-temp-partition.md`
    
    I did not update the grammar manual in `alter-table.md`.
    That manual is too large and confusing; I will reorganize it later.
    
    This is the first part of the "overwrite load" feature mentioned in issue #2663.
    I will implement the "load to temp partition" feature in a follow-up PR.
    
    This CL also adds GSON serialization methods for the following classes (not yet used):
    
    ```
    Partition.java
    MaterializedIndex.java
    Tablet.java
    Replica.java
    ```
---
 .../alter-table/alter-table-bitmap-index.md        |   1 +
 .../alter-table/alter-table-temp-partition.md      | 199 +++++++
 .../alter-table/alter-table-bitmap-index_EN.md     |   4 +-
 .../documentation/en/administrator-guide/index.rst |   1 +
 fe/src/main/cup/sql_parser.cup                     |  33 +-
 fe/src/main/java/org/apache/doris/alter/Alter.java |  17 +-
 .../doris/alter/MaterializedViewHandler.java       |  10 +
 .../apache/doris/alter/SchemaChangeHandler.java    |   7 +-
 .../apache/doris/analysis/AddPartitionClause.java  |  12 +-
 .../apache/doris/analysis/DropPartitionClause.java |   9 +-
 .../apache/doris/analysis/PartitionKeyDesc.java    |   2 +-
 .../doris/analysis/ReplacePartitionClause.java     | 116 ++++
 .../apache/doris/analysis/ShowPartitionsStmt.java  |  27 +-
 .../doris/analysis/SingleRangePartitionDesc.java   |   2 +-
 .../java/org/apache/doris/catalog/Catalog.java     | 183 +++++--
 .../apache/doris/catalog/CatalogRecycleBin.java    |  12 +-
 .../org/apache/doris/catalog/DistributionInfo.java |   6 +
 .../apache/doris/catalog/HashDistributionInfo.java |   3 +
 .../apache/doris/catalog/MaterializedIndex.java    |  19 +-
 .../java/org/apache/doris/catalog/OlapTable.java   | 164 +++++-
 .../java/org/apache/doris/catalog/Partition.java   |  15 +-
 .../apache/doris/catalog/RangePartitionInfo.java   | 109 +---
 .../java/org/apache/doris/catalog/Replica.java     |  14 +-
 .../main/java/org/apache/doris/catalog/Tablet.java |   8 +-
 .../org/apache/doris/catalog/TempPartitions.java   | 145 +++++
 .../doris/clone/DynamicPartitionScheduler.java     |  13 +-
 .../org/apache/doris/common/FeMetaVersion.java     |   5 +-
 .../doris/common/proc/PartitionsProcDir.java       | 186 +++----
 .../org/apache/doris/common/proc/TableProcDir.java |  10 +-
 .../apache/doris/common/util/PropertyAnalyzer.java |  18 +-
 .../org/apache/doris/common/util/RangeUtils.java   | 210 +++++++
 .../org/apache/doris/http/action/SystemAction.java |   2 +
 .../org/apache/doris/journal/JournalEntity.java    |   9 +-
 .../doris/journal/local/LocalJournalCursor.java    |   3 +-
 .../doris/load/routineload/RoutineLoadJob.java     |   1 -
 .../apache/doris/persist/DropPartitionInfo.java    |  46 +-
 .../java/org/apache/doris/persist/EditLog.java     |   9 +
 .../org/apache/doris/persist/OperationType.java    |   1 +
 .../apache/doris/persist/PartitionPersistInfo.java |  22 +-
 .../persist/ReplacePartitionOperationLog.java      |  93 ++++
 .../apache/doris/persist/TruncateTableInfo.java    |  39 +-
 .../doris/persist/gson/GsonPostProcessable.java    |  22 +
 .../org/apache/doris/persist/gson/GsonUtils.java   |  48 +-
 .../org/apache/doris/planner/OlapScanNode.java     |   6 +-
 .../apache/doris/planner/SingleNodePlanner.java    |   2 +-
 .../java/org/apache/doris/qe/ShowExecutor.java     |  13 +-
 .../java/org/apache/doris/task/LoadEtlTask.java    |   2 +-
 .../doris/task/UpdateTabletMetaInfoTask.java       |  11 +-
 fe/src/main/jflex/sql_scanner.flex                 |   1 +
 .../doris/analysis/ShowPartitionsStmtTest.java     |  10 +-
 .../apache/doris/catalog/TempPartitionTest.java    | 604 +++++++++++++++++++++
 .../org/apache/doris/http/DorisHttpTestCase.java   |  16 +-
 .../gson/GsonDerivedClassSerializationTest.java    |  15 +-
 .../org/apache/doris/utframe/UtFrameUtils.java     |  15 +-
 54 files changed, 2186 insertions(+), 364 deletions(-)

diff --git a/docs/documentation/cn/administrator-guide/alter-table/alter-table-bitmap-index.md b/docs/documentation/cn/administrator-guide/alter-table/alter-table-bitmap-index.md
index cc22e5e..5911995 100644
--- a/docs/documentation/cn/administrator-guide/alter-table/alter-table-bitmap-index.md
+++ b/docs/documentation/cn/administrator-guide/alter-table/alter-table-bitmap-index.md
@@ -39,6 +39,7 @@ create/drop index syntax
 2. Show index
 
     Refer to [SHOW INDEX](../../sql-reference/sql-statements/Administration/SHOW%20INDEX.md)
+
 3. Drop index
 
     Refer to [DROP INDEX](../../sql-reference/sql-statements/Data%20Definition/DROP%20INDEX.md)
diff --git a/docs/documentation/cn/administrator-guide/alter-table/alter-table-temp-partition.md b/docs/documentation/cn/administrator-guide/alter-table/alter-table-temp-partition.md
new file mode 100644
index 0000000..c93e0c0
--- /dev/null
+++ b/docs/documentation/cn/administrator-guide/alter-table/alter-table-temp-partition.md
@@ -0,0 +1,199 @@
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# Temporary Partition
+
+Doris supports the temporary partition feature since version 0.12.
+
+A temporary partition belongs to a partitioned table. Only partitioned tables can create temporary partitions.
+
+## Rules
+
+* The partition columns of a temporary partition are the same as those of the formal partitions and cannot be modified.
+* The ranges of all temporary partitions of a table must not overlap with each other, but the range of a temporary partition may overlap with the range of a formal partition.
+* The name of a temporary partition must not duplicate that of any formal partition or other temporary partition.
+
+## Supported Operations
+
+Temporary partitions support add, drop and replace operations.
+
+### Adding a Temporary Partition
+
+A temporary partition can be added to a table with the `ALTER TABLE ADD TEMPORARY PARTITION` statement:
+
+```
+ALTER TABLE tbl1 ADD TEMPORARY PARTITION tp1 VALUES LESS THAN("2020-02-01");
+
+ALTER TABLE tbl2 ADD TEMPORARY PARTITION tp1 VALUES [("2020-01-01"), ("2020-02-01"));
+
+ALTER TABLE tbl1 ADD TEMPORARY PARTITION tp1 VALUES LESS THAN("2020-02-01")
+("in_memory" = "true", "replication_num" = "1")
+DISTRIBUTED BY HASH(k1) BUCKETS 5;
+```
+
+See `HELP ALTER TABLE;` for more help and examples.
+
+Notes on the add operation:
+
+* Adding a temporary partition is similar to adding a formal partition. The range of a temporary partition is independent of the formal partitions.
+* A temporary partition can specify its own properties, including the number of buckets, the number of replicas, whether it is an in-memory table, the storage medium, and so on.
+
+### Dropping a Temporary Partition
+
+A temporary partition can be dropped from a table with the `ALTER TABLE DROP TEMPORARY PARTITION` statement:
+
+```
+ALTER TABLE tbl1 DROP TEMPORARY PARTITION tp1;
+```
+
+See `HELP ALTER TABLE;` for more help and examples.
+
+Notes on the drop operation:
+
+* Dropping a temporary partition does not affect the data of formal partitions.
+
+### Replacing Partitions
+
+Formal partitions of a table can be replaced with temporary partitions using the `ALTER TABLE REPLACE PARTITION` statement.
+
+```
+ALTER TABLE tbl1 REPLACE PARTITION (p1) WITH TEMPORARY PARTITION (tp1);
+
+ALTER TABLE tbl1 REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION (tp1, tp2, tp3);
+
+ALTER TABLE tbl1 REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION (tp1, tp2)
+PROPERTIES (
+    "strict_range" = "false",
+    "use_temp_partition_name" = "true"
+);
+```
+
+See `HELP ALTER TABLE;` for more help and examples.
+
+The replace operation has two special optional properties:
+
+1. `strict_range`
+
+    Defaults to true. When this parameter is true, the union of the ranges of all formal partitions to be replaced must be exactly the same as the union of the ranges of the replacing temporary partitions. When set to false, it is only required that, after the replacement, the ranges of the new formal partitions do not overlap with each other. Examples:
+    
+    * Example 1
+    
+        Ranges of partitions p1, p2, p3 to be replaced (=> union):
+        
+        ```
+        [10, 20), [20, 30), [40, 50) => [10, 30), [40, 50)
+        ```
+        
+        Ranges of the replacing partitions tp1, tp2 (=> union):
+        
+        ```
+        [10, 30), [40, 45), [45, 50) => [10, 30), [40, 50)
+        ```
+        
+        The range unions are the same, so tp1 and tp2 can replace p1, p2, p3.
+    
+    * Example 2
+    
+        Range of partition p1 to be replaced (=> union):
+        
+        ```
+        [10, 50) => [10, 50)
+        ```
+    
+        Ranges of the replacing partitions tp1, tp2 (=> union):
+        
+        ```
+        [10, 30), [40, 50) => [10, 30), [40, 50)
+        ```
+        
+        The range unions are not the same. If `strict_range` is true, tp1 and tp2 cannot replace p1. If it is false, and the two resulting partition ranges `[10, 30), [40, 50)` do not overlap with other formal partitions, the replacement is allowed.
+
+2. `use_temp_partition_name`
+
+    Defaults to false. When this parameter is false and the number of partitions to be replaced is the same as the number of replacing partitions, the names of the formal partitions remain unchanged after the replacement. If it is true, the formal partitions take the names of the replacing partitions after the replacement. Examples:
+    
+    * Example 1
+    
+        ```
+        ALTER TABLE tbl1 REPLACE PARTITION (p1) WITH TEMPORARY PARTITION (tp1);
+        ```
+        
+        With `use_temp_partition_name` at its default of false, the partition is still named p1 after the replacement, but its data and properties are replaced with those of tp1.
+        
+        If `use_temp_partition_name` is set to true, the partition is named tp1 after the replacement and partition p1 no longer exists.
+        
+    * Example 2
+
+        ```
+        ALTER TABLE tbl1 REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION (tp1);
+        ```
+
+        `use_temp_partition_name` defaults to false, but because the number of partitions to be replaced differs from the number of replacing partitions, this parameter has no effect. After the replacement, the partition is named tp1, and p1 and p2 no longer exist.
+        
+Notes on the replace operation:
+
+* After partitions are successfully replaced, the replaced partitions are deleted and cannot be recovered.
+
+## Loading into Temporary Partitions
+
+(TODO)
+
+## Relationship with Other Operations
+
+### DROP
+
+* After a database or table is dropped directly with a Drop operation, it can be recovered with the Recover command (within a limited time), and its temporary partitions are recovered along with it.
+* After a formal partition is dropped with the Alter command, it can be recovered with the Recover command (within a limited time), as shown below. This operation is unrelated to temporary partitions.
+* After a temporary partition is dropped with the Alter command, it cannot be recovered with the Recover command.
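+
+As a minimal sketch with hypothetical names, a formal partition p1 of tbl1 that was dropped within the retention window could be recovered like this:
+
+```
+ALTER TABLE tbl1 DROP PARTITION p1;
+RECOVER PARTITION p1 FROM tbl1;
+```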
+
+### TRUNCATE
+
+* When a table is emptied with the Truncate command, its temporary partitions are deleted and cannot be recovered.
+* Truncating a formal partition does not affect temporary partitions (see the example below).
+* The Truncate command cannot be used to empty a temporary partition.
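+
+A minimal sketch with hypothetical names: truncating the formal partition p1 leaves any temporary partitions of tbl1 untouched:
+
+```
+TRUNCATE TABLE tbl1 PARTITION (p1);
+```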
+
+### ALTER
+
+* When a table has temporary partitions, the Alter command cannot be used to perform Schema Change, Rollup or other alteration operations on it.
+* A temporary partition cannot be added to a table while the table is undergoing an alteration operation.
+
+
+## Best Practices
+
+1. Atomic overwrite
+
+    In some cases a user wants to rewrite the data of a partition, but with a drop-then-load approach there is a window during which the data cannot be queried. Instead, the user can first create a corresponding temporary partition, load the new data into it, and then atomically replace the original partition with the replace operation, as sketched below.
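+    
+    A minimal sketch, assuming tbl1 has a formal partition p1 that covers exactly [2020-01-01, 2020-02-01) (hypothetical names and ranges):
+    
+    ```
+    ALTER TABLE tbl1 ADD TEMPORARY PARTITION tp1 VALUES [("2020-01-01"), ("2020-02-01"));
+    -- load the new data into tp1 (see "Loading into Temporary Partitions" above, currently TODO), then:
+    ALTER TABLE tbl1 REPLACE PARTITION (p1) WITH TEMPORARY PARTITION (tp1);
+    ```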
+    
+2. Changing the number of buckets
+
+    In some cases an unsuitable number of buckets was chosen when a partition was created. The user can first create a temporary partition covering the same range and specify a new bucket count, copy the data of the formal partition into the temporary partition with `INSERT INTO`, and then atomically replace the original partition with the replace operation, as sketched below.
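+    
+    A minimal sketch, assuming p1 covers exactly [2020-01-01, 2020-02-01), the distribution column is k1 as in the examples above, and 32 is the desired new bucket count (all hypothetical):
+    
+    ```
+    ALTER TABLE tbl1 ADD TEMPORARY PARTITION tp1 VALUES [("2020-01-01"), ("2020-02-01"))
+    DISTRIBUTED BY HASH(k1) BUCKETS 32;
+    -- copy the data of p1 into tp1 with INSERT INTO, then:
+    ALTER TABLE tbl1 REPLACE PARTITION (p1) WITH TEMPORARY PARTITION (tp1);
+    ```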
+    
+3. Merging or splitting partitions
+
+    In some cases a user wants to modify partition ranges, for example merging two partitions or splitting one large partition into several smaller ones. The user can first create temporary partitions covering the merged or split ranges, copy the data of the formal partitions into them with `INSERT INTO`, and then atomically replace the original partitions with the replace operation, as sketched below.
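+    
+    A minimal sketch of merging two partitions p202001 and p202002 into one (hypothetical names, assuming their ranges together cover exactly [2020-01-01, 2020-03-01)):
+    
+    ```
+    ALTER TABLE tbl1 ADD TEMPORARY PARTITION tp202001_202002 VALUES [("2020-01-01"), ("2020-03-01"));
+    -- copy the data of p202001 and p202002 into the temporary partition with INSERT INTO, then:
+    ALTER TABLE tbl1 REPLACE PARTITION (p202001, p202002) WITH TEMPORARY PARTITION (tp202001_202002);
+    ```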
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/documentation/en/administrator-guide/alter-table/alter-table-bitmap-index_EN.md b/docs/documentation/en/administrator-guide/alter-table/alter-table-bitmap-index_EN.md
index 7aa72a0..2143b02 100644
--- a/docs/documentation/en/administrator-guide/alter-table/alter-table-bitmap-index_EN.md
+++ b/docs/documentation/en/administrator-guide/alter-table/alter-table-bitmap-index_EN.md
@@ -40,10 +40,10 @@ create/drop index syntax
 2. Show Index
 
     Please refer to [SHOW INDEX](../../sql-reference/sql-statements/Administration/SHOW%20INDEX_EN.md)
+
 3. Drop Index
 
-    Please refer to [DROP INDEX](../../sql-reference/sql-statements/Data%20Definition/DROP%20INDEX_EN.md) or [ALTER TABLE
-    ](../../sql-reference/sql-statements/Data%20Definition/ALTER%20TABLE_EN.md#description)
+    Please refer to [DROP INDEX](../../sql-reference/sql-statements/Data%20Definition/DROP%20INDEX_EN.md) or [ALTER TABLE](../../sql-reference/sql-statements/Data%20Definition/ALTER%20TABLE_EN.md#description)
 
 ## Create Job
 Please refer to [Schema Change](alter-table-schema-change_EN.md#Create Job)
diff --git a/docs/documentation/en/administrator-guide/index.rst b/docs/documentation/en/administrator-guide/index.rst
index a1237db..6e1f2ab 100644
--- a/docs/documentation/en/administrator-guide/index.rst
+++ b/docs/documentation/en/administrator-guide/index.rst
@@ -6,6 +6,7 @@ Administrator Guide
     :hidden:
 
     load-data/index
+    alter-table/index
     http-actions/index
     operation/index
     config/index
diff --git a/fe/src/main/cup/sql_parser.cup b/fe/src/main/cup/sql_parser.cup
index 62f3925..0b9eae2 100644
--- a/fe/src/main/cup/sql_parser.cup
+++ b/fe/src/main/cup/sql_parser.cup
@@ -226,7 +226,7 @@ terminal String KW_ADD, KW_ADMIN, KW_AFTER, KW_AGGREGATE, KW_ALL, KW_ALTER, KW_A
     KW_SCHEMAS, KW_SECOND, KW_SELECT, KW_SEMI, KW_SERIALIZABLE, KW_SESSION, KW_SET, KW_SETS, KW_SHOW,
     KW_SMALLINT, KW_SNAPSHOT, KW_SONAME, KW_SPLIT, KW_START, KW_STATUS, KW_STOP, KW_STORAGE, KW_STRING,
     KW_SUM, KW_SUPERUSER, KW_SYNC, KW_SYSTEM,
-    KW_TABLE, KW_TABLES, KW_TABLET, KW_TASK, KW_TERMINATED, KW_THAN, KW_THEN, KW_TIME, KW_TIMESTAMP, KW_TINYINT,
+    KW_TABLE, KW_TABLES, KW_TABLET, KW_TASK, KW_TEMPORARY, KW_TERMINATED, KW_THAN, KW_TIME, KW_THEN, KW_TIMESTAMP, KW_TINYINT,
     KW_TO, KW_TRANSACTION, KW_TRIGGERS, KW_TRIM, KW_TRUE, KW_TRUNCATE, KW_TYPE, KW_TYPES,
     KW_UNCOMMITTED, KW_UNBOUNDED, KW_UNION, KW_UNIQUE, KW_UNSIGNED, KW_USE, KW_USER, KW_USING,
     KW_VALUE, KW_VALUES, KW_VARCHAR, KW_VARIABLES, KW_VIEW, KW_MATERIALIZED,
@@ -436,6 +436,8 @@ nonterminal IndexDef.IndexType opt_index_type;
 nonterminal ShowAlterStmt.AlterType opt_alter_type;
 nonterminal Boolean opt_builtin;
 
+nonterminal Boolean opt_tmp;
+
 precedence left KW_FULL, KW_MERGE;
 precedence left DOT;
 precedence left SET_VAR;
@@ -807,18 +809,22 @@ alter_table_clause ::=
     {:
         RESULT = new ModifyTablePropertiesClause(properties);
     :}
-    | KW_ADD single_range_partition_desc:desc opt_distribution:distribution opt_properties:properties
+    | KW_ADD opt_tmp:isTempPartition single_range_partition_desc:desc opt_distribution:distribution opt_properties:properties
     {:
-        RESULT = new AddPartitionClause(desc, distribution, properties);
+        RESULT = new AddPartitionClause(desc, distribution, properties, isTempPartition);
     :}
-    | KW_DROP KW_PARTITION opt_if_exists:ifExists ident:partitionName
+    | KW_DROP opt_tmp:isTempPartition KW_PARTITION opt_if_exists:ifExists ident:partitionName
     {:
-        RESULT = new DropPartitionClause(ifExists, partitionName);
+        RESULT = new DropPartitionClause(ifExists, partitionName, isTempPartition);
     :}
     | KW_MODIFY KW_PARTITION ident:partitionName KW_SET LPAREN key_value_map:properties RPAREN
     {:
         RESULT = new ModifyPartitionClause(partitionName, properties);
     :}
+    | KW_REPLACE opt_partitions:partitions KW_WITH KW_TEMPORARY opt_partitions:tempPartitions opt_properties:properties
+    {:
+        RESULT = new ReplacePartitionClause(partitions, tempPartitions, properties);
+    :}
     | KW_RENAME ident:newTableName
     {:
         RESULT = new TableRenameClause(newTableName);
@@ -2083,9 +2089,9 @@ show_param ::=
     {:
         RESULT = new ShowDataStmt(dbTblName.getDb(), dbTblName.getTbl());
     :}
-	| KW_PARTITIONS KW_FROM table_name:tblName opt_wild_where order_by_clause:orderByClause limit_clause: limitClause
+	| opt_tmp:tmp KW_PARTITIONS KW_FROM table_name:tblName opt_wild_where order_by_clause:orderByClause limit_clause: limitClause
     {:
-        RESULT = new ShowPartitionsStmt(tblName, parser.where, orderByClause, limitClause);
+        RESULT = new ShowPartitionsStmt(tblName, parser.where, orderByClause, limitClause, tmp);
     :}
     | KW_TABLET INTEGER_LITERAL:tabletId
     {:
@@ -2165,6 +2171,17 @@ show_param ::=
     :}
     ;
 
+opt_tmp ::=
+    /* empty */
+    {:
+        RESULT = false;
+    :}
+    | KW_TEMPORARY
+    {:
+        RESULT = true;
+    :}
+    ;
+
 keys_or_index ::=
     KW_KEY
     | KW_INDEX
@@ -4436,6 +4453,8 @@ keyword ::=
     {: RESULT = id; :}
     | KW_TABLES:id
     {: RESULT = id; :}
+    | KW_TEMPORARY:id
+    {: RESULT = id; :}
     | KW_THAN:id
     {: RESULT = id; :}
     | KW_TIMESTAMP:id
diff --git a/fe/src/main/java/org/apache/doris/alter/Alter.java b/fe/src/main/java/org/apache/doris/alter/Alter.java
index abe0bd1..766b42b 100644
--- a/fe/src/main/java/org/apache/doris/alter/Alter.java
+++ b/fe/src/main/java/org/apache/doris/alter/Alter.java
@@ -39,6 +39,7 @@ import org.apache.doris.analysis.ModifyPartitionClause;
 import org.apache.doris.analysis.ModifyTablePropertiesClause;
 import org.apache.doris.analysis.PartitionRenameClause;
 import org.apache.doris.analysis.ReorderColumnsClause;
+import org.apache.doris.analysis.ReplacePartitionClause;
 import org.apache.doris.analysis.RollupRenameClause;
 import org.apache.doris.analysis.TableName;
 import org.apache.doris.analysis.TableRenameClause;
@@ -244,8 +245,8 @@ public class Alter {
             } else if (alterClause instanceof DropRollupClause && !hasSchemaChange && !hasAddMaterializedView
                     && !hasPartition && !hasRename && !hasModifyProp) {
                 hasDropRollup = true;
-            } else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
-                    && !hasPartition && !hasRename && !hasModifyProp) {
+            } else if (alterClause instanceof AddPartitionClause && !hasSchemaChange && !hasAddMaterializedView
+                    && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
                 hasPartition = true;
             } else if (alterClause instanceof DropPartitionClause && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup
                     && !hasPartition && !hasRename && !hasModifyProp) {
@@ -258,6 +259,9 @@ public class Alter {
                     && !hasSchemaChange && !hasAddMaterializedView && !hasDropRollup && !hasPartition && !hasRename
                     && !hasModifyProp) {
                 hasRename = true;
+            } else if (alterClause instanceof ReplacePartitionClause && !hasSchemaChange && !hasAddMaterializedView
+                    && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
+                hasPartition = true;
             } else if (alterClause instanceof ModifyTablePropertiesClause && !hasSchemaChange && !hasAddMaterializedView
                     && !hasDropRollup && !hasPartition && !hasRename && !hasModifyProp) {
                 Map<String, String> properties = alterClause.getProperties();
@@ -314,12 +318,16 @@ public class Alter {
                 materializedViewHandler.process(alterClauses, clusterName, db, olapTable);
             } else if (hasPartition) {
                 Preconditions.checkState(alterClauses.size() == 1);
+                // When this is a dynamic partition table, do not allow partition operations.
+                // TODO(cmy): although some operations could be allowed on dynamic partition tables,
+                // we currently check strictly to avoid unexpected exceptions.
+                DynamicPartitionUtil.checkAlterAllowed(olapTable);
                 AlterClause alterClause = alterClauses.get(0);
                 if (alterClause instanceof DropPartitionClause) {
-                    DynamicPartitionUtil.checkAlterAllowed(olapTable);
                     Catalog.getInstance().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
+                } else if (alterClause instanceof ReplacePartitionClause) {
+                    Catalog.getCurrentCatalog().replaceTempPartition(db, tableName, (ReplacePartitionClause) alterClause);
                 } else if (alterClause instanceof ModifyPartitionClause) {
-                    DynamicPartitionUtil.checkAlterAllowed(olapTable);
                     ModifyPartitionClause clause = ((ModifyPartitionClause) alterClause);
                     Map<String, String> properties = clause.getProperties();
                     if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) {
@@ -329,6 +337,7 @@ public class Alter {
                         Catalog.getInstance().modifyPartition(db, olapTable, partitionName, properties);
                     }
                 } else {
+                    // add (temp) partition
                     needSynchronized = true;
                 }
             } else if (hasRename) {
diff --git a/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
index 9357747..57e8e42 100644
--- a/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
+++ b/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
@@ -171,6 +171,11 @@ public class MaterializedViewHandler extends AlterHandler {
      */
     public void processCreateMaterializedView(CreateMaterializedViewStmt addMVClause, Database db, OlapTable olapTable)
             throws DdlException, AnalysisException {
+
+        if (olapTable.existTempPartitions()) {
+            throw new DdlException("Can not alter table when there are temp partitions in table");
+        }
+
         // Step1.1: semantic analysis
         // TODO(ML): support the materialized view as base index
         if (!addMVClause.getBaseIndexName().equals(olapTable.getName())) {
@@ -1019,6 +1024,11 @@ public class MaterializedViewHandler extends AlterHandler {
     @Override
     public void process(List<AlterClause> alterClauses, String clusterName, Database db, OlapTable olapTable)
             throws DdlException, AnalysisException {
+
+        if (olapTable.existTempPartitions()) {
+            throw new DdlException("Can not alter table when there are temp partitions in table");
+        }
+
         Optional<AlterClause> alterClauseOptional = alterClauses.stream().findAny();
         if (alterClauseOptional.isPresent()) {
             if (alterClauseOptional.get() instanceof AddRollupClause) {
diff --git a/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 8fa3c91..8cf8fc6 100644
--- a/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -75,6 +75,7 @@ import org.apache.doris.task.ClearAlterTask;
 import org.apache.doris.task.UpdateTabletMetaInfoTask;
 import org.apache.doris.thrift.TStorageFormat;
 import org.apache.doris.thrift.TStorageMedium;
+import org.apache.doris.thrift.TTaskType;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -84,7 +85,6 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
-import org.apache.doris.thrift.TTaskType;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -1330,6 +1330,11 @@ public class SchemaChangeHandler extends AlterHandler {
     @Override
     public void process(List<AlterClause> alterClauses, String clusterName, Database db, OlapTable olapTable)
             throws UserException {
+
+        if (olapTable.existTempPartitions()) {
+            throw new DdlException("Can not alter table when there are temp partitions in table");
+        }
+
         // index id -> index schema
         Map<Long, LinkedList<Column>> indexSchemaMap = new HashMap<>();
         for (Map.Entry<Long, List<Column>> entry : olapTable.getIndexIdToSchema().entrySet()) {
diff --git a/fe/src/main/java/org/apache/doris/analysis/AddPartitionClause.java b/fe/src/main/java/org/apache/doris/analysis/AddPartitionClause.java
index 2305f0e..62b3da0 100644
--- a/fe/src/main/java/org/apache/doris/analysis/AddPartitionClause.java
+++ b/fe/src/main/java/org/apache/doris/analysis/AddPartitionClause.java
@@ -27,6 +27,8 @@ public class AddPartitionClause extends AlterTableClause {
     private SingleRangePartitionDesc partitionDesc;
     private DistributionDesc distributionDesc;
     private Map<String, String> properties;
+    // true if this is to add a temporary partition
+    private boolean isTempPartition;
 
     public SingleRangePartitionDesc getSingeRangePartitionDesc() {
         return partitionDesc;
@@ -36,18 +38,24 @@ public class AddPartitionClause extends AlterTableClause {
         return distributionDesc;
     }
 
+    public boolean isTempPartition() {
+        return isTempPartition;
+    }
+
     public AddPartitionClause(SingleRangePartitionDesc partitionDesc,
                               DistributionDesc distributionDesc,
-                              Map<String, String> properties) {
+                              Map<String, String> properties,
+                              boolean isTempPartition) {
         this.partitionDesc = partitionDesc;
         this.distributionDesc = distributionDesc;
         this.properties = properties;
+        this.isTempPartition = isTempPartition;
+        
         this.needTableStable = false;
     }
 
     @Override
     public void analyze(Analyzer analyzer) throws AnalysisException {
-
     }
 
     @Override
diff --git a/fe/src/main/java/org/apache/doris/analysis/DropPartitionClause.java b/fe/src/main/java/org/apache/doris/analysis/DropPartitionClause.java
index ffee2b8..90ca887 100644
--- a/fe/src/main/java/org/apache/doris/analysis/DropPartitionClause.java
+++ b/fe/src/main/java/org/apache/doris/analysis/DropPartitionClause.java
@@ -29,10 +29,13 @@ import java.util.Map;
 public class DropPartitionClause extends AlterTableClause {
     private boolean ifExists;
     private String partitionName;
+    // true if this is to drop a temp partition
+    private boolean isTempPartition;
 
-    public DropPartitionClause(boolean ifExists, String partitionName) {
+    public DropPartitionClause(boolean ifExists, String partitionName, boolean isTempPartition) {
         this.ifExists = ifExists;
         this.partitionName = partitionName;
+        this.isTempPartition = isTempPartition;
         this.needTableStable = false;
     }
 
@@ -44,6 +47,10 @@ public class DropPartitionClause extends AlterTableClause {
         return partitionName;
     }
 
+    public boolean isTempPartition() {
+        return isTempPartition;
+    }
+
     @Override
     public void analyze(Analyzer analyzer) throws AnalysisException {
         if (Strings.isNullOrEmpty(partitionName)) {
diff --git a/fe/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java b/fe/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java
index 88380f1..7f55611 100644
--- a/fe/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java
+++ b/fe/src/main/java/org/apache/doris/analysis/PartitionKeyDesc.java
@@ -80,7 +80,7 @@ public class PartitionKeyDesc {
         return upperValues != null;
     }
 
-    public PartitionRangeType getPartitionType () {
+    public PartitionRangeType getPartitionType() {
         return partitionType;
     }
 
diff --git a/fe/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java b/fe/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java
new file mode 100644
index 0000000..db399ce
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java
@@ -0,0 +1,116 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.analysis;
+
+import org.apache.doris.common.AnalysisException;
+import org.apache.doris.common.util.PropertyAnalyzer;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+
+import java.util.List;
+import java.util.Map;
+
+// Clause used to replace existing partitions with temporary partitions.
+// eg:
+// ALTER TABLE tbl REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2);
+public class ReplacePartitionClause extends AlterTableClause {
+    private List<String> partitionNames;
+    private List<String> tempPartitionNames;
+    private Map<String, String> properties = Maps.newHashMap();
+
+    // "isStrictMode" is got from property "strict_range", and default is true.
+    // If true, when replacing partition, the range of partitions must same as the range of temp partitions.
+    private boolean isStrictRange;
+    
+    // "useTempPartitionName" is got from property "use_temp_partition_name", and default is false.
+    // If false, after replacing, the replaced partition's name will remain unchanged.
+    // Otherwise, the replaced partition's name will be the temp partitions name.
+    // This parameter is valid only when the number of partitions is the same as the number of temp partitions.
+    // For example:
+    // 1. REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false");
+    //      "use_temp_partition_name" will take no effect after replacing, and the partition names will be "tp1" and "tp2".
+    //
+    // 2. REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false");
+    //      alter replacing, the partition names will be "p1" and "p2".
+    //      but if "use_temp_partition_name" is true, the partition names will be "tp1" and "tp2".
+    private boolean useTempPartitionName;
+
+    public ReplacePartitionClause(List<String> partitionNames, List<String> tempPartitionNames,
+            Map<String, String> properties) {
+        this.partitionNames = partitionNames;
+        this.tempPartitionNames = tempPartitionNames;
+        this.needTableStable = false;
+        this.properties = properties;
+    }
+
+    public List<String> getPartitionNames() {
+        return partitionNames;
+    }
+
+    public List<String> getTempPartitionNames() {
+        return tempPartitionNames;
+    }
+
+    public boolean isStrictRange() {
+        return isStrictRange;
+    }
+
+    public boolean useTempPartitionName() {
+        return useTempPartitionName;
+    }
+
+    @Override
+    public void analyze(Analyzer analyzer) throws AnalysisException {
+        if (partitionNames.isEmpty()) {
+            throw new AnalysisException("No partition specified");
+        }
+
+        if (tempPartitionNames.isEmpty()) {
+            throw new AnalysisException("No temp partition specified");
+        }
+
+        this.isStrictRange = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_STRICT_RANGE, true);
+        this.useTempPartitionName = PropertyAnalyzer.analyzeBooleanProp(properties,
+                PropertyAnalyzer.PROPERTIES_USE_TEMP_PARTITION_NAME, false);
+
+        if (properties != null && !properties.isEmpty()) {
+            throw new AnalysisException("Unknown properties: " + properties.keySet());
+        }
+    }
+
+    @Override
+    public Map<String, String> getProperties() {
+        return this.properties;
+    }
+
+    @Override
+    public String toSql() {
+        StringBuilder sb = new StringBuilder();
+        sb.append("REPLACE PARTITION(");
+        sb.append(Joiner.on(", ").join(partitionNames)).append(")");
+        sb.append(" WITH TEMPORARY PARTITION(");
+        sb.append(Joiner.on(", ").join(tempPartitionNames)).append(")");
+        return sb.toString();
+    }
+
+    @Override
+    public String toString() {
+        return toSql();
+    }
+}
diff --git a/fe/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java b/fe/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
index 85cfd3c..73e7671 100644
--- a/fe/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
+++ b/fe/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
@@ -63,13 +63,15 @@ public class ShowPartitionsStmt extends ShowStmt {
     private Expr whereClause;
     private List<OrderByElement> orderByElements;
     private LimitElement limitElement;
+    private boolean isTempPartition = false;
+
     private List<OrderByPair> orderByPairs;
     private Map<String, Expr> filterMap;
 
     private ProcNodeInterface node;
 
     public ShowPartitionsStmt(TableName tableName, Expr whereClause, List<OrderByElement> orderByElements,
-                              LimitElement limitElement) {
+            LimitElement limitElement, boolean isTempPartition) {
         this.dbName = tableName.getDb();
         this.tableName = tableName.getTbl();
         this.whereClause = whereClause;
@@ -78,22 +80,7 @@ public class ShowPartitionsStmt extends ShowStmt {
         if (whereClause != null) {
             this.filterMap = new HashMap<>();
         }
-    }
-
-    public String getDbName() {
-        return dbName;
-    }
-
-    public String getTableName() {
-        return tableName;
-    }
-
-    public Expr getWhereClause() {
-        return whereClause;
-    }
-
-    public List<OrderByElement> getOrderByElements() {
-        return orderByElements;
+        this.isTempPartition = isTempPartition;
     }
 
     public List<OrderByPair> getOrderByPairs() {
@@ -140,7 +127,11 @@ public class ShowPartitionsStmt extends ShowStmt {
             stringBuilder.append("/dbs/");
             stringBuilder.append(db.getId());
             stringBuilder.append("/").append(table.getId());
-            stringBuilder.append("/").append("partitions");
+            if (isTempPartition) {
+                stringBuilder.append("/temp_partitions");
+            } else {
+                stringBuilder.append("/partitions");
+            }
 
             LOG.debug("process SHOW PROC '{}';", stringBuilder.toString());
 
diff --git a/fe/src/main/java/org/apache/doris/analysis/SingleRangePartitionDesc.java b/fe/src/main/java/org/apache/doris/analysis/SingleRangePartitionDesc.java
index fda6bd4..a0cd20d 100644
--- a/fe/src/main/java/org/apache/doris/analysis/SingleRangePartitionDesc.java
+++ b/fe/src/main/java/org/apache/doris/analysis/SingleRangePartitionDesc.java
@@ -120,7 +120,7 @@ public class SingleRangePartitionDesc {
         versionInfo = PropertyAnalyzer.analyzeVersionInfo(properties);
 
         // analyze in memory
-        isInMemory = PropertyAnalyzer.analyzeInMemory(properties, false);
+        isInMemory = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_INMEMORY, false);
 
         if (otherProperties == null) {
             // check unknown properties
diff --git a/fe/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
index aebd76e..b75141d 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -65,6 +65,7 @@ import org.apache.doris.analysis.RangePartitionDesc;
 import org.apache.doris.analysis.RecoverDbStmt;
 import org.apache.doris.analysis.RecoverPartitionStmt;
 import org.apache.doris.analysis.RecoverTableStmt;
+import org.apache.doris.analysis.ReplacePartitionClause;
 import org.apache.doris.analysis.RestoreStmt;
 import org.apache.doris.analysis.RollupRenameClause;
 import org.apache.doris.analysis.ShowAlterStmt.AlterType;
@@ -164,6 +165,7 @@ import org.apache.doris.persist.ModifyTablePropertyOperationLog;
 import org.apache.doris.persist.OperationType;
 import org.apache.doris.persist.PartitionPersistInfo;
 import org.apache.doris.persist.RecoverInfo;
+import org.apache.doris.persist.ReplacePartitionOperationLog;
 import org.apache.doris.persist.ReplicaPersistInfo;
 import org.apache.doris.persist.Storage;
 import org.apache.doris.persist.StorageInfo;
@@ -1435,7 +1437,9 @@ public class Catalog {
 
                 OlapTable olapTable = (OlapTable) table;
                 long tableId = olapTable.getId();
-                for (Partition partition : olapTable.getPartitions()) {
+                List<Partition> allPartitions = Lists.newArrayList(olapTable.getPartitions());
+                allPartitions.addAll(olapTable.getAllTempPartitions());
+                for (Partition partition : allPartitions) {
                     long partitionId = partition.getId();
                     TStorageMedium medium = olapTable.getPartitionInfo().getDataProperty(
                             partitionId).getStorageMedium();
@@ -2895,6 +2899,7 @@ public class Catalog {
     public void addPartition(Database db, String tableName, AddPartitionClause addPartitionClause) throws DdlException {
         SingleRangePartitionDesc singlePartitionDesc = addPartitionClause.getSingeRangePartitionDesc();
         DistributionDesc distributionDesc = addPartitionClause.getDistributionDesc();
+        boolean isTempPartition = addPartitionClause.isTempPartition();
 
         DistributionInfo distributionInfo = null;
         OlapTable olapTable = null;
@@ -2931,8 +2936,12 @@ public class Catalog {
                 throw new DdlException("Only support adding partition to range partitioned table");
             }
 
+            if (isTempPartition) {
+                partitionInfo = olapTable.getTempPartitonRangeInfo();
+            }
+
             // check partition name
-            if (olapTable.getPartition(partitionName) != null) {
+            if (olapTable.checkPartitionNameExist(partitionName)) {
                 if (singlePartitionDesc.isSetIfNotExists()) {
                     LOG.info("add partition[{}] which already exists", partitionName);
                     return;
@@ -2950,6 +2959,7 @@ public class Catalog {
             if (!properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) {
                 properties.put(PropertyAnalyzer.PROPERTIES_INMEMORY, olapTable.isInMemory().toString());
             }
+
             RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
             singlePartitionDesc.analyze(rangePartitionInfo.getPartitionColumns().size(), properties);
             rangePartitionInfo.checkAndCreateRange(singlePartitionDesc);
@@ -3009,7 +3019,7 @@ public class Catalog {
         Preconditions.checkNotNull(indexIdToStorageType);
         Preconditions.checkNotNull(indexIdToSchema);
 
-        // create partition without lock
+        // create partition outside db lock
         DataProperty dataProperty = singlePartitionDesc.getPartitionDataProperty();
         Preconditions.checkNotNull(dataProperty);
 
@@ -3045,17 +3055,15 @@ public class Catalog {
                     throw new DdlException("Table[" + tableName + "] is not OLAP table");
                 }
 
-                // check partition type
                 olapTable = (OlapTable) table;
-                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
-                if (partitionInfo.getType() != PartitionType.RANGE) {
-                    throw new DdlException("Only support adding partition to range partitioned table");
+                if (olapTable.getState() != OlapTableState.NORMAL) {
+                    throw new DdlException("Table[" + tableName + "]'s state is not NORMAL");
                 }
 
                 // check partition name
-                if (olapTable.getPartition(partitionName) != null) {
+                if (olapTable.checkPartitionNameExist(partitionName)) {
                     if (singlePartitionDesc.isSetIfNotExists()) {
-                        LOG.debug("add partition[{}] which already exists", partitionName);
+                        LOG.info("add partition[{}] which already exists", partitionName);
                         return;
                     } else {
                         ErrorReport.reportDdlException(ErrorCode.ERR_SAME_NAME_PARTITION, partitionName);
@@ -3087,19 +3095,34 @@ public class Catalog {
                     throw new DdlException("Table[" + tableName + "]'s meta has been changed. try again.");
                 }
 
+                // check partition type
+                PartitionInfo partitionInfo = olapTable.getPartitionInfo();
+                if (isTempPartition) {
+                    partitionInfo = olapTable.getTempPartitonRangeInfo();
+                }
+                if (partitionInfo.getType() != PartitionType.RANGE) {
+                    throw new DdlException("Only support adding partition to range partitioned table");
+                }
+
                 // update partition info
                 RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
                 rangePartitionInfo.handleNewSinglePartitionDesc(singlePartitionDesc, partitionId);
 
-                olapTable.addPartition(partition);
+                if (isTempPartition) {
+                    olapTable.addTempPartition(partition);
+                } else {
+                    olapTable.addPartition(partition);
+                }
+
                 // log
                 PartitionPersistInfo info = new PartitionPersistInfo(db.getId(), olapTable.getId(), partition,
                         rangePartitionInfo.getRange(partitionId), dataProperty,
                         rangePartitionInfo.getReplicationNum(partitionId),
-                        rangePartitionInfo.getIsInMemory(partitionId));
+                        rangePartitionInfo.getIsInMemory(partitionId),
+                        isTempPartition);
                 editLog.logAddPartition(info);
 
-                LOG.info("succeed in creating partition[{}]", partitionId);
+                LOG.info("succeed in creating partition[{}], temp: {}", partitionId, isTempPartition);
             } finally {
                 db.writeUnlock();
             }
@@ -3117,8 +3140,15 @@ public class Catalog {
         try {
             OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
             Partition partition = info.getPartition();
-            olapTable.addPartition(partition);
+
             PartitionInfo partitionInfo = olapTable.getPartitionInfo();
+            if (info.isTempPartition()) {
+                partitionInfo = olapTable.getTempPartitonRangeInfo();
+                olapTable.addTempPartition(partition);
+            } else {
+                olapTable.addPartition(partition);
+            }
+
             ((RangePartitionInfo) partitionInfo).unprotectHandleNewSinglePartitionDesc(partition.getId(),
                     info.getRange(), info.getDataProperty(), info.getReplicationNum(),
                     info.isInMemory());
@@ -3146,18 +3176,16 @@ public class Catalog {
     }
 
     public void dropPartition(Database db, OlapTable olapTable, DropPartitionClause clause) throws DdlException {
-        DynamicPartitionUtil.checkAlterAllowed(olapTable);
         Preconditions.checkArgument(db.isWriteLockHeldByCurrentThread());
 
         String partitionName = clause.getPartitionName();
+        boolean isTempPartition = clause.isTempPartition();
 
         if (olapTable.getState() != OlapTableState.NORMAL) {
             throw new DdlException("Table[" + olapTable.getName() + "]'s state is not NORMAL");
         }
 
-        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
-        Partition partition = olapTable.getPartition(partitionName);
-        if (partition == null) {
+        if (!olapTable.checkPartitionNameExist(partitionName, isTempPartition)) {
             if (clause.isSetIfExists()) {
                 LOG.info("drop partition[{}] which does not exist", partitionName);
                 return;
@@ -3166,20 +3194,28 @@ public class Catalog {
             }
         }
 
+        PartitionInfo partitionInfo = olapTable.getPartitionInfo();
         if (partitionInfo.getType() != PartitionType.RANGE) {
-            String errMsg = "Alter table [" + olapTable.getName() + "] failed. Not a partitioned table";
-            LOG.warn(errMsg);
-            throw new DdlException(errMsg);
+            throw new DdlException("Alter table [" + olapTable.getName() + "] failed. Not a partitioned table");
         }
 
+        if (isTempPartition) {
+            partitionInfo = olapTable.getTempPartitonRangeInfo();
+        }
+
+
         // drop
-        olapTable.dropPartition(db.getId(), partitionName);
+        if (isTempPartition) {
+            olapTable.dropTempPartition(partitionName, true);
+        } else {
+            olapTable.dropPartition(db.getId(), partitionName);
+        }
 
         // log
-        DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionName);
+        DropPartitionInfo info = new DropPartitionInfo(db.getId(), olapTable.getId(), partitionName, isTempPartition);
         editLog.logDropPartition(info);
 
-        LOG.info("succeed in droping partition[{}]", partition.getId());
+        LOG.info("succeed in droping partition[{}]", partitionName);
     }
 
     public void replayDropPartition(DropPartitionInfo info) {
@@ -3187,7 +3223,11 @@ public class Catalog {
         db.writeLock();
         try {
             OlapTable olapTable = (OlapTable) db.getTable(info.getTableId());
-            olapTable.dropPartition(info.getDbId(), info.getPartitionName());
+            if (info.isTempPartition()) {
+                olapTable.dropTempPartition(info.getPartitionName(), true);
+            } else {
+                olapTable.dropPartition(info.getDbId(), info.getPartitionName());
+            }
         } finally {
             db.writeUnlock();
         }
@@ -3254,7 +3294,8 @@ public class Catalog {
         }
 
         // 3. in memory
-        boolean isInMemory = PropertyAnalyzer.analyzeInMemory(properties, partitionInfo.getIsInMemory(partition.getId()));
+        boolean isInMemory = PropertyAnalyzer.analyzeBooleanProp(properties,
+                PropertyAnalyzer.PROPERTIES_INMEMORY, partitionInfo.getIsInMemory(partition.getId()));
 
         // check if has other undefined properties
         if (properties != null && !properties.isEmpty()) {
@@ -3539,7 +3580,7 @@ public class Catalog {
         }
 
         // set in memory
-        boolean isInMemory = PropertyAnalyzer.analyzeInMemory(properties, false);
+        boolean isInMemory = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_INMEMORY, false);
         olapTable.setIsInMemory(isInMemory);
 
         if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
@@ -5128,7 +5169,7 @@ public class Catalog {
         }
 
         // check if name is already used
-        if (table.getPartition(newPartitionName) != null) {
+        if (table.checkPartitionNameExist(newPartitionName)) {
             throw new DdlException("Partition name[" + newPartitionName + "] is already used");
         }
 
@@ -6115,6 +6156,9 @@ public class Catalog {
      * 1. using the same schema to create new table(partitions)
      * 2. use the new created table(partitions) to replace the old ones.
      * 
+     * If no partition is specified, it will truncate all partitions of this table, including all temp partitions;
+     * otherwise, it will only truncate the specified partitions.
+     * 
      */
     public void truncateTable(TruncateTableStmt truncateTableStmt) throws DdlException {
         TableRef tblRef = truncateTableStmt.getTblRef();
@@ -6128,6 +6172,7 @@ public class Catalog {
             ErrorReport.reportDdlException(ErrorCode.ERR_BAD_DB_ERROR, dbTbl.getDb());
         }
 
+        boolean truncateEntireTable = tblRef.getPartitions() == null || tblRef.getPartitions().isEmpty();
         db.readLock();
         try {
             Table table = db.getTable(dbTbl.getTbl());
@@ -6144,7 +6189,7 @@ public class Catalog {
                 throw new DdlException("Table' state is not NORMAL: " + olapTable.getState());
             }
             
-            if (tblRef.getPartitions() != null && !tblRef.getPartitions().isEmpty()) {
+            if (!truncateEntireTable) {
                 for (String partName: tblRef.getPartitions()) {
                     Partition partition = olapTable.getPartition(partName);
                     if (partition == null) {
@@ -6160,7 +6205,6 @@ public class Catalog {
             }
             
             copiedTbl = olapTable.selectiveCopy(origPartitions.keySet(), true, IndexExtState.VISIBLE);
-
         } finally {
             db.readUnlock();
         }
@@ -6254,10 +6298,11 @@ public class Catalog {
             }
 
             // replace
-            truncateTableInternal(olapTable, newPartitions);
+            truncateTableInternal(olapTable, newPartitions, truncateEntireTable);
 
             // write edit log
-            TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions);
+            TruncateTableInfo info = new TruncateTableInfo(db.getId(), olapTable.getId(), newPartitions,
+                    truncateEntireTable);
             editLog.logTruncateTable(info);
         } finally {
             db.writeUnlock();
@@ -6267,7 +6312,7 @@ public class Catalog {
                 tblRef.getName().toSql(), tblRef.getPartitions());
     }
 
-    private void truncateTableInternal(OlapTable olapTable, List<Partition> newPartitions) {
+    private void truncateTableInternal(OlapTable olapTable, List<Partition> newPartitions, boolean isEntireTable) {
         // use new partitions to replace the old ones.
         Set<Long> oldTabletIds = Sets.newHashSet();
         for (Partition newPartition : newPartitions) {
@@ -6280,6 +6325,11 @@ public class Catalog {
             }
         }
 
+        if (isEntireTable) {
+            // drop all temp partitions
+            olapTable.dropAllTempPartitions();
+        }
+
         // remove the tablets in old partitions
         for (Long tabletId : oldTabletIds) {
             Catalog.getCurrentInvertedIndex().deleteTablet(tabletId);
@@ -6291,7 +6341,7 @@ public class Catalog {
         db.writeLock();
         try {
             OlapTable olapTable = (OlapTable) db.getTable(info.getTblId());
-            truncateTableInternal(olapTable, info.getPartitions());
+            truncateTableInternal(olapTable, info.getPartitions(), info.isEntireTable());
 
             if (!Catalog.isCheckpointThread()) {
                 // add tablet to inverted index
@@ -6411,5 +6461,72 @@ public class Catalog {
             db.writeUnlock();
         }
     }
+
+    /*
+     * Entry point for replacing partitions with temp partitions.
+     */
+    public void replaceTempPartition(Database db, String tableName, ReplacePartitionClause clause) throws DdlException {
+        List<String> partitionNames = clause.getPartitionNames();
+        List<String> tempPartitonNames = clause.getTempPartitionNames();
+        boolean isStrictRange = clause.isStrictRange();
+        boolean useTempPartitionName = clause.useTempPartitionName();
+        db.writeLock();
+        try {
+            Table table = db.getTable(tableName);
+            if (table == null) {
+                ErrorReport.reportDdlException(ErrorCode.ERR_BAD_TABLE_ERROR, tableName);
+            }
+
+            if (table.getType() != TableType.OLAP) {
+                throw new DdlException("Table[" + tableName + "] is not OLAP table");
+            }
+
+            OlapTable olapTable = (OlapTable) table;
+            // check partition exist
+            for (String partName : partitionNames) {
+                if (!olapTable.checkPartitionNameExist(partName, false)) {
+                    throw new DdlException("Partition[" + partName + "] does not exist");
+                }
+            }
+            for (String partName : tempPartitonNames) {
+                if (!olapTable.checkPartitionNameExist(partName, true)) {
+                    throw new DdlException("Temp partition[" + partName + "] does not exist");
+                }
+            }
+
+            olapTable.replaceTempPartitions(partitionNames, tempPartitonNames, isStrictRange, useTempPartitionName);
+
+            // write log
+            ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), olapTable.getId(),
+                    partitionNames, tempPartitonNames, isStrictRange, useTempPartitionName);
+            editLog.logReplaceTempPartition(info);
+            LOG.info("finished to replace partitions {} with temp partitions {} from table: {}",
+                    clause.getPartitionNames(), clause.getTempPartitionNames(), tableName);
+        } finally {
+            db.writeUnlock();
+        }
+    }
+
+    public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) {
+        Database db = getDb(replaceTempPartitionLog.getDbId());
+        if (db == null) {
+            return;
+        }
+        db.writeLock();
+        try {
+            OlapTable olapTable = (OlapTable) db.getTable(replaceTempPartitionLog.getTblId());
+            if (olapTable == null) {
+                return;
+            }
+            olapTable.replaceTempPartitions(replaceTempPartitionLog.getPartitions(),
+                    replaceTempPartitionLog.getTempPartitions(),
+                    replaceTempPartitionLog.isStrictRange(),
+                    replaceTempPartitionLog.useTempPartitionName());
+        } catch (DdlException e) {
+            LOG.warn("should not happen. {}", e);
+        } finally {
+            db.writeUnlock();
+        }
+    }
 }
 
diff --git a/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java b/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
index be02688..8cf3794 100644
--- a/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
+++ b/fe/src/main/java/org/apache/doris/catalog/CatalogRecycleBin.java
@@ -27,6 +27,7 @@ import org.apache.doris.common.FeMetaVersion;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 import org.apache.doris.common.util.MasterDaemon;
+import org.apache.doris.common.util.RangeUtils;
 import org.apache.doris.persist.RecoverInfo;
 import org.apache.doris.task.AgentBatchTask;
 import org.apache.doris.task.AgentTaskExecutor;
@@ -44,6 +45,7 @@ import org.apache.logging.log4j.Logger;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -211,7 +213,9 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
     private void onEraseOlapTable(OlapTable olapTable) {
         // inverted index
         TabletInvertedIndex invertedIndex = Catalog.getCurrentInvertedIndex();
-        for (Partition partition : olapTable.getPartitions()) {
+        Collection<Partition> allPartitions = olapTable.getPartitions();
+        allPartitions.addAll(olapTable.getAllTempPartitions());
+        for (Partition partition : allPartitions) {
             for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.ALL)) {
                 for (Tablet tablet : index.getTablets()) {
                     invertedIndex.deleteTablet(tablet.getId());
@@ -512,7 +516,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
         Map<Long, Range<PartitionKey>> idToRangeMap = partitionInfo.getIdToRange();
         try {
             for (Range<PartitionKey> existRange : idToRangeMap.values()) {
-                RangePartitionInfo.checkRangeIntersect(recoverRange, existRange);
+                RangeUtils.checkRangeIntersect(recoverRange, existRange);
             }
         } catch (DdlException e) {
             throw new DdlException("Can not recover partition[" + partitionName + "]. " + e.getMessage());
@@ -862,7 +866,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
             out.writeLong(dbId);
             out.writeLong(tableId);
             partition.write(out);
-            RangePartitionInfo.writeRange(out, range);
+            RangeUtils.writeRange(out, range);
             dataProperty.write(out);
             out.writeShort(replicationNum);
             out.writeBoolean(isInMemory);
@@ -872,7 +876,7 @@ public class CatalogRecycleBin extends MasterDaemon implements Writable {
             dbId = in.readLong();
             tableId = in.readLong();
             partition = Partition.read(in);
-            range = RangePartitionInfo.readRange(in);
+            range = RangeUtils.readRange(in);
             dataProperty = DataProperty.read(in);
             replicationNum = in.readShort();
             if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_72) {
diff --git a/fe/src/main/java/org/apache/doris/catalog/DistributionInfo.java b/fe/src/main/java/org/apache/doris/catalog/DistributionInfo.java
index d2b425e..10a3819 100644
--- a/fe/src/main/java/org/apache/doris/catalog/DistributionInfo.java
+++ b/fe/src/main/java/org/apache/doris/catalog/DistributionInfo.java
@@ -23,6 +23,7 @@ import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 
 import com.google.common.collect.Lists;
+import com.google.gson.annotations.SerializedName;
 
 import org.apache.commons.lang.NotImplementedException;
 
@@ -40,6 +41,10 @@ public abstract class DistributionInfo implements Writable {
         RANDOM
     }
 
+    // for Gson runtime type adapter
+    @SerializedName(value = "typeStr")
+    protected String typeStr;
+    @SerializedName(value = "type")
     protected DistributionInfoType type;
     
     public DistributionInfo() {
@@ -48,6 +53,7 @@ public abstract class DistributionInfo implements Writable {
 
     public DistributionInfo(DistributionInfoType type) {
         this.type = type;
+        this.typeStr = this.type.name();
     }
 
     public DistributionInfoType getType() {
diff --git a/fe/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java b/fe/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
index 498bee4..53cc361 100644
--- a/fe/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
+++ b/fe/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
@@ -22,6 +22,7 @@ import org.apache.doris.analysis.HashDistributionDesc;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
+import com.google.gson.annotations.SerializedName;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -33,7 +34,9 @@ import java.util.List;
  * Hash Distribution Info.
  */
 public class HashDistributionInfo extends DistributionInfo {
+    @SerializedName(value = "distributionColumns")
     private List<Column> distributionColumns;
+    @SerializedName(value = "bucketNum")
     private int bucketNum;
 
     public HashDistributionInfo() {
diff --git a/fe/src/main/java/org/apache/doris/catalog/MaterializedIndex.java b/fe/src/main/java/org/apache/doris/catalog/MaterializedIndex.java
index 5d6841b..65a4d37 100644
--- a/fe/src/main/java/org/apache/doris/catalog/MaterializedIndex.java
+++ b/fe/src/main/java/org/apache/doris/catalog/MaterializedIndex.java
@@ -19,8 +19,10 @@ package org.apache.doris.catalog;
 
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.persist.gson.GsonPostProcessable;
 
 import com.google.common.collect.Lists;
+import com.google.gson.annotations.SerializedName;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -34,7 +36,7 @@ import java.util.Map.Entry;
 /**
  * The OlapTraditional table is a materialized table which stored as rowcolumnar file or columnar file
  */
-public class MaterializedIndex extends MetaObject implements Writable {
+public class MaterializedIndex extends MetaObject implements Writable, GsonPostProcessable {
     public enum IndexState {
         NORMAL,
         @Deprecated
@@ -54,17 +56,22 @@ public class MaterializedIndex extends MetaObject implements Writable {
         SHADOW // index state in SHADOW
     }
 
+    @SerializedName(value = "id")
     private long id;
-
+    @SerializedName(value = "state")
     private IndexState state;
+    @SerializedName(value = "rowCount")
     private long rowCount;
 
     private Map<Long, Tablet> idToTablets;
+    @SerializedName(value = "tablets")
     // this is for keeping tablet order
     private List<Tablet> tablets;
 
     // for push after rollup index finished
+    @SerializedName(value = "rollupIndexId")
     private long rollupIndexId;
+    @SerializedName(value = "rollupFinishedVersion")
     private long rollupFinishedVersion;
 
     public MaterializedIndex() {
@@ -280,4 +287,12 @@ public class MaterializedIndex extends MetaObject implements Writable {
 
         return buffer.toString();
     }
+
+    @Override
+    public void gsonPostProcess() {
+        // build "idToTablets" from "tablets"
+        for (Tablet tablet : tablets) {
+            idToTablets.put(tablet.getId(), tablet);
+        }
+    }
 }
diff --git a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
index 860a72d..3337ffd 100644
--- a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -38,6 +38,7 @@ import org.apache.doris.common.Pair;
 import org.apache.doris.common.io.DeepCopy;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.util.PropertyAnalyzer;
+import org.apache.doris.common.util.RangeUtils;
 import org.apache.doris.common.util.Util;
 import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.thrift.TOlapTable;
@@ -103,11 +104,14 @@ public class OlapTable extends Table {
 
     private KeysType keysType;
     private PartitionInfo partitionInfo;
-    private DistributionInfo defaultDistributionInfo;
-
     private Map<Long, Partition> idToPartition;
     private Map<String, Partition> nameToPartition;
 
+    private DistributionInfo defaultDistributionInfo;
+
+    // all info about temporary partitions is saved in "tempPartitions"
+    private TempPartitions tempPartitions = new TempPartitions();
+
     // bloom filter columns
     private Set<String> bfColumns;
     private double bfFpp;
@@ -176,6 +180,10 @@ public class OlapTable extends Table {
 
         this.keysType = keysType;
         this.partitionInfo = partitionInfo;
+        if (partitionInfo.getType() == PartitionType.RANGE) {
+            tempPartitions = new TempPartitions(((RangePartitionInfo) this.partitionInfo).getPartitionColumns());
+        }
+        
         this.defaultDistributionInfo = defaultDistributionInfo;
 
         this.bfColumns = null;
@@ -624,6 +632,14 @@ public class OlapTable extends Table {
         return nameToPartition.get(partitionName);
     }
 
+    public Partition getPartition(String partitionName, boolean isTempPartition) {
+        if (isTempPartition) {
+            return tempPartitions.getPartition(partitionName);
+        } else {
+            return nameToPartition.get(partitionName);
+        }
+    }
+
     public Set<String> getPartitionNames() {
         return Sets.newHashSet(nameToPartition.keySet());
     }
@@ -910,6 +926,8 @@ public class OlapTable extends Table {
             out.writeBoolean(true);
             tableProperty.write(out);
         }
+
+        tempPartitions.write(out);
     }
 
     public void readFields(DataInput in) throws IOException {
@@ -1017,6 +1035,11 @@ public class OlapTable extends Table {
             }
         }
 
+        // temp partitions
+        if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_74) {
+            tempPartitions = TempPartitions.read(in);
+        }
+
         // In the present, the fullSchema could be rebuilt by schema change while the properties is changed by MV.
         // After that, some properties of fullSchema and nameToColumn may be not same as properties of base columns.
         // So, here we need to rebuild the fullSchema to ensure the correctness of the properties.
@@ -1241,4 +1264,141 @@ public class OlapTable extends Table {
         tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY, Boolean.valueOf(isInMemory).toString());
         tableProperty.buildInMemory();
     }
+
+    // return true if a partition with the given name already exists, in either the normal partitions
+    // or the temp partitions. return false otherwise
+    public boolean checkPartitionNameExist(String partitionName) {
+        if (nameToPartition.containsKey(partitionName)) {
+            return true;
+        }
+        return tempPartitions.hasPartition(partitionName);
+    }
+
+    // if isTempPartition is true, check whether a temp partition with the given name exists;
+    // if isTempPartition is false, check whether a normal partition with the given name exists.
+    // return true if it exists, otherwise return false.
+    public boolean checkPartitionNameExist(String partitionName, boolean isTempPartition) {
+        if (isTempPartition) {
+            return tempPartitions.hasPartition(partitionName);
+        } else {
+            return nameToPartition.containsKey(partitionName);
+        }
+    }
+
+    public void dropTempPartition(String partitionName, boolean needDropTablet) {
+        tempPartitions.dropPartition(partitionName, needDropTablet);
+    }
+
+    /*
+     * replace partitions in 'partitionNames' with partitions in 'tempPartitionNames'.
+     * If strictRange is true, the replaced ranges must be exactly the same.
+     * What does "exactly the same" mean?
+     *      1. {[0, 10), [10, 20)} === {[0, 20)}
+     *      2. {[0, 10), [15, 20)} === {[0, 10), [15, 18), [18, 20)}
+     *      3. {[0, 10), [15, 20)} === {[0, 10), [15, 20)}
+     *      4. {[0, 10), [15, 20)} !== {[0, 20)}
+     *      
+     * If useTempPartitionName is false and the numbers of replaced and replacing partitions are equal,
+     * the replaced partitions' names will remain unchanged.
+     * What does "remain unchanged" mean?
+     *      1. replace partition (p1, p2) with temporary partition (tp1, tp2). After replacing, the partition
+     *         names are still p1 and p2.
+     * 
+     */
+    public void replaceTempPartitions(List<String> partitionNames, List<String> tempPartitionNames,
+            boolean strictRange, boolean useTempPartitionName) throws DdlException {
+        RangePartitionInfo rangeInfo = (RangePartitionInfo) partitionInfo;
+        RangePartitionInfo tempRangeInfo = tempPartitions.getPartitionInfo();
+
+        if (strictRange) {
+            // check if range of partitions and temp partitions are exactly same
+            List<Range<PartitionKey>> rangeList = Lists.newArrayList();
+            List<Range<PartitionKey>> tempRangeList = Lists.newArrayList();
+            for (String partName : partitionNames) {
+                Partition partition = nameToPartition.get(partName);
+                Preconditions.checkNotNull(partition);
+                rangeList.add(rangeInfo.getRange(partition.getId()));
+            }
+
+            for (String partName : tempPartitionNames) {
+                Partition partition = tempPartitions.getPartition(partName);
+                Preconditions.checkNotNull(partition);
+                tempRangeList.add(tempRangeInfo.getRange(partition.getId()));
+            }
+            RangeUtils.checkRangeListsMatch(rangeList, tempRangeList);
+        } else {
+            // check after replacing, whether the range will conflict
+            Set<Long> replacePartitionIds = Sets.newHashSet();
+            for (String partName : partitionNames) {
+                Partition partition = nameToPartition.get(partName);
+                Preconditions.checkNotNull(partition);
+                replacePartitionIds.add(partition.getId());
+            }
+            List<Range<PartitionKey>> replacePartitionRanges = Lists.newArrayList();
+            for (String partName : tempPartitionNames) {
+                Partition partition = tempPartitions.getPartition(partName);
+                Preconditions.checkNotNull(partition);
+                replacePartitionRanges.add(tempRangeInfo.getRange(partition.getId()));
+            }
+            List<Range<PartitionKey>> sortedRangeList = rangeInfo.getRangeList(replacePartitionIds);
+            RangeUtils.checkRangeConflict(sortedRangeList, replacePartitionRanges);
+        }
+        
+        // begin to replace
+        // 1. drop old partitions
+        List<Partition> droppedPartitions = Lists.newArrayList();
+        for (String partitionName : partitionNames) {
+            Partition partition = dropPartition(-1, partitionName, true);
+            droppedPartitions.add(partition);
+        }
+
+        // 2. add the temp partitions' range info to rangeInfo, and remove them from tempPartitions
+        for (String partitionName : tempPartitionNames) {
+            Partition partition = tempPartitions.getPartition(partitionName);
+            // add
+            addPartition(partition);
+            rangeInfo.addPartition(partition.getId(), tempRangeInfo.getRange(partition.getId()),
+                    tempRangeInfo.getDataProperty(partition.getId()),
+                    tempRangeInfo.getReplicationNum(partition.getId()),
+                    tempRangeInfo.getIsInMemory(partition.getId()));
+            // drop
+            dropTempPartition(partitionName, false /* do not drop the tablet */);
+        }
+
+        // 3. delete old partition's tablets in inverted index
+        for (Partition partition : droppedPartitions) {
+            for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
+                for (Tablet tablet : index.getTablets()) {
+                    Catalog.getCurrentInvertedIndex().deleteTablet(tablet.getId());
+                }
+            }
+        }
+
+        // rename so that after replacing, the partition names remain unchanged
+        if (!useTempPartitionName && partitionNames.size() == tempPartitionNames.size()) {
+            for (int i = 0; i < tempPartitionNames.size(); i++) {
+                renamePartition(tempPartitionNames.get(i), partitionNames.get(i));
+            }
+        }
+    }
+
+    public PartitionInfo getTempPartitonRangeInfo() {
+        return tempPartitions.getPartitionInfo();
+    }
+
+    public void addTempPartition(Partition partition) {
+        tempPartitions.addPartition(partition);
+    }
+
+    public void dropAllTempPartitions() {
+        tempPartitions.dropAll();
+    }
+
+    public Collection<Partition> getAllTempPartitions() {
+        return tempPartitions.getAllPartitions();
+    }
+
+    public boolean existTempPartitions() {
+        return !tempPartitions.isEmpty();
+    }
 }
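
To make the strictRange rule documented in replaceTempPartitions() above more concrete, here is a small self-contained sketch that checks the same "exactly the same coverage" property with Guava's TreeRangeSet over plain integer ranges. This is only an illustration; it is not the algorithm in this patch, which compares sorted PartitionKey ranges via RangeUtils.checkRangeListsMatch.

```java
import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

import java.util.Arrays;
import java.util.List;

public class StrictRangeDemo {
    // Coalesce a list of ranges into a canonical RangeSet; connected ranges such as
    // [0, 10) and [10, 20) are merged into [0, 20).
    static RangeSet<Integer> coalesce(List<Range<Integer>> ranges) {
        RangeSet<Integer> set = TreeRangeSet.create();
        ranges.forEach(set::add);
        return set;
    }

    public static void main(String[] args) {
        List<Range<Integer>> existing = Arrays.asList(Range.closedOpen(0, 10), Range.closedOpen(10, 20));
        List<Range<Integer>> temp = Arrays.asList(Range.closedOpen(0, 20));
        // true: {[0,10), [10,20)} covers exactly the same key space as {[0,20)}
        System.out.println(coalesce(existing).equals(coalesce(temp)));

        List<Range<Integer>> withGap = Arrays.asList(Range.closedOpen(0, 10), Range.closedOpen(15, 20));
        // false: the hole [10,15) makes the coverage different, so a strict replace would be rejected
        System.out.println(coalesce(withGap).equals(coalesce(temp)));
    }
}
```
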
diff --git a/fe/src/main/java/org/apache/doris/catalog/Partition.java b/fe/src/main/java/org/apache/doris/catalog/Partition.java
index aca8b31..3afc65b 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Partition.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Partition.java
@@ -28,6 +28,7 @@ import org.apache.doris.meta.MetaContext;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Maps;
+import com.google.gson.annotations.SerializedName;
 
 import org.apache.kudu.client.shaded.com.google.common.collect.Lists;
 import org.apache.logging.log4j.LogManager;
@@ -57,21 +58,26 @@ public class Partition extends MetaObject implements Writable {
         SCHEMA_CHANGE
     }
 
+    @SerializedName(value = "id")
     private long id;
+    @SerializedName(value = "name")
     private String name;
+    @SerializedName(value = "state")
     private PartitionState state;
-
+    @SerializedName(value = "baseIndex")
     private MaterializedIndex baseIndex;
     /*
      * Visible rollup indexes are indexes which are visible to user.
      * User can do query on them, show them in related 'show' stmt.
      */
+    @SerializedName(value = "idToVisibleRollupIndex")
     private Map<Long, MaterializedIndex> idToVisibleRollupIndex = Maps.newHashMap();
     /*
      * Shadow indexes are indexes which are not visible to user.
      * Query will not run on these shadow indexes, and user can not see them neither.
      * But load process will load data into these shadow indexes.
      */
+    @SerializedName(value = "idToShadowIndex")
     private Map<Long, MaterializedIndex> idToShadowIndex = Maps.newHashMap();
 
     /*
@@ -81,12 +87,17 @@ public class Partition extends MetaObject implements Writable {
      */
 
     // not have committedVersion because committedVersion = nextVersion - 1
+    @SerializedName(value = "committedVersionHash")
     private long committedVersionHash;
+    @SerializedName(value = "visibleVersion")
     private long visibleVersion;
+    @SerializedName(value = "visibleVersionHash")
     private long visibleVersionHash;
+    @SerializedName(value = "nextVersion")
     private long nextVersion;
+    @SerializedName(value = "nextVersionHash")
     private long nextVersionHash;
-
+    @SerializedName(value = "distributionInfo")
     private DistributionInfo distributionInfo;
 
     private Partition() {
diff --git a/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java b/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
index fb91acd..ab5f2b0 100644
--- a/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
+++ b/fe/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
@@ -21,9 +21,9 @@ import org.apache.doris.analysis.PartitionKeyDesc;
 import org.apache.doris.analysis.SingleRangePartitionDesc;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.DdlException;
+import org.apache.doris.common.util.RangeUtils;
 
 import com.google.common.base.Preconditions;
-import com.google.common.collect.BoundType;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Range;
 
@@ -35,11 +35,11 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class RangePartitionInfo extends PartitionInfo {
     private static final Logger LOG = LogManager.getLogger(RangePartitionInfo.class);
@@ -48,12 +48,6 @@ public class RangePartitionInfo extends PartitionInfo {
     // partition id -> partition range
     private Map<Long, Range<PartitionKey>> idToRange;
 
-    private static final Comparator<Map.Entry<Long, Range<PartitionKey>>> RANGE_MAP_ENTRY_COMPARATOR;
-
-    static {
-        RANGE_MAP_ENTRY_COMPARATOR = Comparator.comparing(o -> o.getValue().lowerEndpoint());
-    }
-
     public RangePartitionInfo() {
         // for persist
         super();
@@ -103,9 +97,7 @@ public class RangePartitionInfo extends PartitionInfo {
             throws AnalysisException, DdlException {
         Range<PartitionKey> newRange = null;
         // generate and sort the existing ranges
-        List<Map.Entry<Long, Range<PartitionKey>>> sortedRanges = new ArrayList<Map.Entry<Long, Range<PartitionKey>>>(
-                this.idToRange.entrySet());
-        Collections.sort(sortedRanges, RANGE_MAP_ENTRY_COMPARATOR);
+        List<Map.Entry<Long, Range<PartitionKey>>> sortedRanges = getSortedRangeMap();
 
         // create upper values for new range
         PartitionKey newRangeUpper = null;
@@ -159,19 +151,11 @@ public class RangePartitionInfo extends PartitionInfo {
 
         if (currentRange != null) {
             // check if range intersected
-            checkRangeIntersect(newRange, currentRange);
+            RangeUtils.checkRangeIntersect(newRange, currentRange);
         }
         return newRange;
     }
 
-    public static void checkRangeIntersect(Range<PartitionKey> range1, Range<PartitionKey> range2) throws DdlException {
-        if (range2.isConnected(range1)) {
-            if (!range2.intersection(range1).isEmpty()) {
-                throw new DdlException("Range " + range1 + " is intersected with range: " + range2);
-            }
-        }
-    }
-
     public Range<PartitionKey> handleNewSinglePartitionDesc(SingleRangePartitionDesc desc, 
             long partitionId) throws DdlException {
         Preconditions.checkArgument(desc.isAnalyzed());
@@ -220,80 +204,19 @@ public class RangePartitionInfo extends PartitionInfo {
 
     public List<Map.Entry<Long, Range<PartitionKey>>> getSortedRangeMap() {
         List<Map.Entry<Long, Range<PartitionKey>>> sortedList = Lists.newArrayList(this.idToRange.entrySet());
-        Collections.sort(sortedList, RANGE_MAP_ENTRY_COMPARATOR);
+        Collections.sort(sortedList, RangeUtils.RANGE_MAP_ENTRY_COMPARATOR);
         return sortedList;
     }
 
-    public static void writeRange(DataOutput out, Range<PartitionKey> range) throws IOException {
-        boolean hasLowerBound = false;
-        boolean hasUpperBound = false;
-
-        // write lower bound if lower bound exists
-        hasLowerBound = range.hasLowerBound();
-        out.writeBoolean(hasLowerBound);
-        if (hasLowerBound) {
-            PartitionKey lowerBound = range.lowerEndpoint();
-            out.writeBoolean(range.lowerBoundType() == BoundType.CLOSED);
-            lowerBound.write(out);
-        }
-
-        // write upper bound if upper bound exists
-        hasUpperBound = range.hasUpperBound();
-        out.writeBoolean(hasUpperBound);
-        if (hasUpperBound) {
-            PartitionKey upperBound = range.upperEndpoint();
-            out.writeBoolean(range.upperBoundType() == BoundType.CLOSED);
-            upperBound.write(out);
-        }
-    }
-
-    public static Range<PartitionKey> readRange(DataInput in) throws IOException {
-        boolean hasLowerBound = false;
-        boolean hasUpperBound = false;
-        boolean lowerBoundClosed = false;
-        boolean upperBoundClosed = false;
-        PartitionKey lowerBound = null;
-        PartitionKey upperBound = null;
-
-        hasLowerBound = in.readBoolean();
-        if (hasLowerBound) {
-            lowerBoundClosed = in.readBoolean();
-            lowerBound = PartitionKey.read(in);
-        }
-
-        hasUpperBound = in.readBoolean();
-        if (hasUpperBound) {
-            upperBoundClosed = in.readBoolean();
-            upperBound = PartitionKey.read(in);
-        }
-
-        // Totally 9 cases. Both lower bound and upper bound could be open, closed or not exist
-        if (hasLowerBound && lowerBoundClosed && hasUpperBound && upperBoundClosed) {
-            return Range.closed(lowerBound, upperBound);
-        }
-        if (hasLowerBound && lowerBoundClosed && hasUpperBound && !upperBoundClosed) {
-            return Range.closedOpen(lowerBound, upperBound);
-        }
-        if (hasLowerBound && !lowerBoundClosed && hasUpperBound && upperBoundClosed) {
-            return Range.openClosed(lowerBound, upperBound);
-        }
-        if (hasLowerBound && !lowerBoundClosed && hasUpperBound && !upperBoundClosed) {
-            return Range.open(lowerBound, upperBound);
-        }
-        if (hasLowerBound && lowerBoundClosed && !hasUpperBound) {
-            return Range.atLeast(lowerBound);
-        }
-        if (hasLowerBound && !lowerBoundClosed && !hasUpperBound) {
-            return Range.greaterThan(lowerBound);
-        }
-        if (!hasLowerBound && hasUpperBound && upperBoundClosed) {
-            return Range.atMost(upperBound);
-        }
-        if (!hasLowerBound && hasUpperBound && !upperBoundClosed) {
-            return Range.lessThan(upperBound);
+    // get a list of ranges, excluding partitions whose ids are in 'excludePartitionIds'
+    public List<Range<PartitionKey>> getRangeList(Set<Long> excludePartitionIds) {
+        List<Range<PartitionKey>> resultList = Lists.newArrayList();
+        for (Map.Entry<Long, Range<PartitionKey>> entry : idToRange.entrySet()) {
+            if (!excludePartitionIds.contains(entry.getKey())) {
+                resultList.add(entry.getValue());
+            }
         }
-        // Neither lower bound nor upper bound exists, return null. This means just one partition
-        return null;
+        return resultList;
     }
 
     public boolean checkRange(Range<PartitionKey> newRange) {
@@ -327,7 +250,7 @@ public class RangePartitionInfo extends PartitionInfo {
         out.writeInt(idToRange.size());
         for (Map.Entry<Long, Range<PartitionKey>> entry : idToRange.entrySet()) {
             out.writeLong(entry.getKey());
-            RangePartitionInfo.writeRange(out, entry.getValue());
+            RangeUtils.writeRange(out, entry.getValue());
         }
     }
 
@@ -345,7 +268,7 @@ public class RangePartitionInfo extends PartitionInfo {
         counter = in.readInt();
         for (int i = 0; i < counter; i++) {
             long partitionId = in.readLong();
-            Range<PartitionKey> range = RangePartitionInfo.readRange(in);
+            Range<PartitionKey> range = RangeUtils.readRange(in);
             idToRange.put(partitionId, range);
         }
     }
@@ -367,7 +290,7 @@ public class RangePartitionInfo extends PartitionInfo {
         // sort range
         List<Map.Entry<Long, Range<PartitionKey>>> entries =
                 new ArrayList<Map.Entry<Long, Range<PartitionKey>>>(this.idToRange.entrySet());
-        Collections.sort(entries, RANGE_MAP_ENTRY_COMPARATOR);
+        Collections.sort(entries, RangeUtils.RANGE_MAP_ENTRY_COMPARATOR);
 
         idx = 0;
         for (Map.Entry<Long, Range<PartitionKey>> entry : entries) {
diff --git a/fe/src/main/java/org/apache/doris/catalog/Replica.java b/fe/src/main/java/org/apache/doris/catalog/Replica.java
index 4588d5d..1e579ad 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Replica.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Replica.java
@@ -21,6 +21,8 @@ import org.apache.doris.common.FeMetaVersion;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 
+import com.google.gson.annotations.SerializedName;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -64,21 +66,31 @@ public class Replica implements Writable {
         SCHEMA_ERROR // replica's schema hash does not equal to index's schema hash
     }
     
+    @SerializedName(value = "id")
     private long id;
+    @SerializedName(value = "backendId")
     private long backendId;
+    @SerializedName(value = "version")
     private long version;
+    @SerializedName(value = "versionHash")
     private long versionHash;
     private int schemaHash = -1;
-
+    @SerializedName(value = "dataSize")
     private long dataSize = 0;
+    @SerializedName(value = "rowCount")
     private long rowCount = 0;
+    @SerializedName(value = "state")
     private ReplicaState state;
     
+    @SerializedName(value = "lastFailedVersion")
     private long lastFailedVersion = -1L;
+    @SerializedName(value = "lastFailedVersionHash")
     private long lastFailedVersionHash = 0L;
     // not serialized, not very important
     private long lastFailedTimestamp = 0;
+    @SerializedName(value = "lastSuccessVersion")
     private long lastSuccessVersion = -1L;
+    @SerializedName(value = "lastSuccessVersionHash")
     private long lastSuccessVersionHash = 0L;
 
 	private AtomicLong versionCount = new AtomicLong(-1);
diff --git a/fe/src/main/java/org/apache/doris/catalog/Tablet.java b/fe/src/main/java/org/apache/doris/catalog/Tablet.java
index 71370fb..1b1cb35 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Tablet.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Tablet.java
@@ -30,6 +30,7 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Sets;
+import com.google.gson.annotations.SerializedName;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -63,12 +64,15 @@ public class Tablet extends MetaObject implements Writable {
         NEED_FURTHER_REPAIR, // one of replicas need a definite repair.
     }
 
+    @SerializedName(value = "id")
     private long id;
+    @SerializedName(value = "replicas")
     private List<Replica> replicas;
-
+    @SerializedName(value = "checkedVersion")
     private long checkedVersion;
+    @SerializedName(value = "checkedVersionHash")
     private long checkedVersionHash;
-
+    @SerializedName(value = "isConsistent")
     private boolean isConsistent;
 
     // last time that the tablet checker checks this tablet.
diff --git a/fe/src/main/java/org/apache/doris/catalog/TempPartitions.java b/fe/src/main/java/org/apache/doris/catalog/TempPartitions.java
new file mode 100644
index 0000000..49948be
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/catalog/TempPartitions.java
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
+import org.apache.doris.common.io.Writable;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+// This class saves all temp partitions of a table.
+// Temp partitions are used to implement the overwrite load:
+// a user can load data into some of the temp partitions,
+// and then replace the formal partitions with these temp partitions
+// to perform an overwrite load.
+public class TempPartitions implements Writable {
+    private Map<Long, Partition> idToPartition = Maps.newHashMap();
+    private Map<String, Partition> nameToPartition = Maps.newHashMap();
+    private RangePartitionInfo partitionInfo;
+
+    public TempPartitions() {
+    }
+
+    public TempPartitions(List<Column> partCols) {
+        partitionInfo = new RangePartitionInfo(partCols);
+    }
+
+    public RangePartitionInfo getPartitionInfo() {
+        return partitionInfo;
+    }
+
+    public void addPartition(Partition partition) {
+        idToPartition.put(partition.getId(), partition);
+        nameToPartition.put(partition.getName(), partition);
+    }
+
+    /*
+     * Drop the temp partition with the given name.
+     * If needDropTablet is true, also remove its tablets from the tablet inverted index.
+     */
+    public void dropPartition(String partitionName, boolean needDropTablet) {
+        Partition partition = nameToPartition.get(partitionName);
+        if (partition != null) {
+            idToPartition.remove(partition.getId());
+            nameToPartition.remove(partitionName);
+
+            Preconditions.checkState(partitionInfo.getType() == PartitionType.RANGE);
+            RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
+            // drop partition info
+            rangePartitionInfo.dropPartition(partition.getId());
+
+            if (!Catalog.isCheckpointThread() && needDropTablet) {
+                TabletInvertedIndex invertedIndex = Catalog.getCurrentInvertedIndex();
+                for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.ALL)) {
+                    for (Tablet tablet : index.getTablets()) {
+                        invertedIndex.deleteTablet(tablet.getId());
+                    }
+                }
+            }
+        }
+    }
+
+    public Partition getPartition(long partitionId) {
+        return idToPartition.get(partitionId);
+    }
+
+    public Partition getPartition(String partitionName) {
+        return nameToPartition.get(partitionName);
+    }
+
+    public List<Partition> getAllPartitions() {
+        return Lists.newArrayList(idToPartition.values());
+    }
+
+    public boolean hasPartition(String partName) {
+        return nameToPartition.containsKey(partName);
+    }
+
+    public boolean isEmpty() {
+        return idToPartition.isEmpty();
+    }
+
+    // drop all temp partitions
+    public void dropAll() {
+        for (String partName : Lists.newArrayList(nameToPartition.keySet())) { // copy keys to avoid ConcurrentModificationException
+            dropPartition(partName, true);
+        }
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        // PartitionInfo is hard to serialize with GSON, so I have to use the old way...
+        int size = idToPartition.size();
+        out.writeInt(size);
+        for (Partition partition : idToPartition.values()) {
+            partition.write(out);
+        }
+        if (partitionInfo != null) {
+            out.writeBoolean(true);
+            partitionInfo.write(out);
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    public static TempPartitions read(DataInput in) throws IOException {
+        TempPartitions tempPartitions = new TempPartitions();
+        tempPartitions.readFields(in);
+        return tempPartitions;
+    }
+
+    private void readFields(DataInput in) throws IOException {
+        int size = in.readInt();
+        for (int i = 0; i < size; i++) {
+            Partition partition = Partition.read(in);
+            idToPartition.put(partition.getId(), partition);
+            nameToPartition.put(partition.getName(), partition);
+        }
+        if (in.readBoolean()) {
+            partitionInfo = (RangePartitionInfo) RangePartitionInfo.read(in);
+        }
+    }
+}
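
The write()/readFields() pair above follows the usual Writable convention in this codebase: values must be read back in exactly the order they were written, guarded here by a partition count and a boolean presence flag for partitionInfo. Below is a minimal standalone round-trip sketch of that pattern with plain JDK streams; the Item class is hypothetical and merely stands in for Partition.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class WritableRoundTripDemo {
    static class Item {
        long id;
        String name;
        Item(long id, String name) { this.id = id; this.name = name; }

        void write(DataOutput out) throws IOException {
            out.writeLong(id);
            out.writeUTF(name);
        }

        static Item read(DataInput in) throws IOException {
            return new Item(in.readLong(), in.readUTF()); // same order as write()
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);

        // write a count followed by the entries, mirroring TempPartitions.write()
        Item[] items = { new Item(10001, "tp1"), new Item(10002, "tp2") };
        out.writeInt(items.length);
        for (Item item : items) {
            item.write(out);
        }

        DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
        int count = in.readInt();
        for (int i = 0; i < count; i++) {
            Item item = Item.read(in);
            System.out.println(item.id + " -> " + item.name);
        }
    }
}
```
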
diff --git a/fe/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java b/fe/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
index 19b87d3..49036e9 100644
--- a/fe/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
+++ b/fe/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
@@ -17,9 +17,6 @@
 
 package org.apache.doris.clone;
 
-import com.google.common.collect.Maps;
-import com.google.common.collect.Range;
-import com.google.common.collect.Sets;
 import org.apache.doris.analysis.AddPartitionClause;
 import org.apache.doris.analysis.DistributionDesc;
 import org.apache.doris.analysis.HashDistributionDesc;
@@ -43,7 +40,13 @@ import org.apache.doris.common.DdlException;
 import org.apache.doris.common.Pair;
 import org.apache.doris.common.util.DynamicPartitionUtil;
 import org.apache.doris.common.util.MasterDaemon;
+import org.apache.doris.common.util.RangeUtils;
 import org.apache.doris.common.util.TimeUtils;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Range;
+import com.google.common.collect.Sets;
+
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -200,7 +203,7 @@ public class DynamicPartitionScheduler extends MasterDaemon {
                     for (Range<PartitionKey> partitionKeyRange : info.getIdToRange().values()) {
                         // only support single column partition now
                         try {
-                            RangePartitionInfo.checkRangeIntersect(partitionKeyRange, addPartitionKeyRange);
+                            RangeUtils.checkRangeIntersect(partitionKeyRange, addPartitionKeyRange);
                         } catch (DdlException e) {
                             isPartitionExists = true;
                             if (addPartitionKeyRange.equals(partitionKeyRange)) {
@@ -231,7 +234,7 @@ public class DynamicPartitionScheduler extends MasterDaemon {
                     DistributionDesc distributionDesc = new HashDistributionDesc(dynamicPartitionProperty.getBuckets(), distColumnNames);
 
                     // add partition according to partition desc and distribution desc
-                    addPartitionClauses.add(new AddPartitionClause(rangePartitionDesc, distributionDesc, null));
+                    addPartitionClauses.add(new AddPartitionClause(rangePartitionDesc, distributionDesc, null, false));
                 }
                 tableName = olapTable.getName();
             } finally {
diff --git a/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java b/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
index daddd35..0d260e8 100644
--- a/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
+++ b/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
@@ -157,6 +157,9 @@ public final class FeMetaVersion {
     public static final int VERSION_72 = 72;
     // broker persist isAlive
     public static final int VERSION_73 = 73;
+    // temp partitions
+    public static final int VERSION_74 = 74;
+
     // note: when increment meta version, should assign the latest version to VERSION_CURRENT
-    public static final int VERSION_CURRENT = VERSION_73;
+    public static final int VERSION_CURRENT = VERSION_74;
 }
diff --git a/fe/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
index 217cd86..447111b 100644
--- a/fe/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
+++ b/fe/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
@@ -17,13 +17,6 @@
 
 package org.apache.doris.common.proc;
 
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-
-import com.google.common.collect.Range;
 import org.apache.doris.analysis.BinaryPredicate;
 import org.apache.doris.analysis.DateLiteral;
 import org.apache.doris.analysis.Expr;
@@ -38,7 +31,7 @@ import org.apache.doris.catalog.DistributionInfo.DistributionInfoType;
 import org.apache.doris.catalog.HashDistributionInfo;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
-import org.apache.doris.catalog.PartitionKey;
+import org.apache.doris.catalog.PartitionInfo;
 import org.apache.doris.catalog.PartitionType;
 import org.apache.doris.catalog.RangePartitionInfo;
 import org.apache.doris.catalog.Table.TableType;
@@ -52,15 +45,23 @@ import org.apache.doris.common.util.ListComparator;
 import org.apache.doris.common.util.OrderByPair;
 import org.apache.doris.common.util.TimeUtils;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
 
 
 /*
- * SHOW PROC /dbs/dbId/tableId/partitions
- * show partitions' detail info within a table
+ * SHOW PROC /dbs/dbId/tableId/partitions, or
+ * SHOW PROC /dbs/dbId/tableId/temp_partitions
+ * show [temp] partitions' detail info within a table
  */
 public class PartitionsProcDir implements ProcDirInterface {
     public static final ImmutableList<String> TITLE_NAMES = new ImmutableList.Builder<String>()
@@ -74,10 +75,12 @@ public class PartitionsProcDir implements ProcDirInterface {
 
     private Database db;
     private OlapTable olapTable;
+    private boolean isTempPartition = false;
 
-    public PartitionsProcDir(Database db, OlapTable olapTable) {
+    public PartitionsProcDir(Database db, OlapTable olapTable, boolean isTempPartition) {
         this.db = db;
         this.olapTable = olapTable;
+        this.isTempPartition = isTempPartition;
     }
 
     public boolean filter(String columnName, Comparable element, Map<String, Expr> filterMap) throws AnalysisException {
@@ -197,7 +200,7 @@ public class PartitionsProcDir implements ProcDirInterface {
         return result;
     }
 
-    public  List<List<Comparable>> getPartitionInfos() {
+    private List<List<Comparable>> getPartitionInfos() {
         Preconditions.checkNotNull(db);
         Preconditions.checkNotNull(olapTable);
         Preconditions.checkState(olapTable.getType() == TableType.OLAP);
@@ -206,121 +209,86 @@ public class PartitionsProcDir implements ProcDirInterface {
         List<List<Comparable>> partitionInfos = new ArrayList<List<Comparable>>();
         db.readLock();
         try {
-            RangePartitionInfo rangePartitionInfo = null;
+            Set<String> partitionsNames;
+            if (isTempPartition) {
+                partitionsNames = olapTable.getAllTempPartitions().stream().map(p -> p.getName()).collect(Collectors.toSet());
+            } else {
+                partitionsNames = olapTable.getPartitions().stream().map(p -> p.getName()).collect(Collectors.toSet());
+            }
+
             Joiner joiner = Joiner.on(", ");
-            if (olapTable.getPartitionInfo().getType() == PartitionType.RANGE) {
-                rangePartitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo();
-                List<Map.Entry<Long, Range<PartitionKey>>> sortedRange = rangePartitionInfo.getSortedRangeMap();
-                for (Map.Entry<Long, Range<PartitionKey>> entry : sortedRange) {
-                    long partitionId = entry.getKey();
-                    Partition partition = olapTable.getPartition(partitionId);
-                    List<Comparable> partitionInfo = new ArrayList<Comparable>();
-                    String partitionName = partition.getName();
-                    partitionInfo.add(partitionId);
-                    partitionInfo.add(partitionName);
-                    partitionInfo.add(partition.getVisibleVersion());
-                    partitionInfo.add(partition.getVisibleVersionHash());
-                    partitionInfo.add(partition.getState());
-
-                    // partition
-                    List<Column> partitionColumns = rangePartitionInfo.getPartitionColumns();
+            PartitionInfo tblPartitionInfo;
+            if (isTempPartition) {
+                tblPartitionInfo = olapTable.getTempPartitonRangeInfo();
+                if (tblPartitionInfo == null) {
+                    // no temp partitions in this table, return empty result
+                    return partitionInfos;
+                }
+            } else {
+                tblPartitionInfo = olapTable.getPartitionInfo();
+            }
+            for (String partName : partitionsNames) {
+                Partition partition = olapTable.getPartition(partName, isTempPartition);
+                long partitionId = partition.getId();
+
+                List<Comparable> partitionInfo = new ArrayList<Comparable>();
+                String partitionName = partition.getName();
+                partitionInfo.add(partitionId);
+                partitionInfo.add(partitionName);
+                partitionInfo.add(partition.getVisibleVersion());
+                partitionInfo.add(partition.getVisibleVersionHash());
+                partitionInfo.add(partition.getState());
+
+                if (tblPartitionInfo.getType() == PartitionType.RANGE) {
+                    // partition range info
+                    List<Column> partitionColumns = ((RangePartitionInfo) tblPartitionInfo).getPartitionColumns();
                     List<String> colNames = new ArrayList<String>();
                     for (Column column : partitionColumns) {
                         colNames.add(column.getName());
                     }
                     partitionInfo.add(joiner.join(colNames));
-
-                    partitionInfo.add(entry.getValue().toString());
-
-                    // distribution
-                    DistributionInfo distributionInfo = partition.getDistributionInfo();
-                    if (distributionInfo.getType() == DistributionInfoType.HASH) {
-                        HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
-                        List<Column> distributionColumns = hashDistributionInfo.getDistributionColumns();
-                        StringBuilder sb = new StringBuilder();
-                        for (int i = 0; i < distributionColumns.size(); i++) {
-                            if (i != 0) {
-                                sb.append(", ");
-                            }
-                            sb.append(distributionColumns.get(i).getName());
-                        }
-                        partitionInfo.add(sb.toString());
-                    } else {
-                        partitionInfo.add("ALL KEY");
-                    }
-
-                    partitionInfo.add(distributionInfo.getBucketNum());
-
-                    short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId);
-                    partitionInfo.add(String.valueOf(replicationNum));
-
-                    DataProperty dataProperty = rangePartitionInfo.getDataProperty(partitionId);
-                    partitionInfo.add(dataProperty.getStorageMedium().name());
-                    partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs()));
-
-                    partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime()));
-
-                    long dataSize = partition.getDataSize();
-                    Pair<Double, String> sizePair = DebugUtil.getByteUint(dataSize);
-                    String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(sizePair.first) + " "
-                        + sizePair.second;
-                    partitionInfo.add(readableSize);
-
-                    partitionInfo.add(olapTable.getPartitionInfo().getIsInMemory(partitionId));
-                    partitionInfos.add(partitionInfo);
-                }
-            } else {
-                for (Partition partition : olapTable.getPartitions()) {
-                    List<Comparable> partitionInfo = new ArrayList<Comparable>();
-                    String partitionName = partition.getName();
-                    long partitionId = partition.getId();
-                    partitionInfo.add(partitionId);
-                    partitionInfo.add(partitionName);
-                    partitionInfo.add(partition.getVisibleVersion());
-                    partitionInfo.add(partition.getVisibleVersionHash());
-                    partitionInfo.add(partition.getState());
-
-                    // partition
+                    partitionInfo.add(((RangePartitionInfo) tblPartitionInfo).getRange(partitionId).toString());
+                } else {
                     partitionInfo.add("");
                     partitionInfo.add("");
+                }
 
-                    // distribution
-                    DistributionInfo distributionInfo = partition.getDistributionInfo();
-                    if (distributionInfo.getType() == DistributionInfoType.HASH) {
-                        HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
-                        List<Column> distributionColumns = hashDistributionInfo.getDistributionColumns();
-                        StringBuilder sb = new StringBuilder();
-                        for (int i = 0; i < distributionColumns.size(); i++) {
-                            if (i != 0) {
-                                sb.append(", ");
-                            }
-                            sb.append(distributionColumns.get(i).getName());
+                // distribution
+                DistributionInfo distributionInfo = partition.getDistributionInfo();
+                if (distributionInfo.getType() == DistributionInfoType.HASH) {
+                    HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
+                    List<Column> distributionColumns = hashDistributionInfo.getDistributionColumns();
+                    StringBuilder sb = new StringBuilder();
+                    for (int i = 0; i < distributionColumns.size(); i++) {
+                        if (i != 0) {
+                            sb.append(", ");
                         }
-                        partitionInfo.add(sb.toString());
-                    } else {
-                        partitionInfo.add("ALL KEY");
+                        sb.append(distributionColumns.get(i).getName());
                     }
+                    partitionInfo.add(sb.toString());
+                } else {
+                    partitionInfo.add("ALL KEY");
+                }
 
-                    partitionInfo.add(distributionInfo.getBucketNum());
+                partitionInfo.add(distributionInfo.getBucketNum());
 
-                    short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId);
-                    partitionInfo.add(String.valueOf(replicationNum));
+                short replicationNum = tblPartitionInfo.getReplicationNum(partitionId);
+                partitionInfo.add(String.valueOf(replicationNum));
 
-                    DataProperty dataProperty = olapTable.getPartitionInfo().getDataProperty(partitionId);
-                    partitionInfo.add(dataProperty.getStorageMedium().name());
-                    partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs()));
+                DataProperty dataProperty = tblPartitionInfo.getDataProperty(partitionId);
+                partitionInfo.add(dataProperty.getStorageMedium().name());
+                partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs()));
 
-                    partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime()));
+                partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime()));
 
-                    long dataSize = partition.getDataSize();
-                    Pair<Double, String> sizePair = DebugUtil.getByteUint(dataSize);
-                    String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(sizePair.first) + " "
+                long dataSize = partition.getDataSize();
+                Pair<Double, String> sizePair = DebugUtil.getByteUint(dataSize);
+                String readableSize = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(sizePair.first) + " "
                         + sizePair.second;
-                    partitionInfo.add(readableSize);
-                    partitionInfo.add(olapTable.getPartitionInfo().getIsInMemory(partitionId));
+                partitionInfo.add(readableSize);
+                partitionInfo.add(tblPartitionInfo.getIsInMemory(partitionId));
 
-                    partitionInfos.add(partitionInfo);
-                }
+                partitionInfos.add(partitionInfo);
             }
         } finally {
             db.readUnlock();
diff --git a/fe/src/main/java/org/apache/doris/common/proc/TableProcDir.java b/fe/src/main/java/org/apache/doris/common/proc/TableProcDir.java
index f02c2ec..3e8d811 100644
--- a/fe/src/main/java/org/apache/doris/common/proc/TableProcDir.java
+++ b/fe/src/main/java/org/apache/doris/common/proc/TableProcDir.java
@@ -40,9 +40,11 @@ public class TableProcDir implements ProcDirInterface {
 
     public static final String INDEX_SCHEMA = "index_schema";
     private static final String PARTITIONS = "partitions";
+    private static final String TEMP_PARTITIONS = "temp_partitions";
 
     private static final ImmutableList<String> CHILDREN_NODES = new ImmutableList.Builder<String>()
             .add(PARTITIONS)
+            .add(TEMP_PARTITIONS)
             .add(INDEX_SCHEMA)
             .build();
 
@@ -81,12 +83,18 @@ public class TableProcDir implements ProcDirInterface {
 
         if (entryName.equals(PARTITIONS)) {
             if (table.getType() == TableType.OLAP) {
-                return new PartitionsProcDir(db, (OlapTable) table);
+                return new PartitionsProcDir(db, (OlapTable) table, false);
             } else if (table.getType() == TableType.ELASTICSEARCH) {
                 return new EsPartitionsProcDir(db, (EsTable) table);
             } else {
                 throw new AnalysisException("Table[" + table.getName() + "] is not a OLAP or ELASTICSEARCH table");
             }
+        } else if (entryName.equals(TEMP_PARTITIONS)) {
+            if (table.getType() == TableType.OLAP) {
+                return new PartitionsProcDir(db, (OlapTable) table, true);
+            } else {
+                throw new AnalysisException("Table[" + table.getName() + "] does not have temp partitions");
+            }
         } else if (entryName.equals(INDEX_SCHEMA)) {
             return new IndexInfoProcDir(db, table);
         } else {
diff --git a/fe/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 408a039..34309ab 100644
--- a/fe/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -28,9 +28,9 @@ import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.Pair;
 import org.apache.doris.system.SystemInfoService;
+import org.apache.doris.thrift.TStorageFormat;
 import org.apache.doris.thrift.TStorageMedium;
 import org.apache.doris.thrift.TStorageType;
-import org.apache.doris.thrift.TStorageFormat;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -82,6 +82,9 @@ public class PropertyAnalyzer {
 
     public static final String PROPERTIES_INMEMORY = "in_memory";
 
+    public static final String PROPERTIES_STRICT_RANGE = "strict_range";
+    public static final String PROPERTIES_USE_TEMP_PARTITION_NAME = "use_temp_partition_name";
+
     public static DataProperty analyzeDataProperty(Map<String, String> properties, DataProperty oldDataProperty)
             throws AnalysisException {
         if (properties == null) {
@@ -399,12 +402,13 @@ public class PropertyAnalyzer {
         }
     }
 
-    public static boolean analyzeInMemory(Map<String, String> properties, boolean defaultInMemory) {
-        if (properties != null && properties.containsKey(PROPERTIES_INMEMORY)) {
-            String inMemory = properties.get(PROPERTIES_INMEMORY);
-            properties.remove(PROPERTIES_INMEMORY);
-            return Boolean.parseBoolean(inMemory);
+    // analyze common boolean properties, such as "in_memory" = "false"
+    public static boolean analyzeBooleanProp(Map<String, String> properties, String propKey, boolean defaultVal) {
+        if (properties != null && properties.containsKey(propKey)) {
+            String val = properties.get(propKey);
+            properties.remove(propKey);
+            return Boolean.parseBoolean(val);
         }
-        return defaultInMemory;
+        return defaultVal;
     }
 }
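
The new strict_range and use_temp_partition_name properties are read through the generalized analyzeBooleanProp() helper above. Here is a small standalone sketch of how such a property map is consumed; the helper body mirrors the diff, while the surrounding analysis code is omitted, so the call sites below are illustrative only.

```java
import java.util.HashMap;
import java.util.Map;

public class BooleanPropDemo {
    // same logic as PropertyAnalyzer.analyzeBooleanProp in the diff above
    static boolean analyzeBooleanProp(Map<String, String> properties, String propKey, boolean defaultVal) {
        if (properties != null && properties.containsKey(propKey)) {
            String val = properties.get(propKey);
            properties.remove(propKey); // the property is consumed once it has been read
            return Boolean.parseBoolean(val);
        }
        return defaultVal;
    }

    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("strict_range", "false");

        boolean strictRange = analyzeBooleanProp(props, "strict_range", true);
        boolean useTempName = analyzeBooleanProp(props, "use_temp_partition_name", false);

        System.out.println(strictRange); // false, taken from the map
        System.out.println(useTempName); // false, the default (key not present)
        System.out.println(props);       // {} - consumed keys have been removed
    }
}
```
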
diff --git a/fe/src/main/java/org/apache/doris/common/util/RangeUtils.java b/fe/src/main/java/org/apache/doris/common/util/RangeUtils.java
new file mode 100644
index 0000000..e91b94a
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/common/util/RangeUtils.java
@@ -0,0 +1,210 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.common.util;
+
+import org.apache.doris.catalog.PartitionKey;
+import org.apache.doris.common.DdlException;
+
+import com.google.common.collect.BoundType;
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+
+public class RangeUtils {
+    
+    public static final Comparator<Map.Entry<Long, Range<PartitionKey>>> RANGE_MAP_ENTRY_COMPARATOR =
+            Comparator.comparing(o -> o.getValue().lowerEndpoint());
+
+    public static final Comparator<Range<PartitionKey>> RANGE_COMPARATOR = 
+            Comparator.comparing(o -> o.lowerEndpoint());
+
+    public static void checkRangeIntersect(Range<PartitionKey> range1, Range<PartitionKey> range2) throws DdlException {
+        if (range2.isConnected(range1)) {
+            if (!range2.intersection(range1).isEmpty()) {
+                throw new DdlException("Range " + range1 + " is intersected with range: " + range2);
+            }
+        }
+    }
+
+    /*
+     * Pass only if the 2 range lists are exactly the same.
+     * What does "exactly the same" mean?
+     *      1. {[0, 10), [10, 20)} is exactly the same as {[0, 20)}
+     *      2. {[0, 10), [15, 20)} is exactly the same as {[0, 10), [15, 18), [18, 20)}
+     *      3. {[0, 10), [15, 20)} is exactly the same as {[0, 10), [15, 20)}
+     *      4. {[0, 10), [15, 20)} is NOT exactly the same as {[0, 20)}
+     *
+     *  Here is an example to explain the algorithm:
+     *      list1: {[0, 10), [15, 20)}
+     *      list2: {[0, 10), [15, 18), [18, 20)}
+     *
+     *  1. Sort both lists first (the 2 lists above are already sorted).
+     *  2. Compare the ranges starting from index 0: [0, 10) and [0, 10)
+     *      2.1 lower bounds (0 and 0) are equal.
+     *      2.2 upper bounds (10 and 10) are equal.
+     *  3. Compare the next 2 ranges: [15, 20) and [15, 18)
+     *      3.1 lower bounds (15 and 15) are equal.
+     *      3.2 upper bounds (20 and 18) are not equal, and 20 > 18.
+     *      3.3 Split range [15, 20) into [15, 18) and [18, 20).
+     *  4. Compare the next 2 ranges: [18, 20) and [18, 20), where the first [18, 20) is the split range.
+     *      4.1 lower bounds (18 and 18) are equal.
+     *      4.2 upper bounds (20 and 20) are equal.
+     *  5. There are no more ranges, so the 2 lists are exactly the same.
+     */
+    public static void checkRangeListsMatch(List<Range<PartitionKey>> list1, List<Range<PartitionKey>> list2) throws DdlException {
+        Collections.sort(list1, RangeUtils.RANGE_COMPARATOR);
+        Collections.sort(list2, RangeUtils.RANGE_COMPARATOR);
+
+        int idx1 = 0;
+        int idx2 = 0;
+        Range<PartitionKey> range1 = list1.get(idx1);
+        Range<PartitionKey> range2 = list2.get(idx2);
+        while (true) {
+            if (range1.lowerEndpoint().compareTo(range2.lowerEndpoint()) != 0) {
+                throw new DdlException("2 range lists are not strictly matched. "
+                        + range1.lowerEndpoint() + " vs. " + range2.lowerEndpoint());
+            }
+            
+            int res = range1.upperEndpoint().compareTo(range2.upperEndpoint());
+            if (res == 0) {
+                ++idx1;
+                ++idx2;
+                if (idx1 == list1.size() || idx2 == list2.size()) {
+                    break;
+                }
+                range1 = list1.get(idx1);
+                range2 = list2.get(idx2);
+                continue;
+            } else if (res > 0) {
+                if (++idx2 == list2.size()) {
+                    break;
+                }
+                range1 = Range.closedOpen(range2.upperEndpoint(), range1.upperEndpoint());
+                range2 = list2.get(idx2);
+            } else {
+                if (++idx1 == list1.size()) {
+                    break;
+                }
+                range2 = Range.closedOpen(range1.upperEndpoint(), range2.upperEndpoint());
+                range1 = list1.get(idx1);
+            }
+        }
+
+        if (idx1 < list1.size() || idx2 < list2.size()) {
+            throw new DdlException("2 range lists are not strictly matched. "
+                    + list1 + " vs. " + list2);
+        }
+    }
+
+    public static void writeRange(DataOutput out, Range<PartitionKey> range) throws IOException {
+        boolean hasLowerBound = false;
+        boolean hasUpperBound = false;
+
+        // write lower bound if lower bound exists
+        hasLowerBound = range.hasLowerBound();
+        out.writeBoolean(hasLowerBound);
+        if (hasLowerBound) {
+            PartitionKey lowerBound = range.lowerEndpoint();
+            out.writeBoolean(range.lowerBoundType() == BoundType.CLOSED);
+            lowerBound.write(out);
+        }
+
+        // write upper bound if upper bound exists
+        hasUpperBound = range.hasUpperBound();
+        out.writeBoolean(hasUpperBound);
+        if (hasUpperBound) {
+            PartitionKey upperBound = range.upperEndpoint();
+            out.writeBoolean(range.upperBoundType() == BoundType.CLOSED);
+            upperBound.write(out);
+        }
+    }
+
+    public static Range<PartitionKey> readRange(DataInput in) throws IOException {
+        boolean hasLowerBound = false;
+        boolean hasUpperBound = false;
+        boolean lowerBoundClosed = false;
+        boolean upperBoundClosed = false;
+        PartitionKey lowerBound = null;
+        PartitionKey upperBound = null;
+
+        hasLowerBound = in.readBoolean();
+        if (hasLowerBound) {
+            lowerBoundClosed = in.readBoolean();
+            lowerBound = PartitionKey.read(in);
+        }
+
+        hasUpperBound = in.readBoolean();
+        if (hasUpperBound) {
+            upperBoundClosed = in.readBoolean();
+            upperBound = PartitionKey.read(in);
+        }
+
+        // There are 9 cases in total: each of the lower and upper bounds can be open, closed, or absent.
+        if (hasLowerBound && lowerBoundClosed && hasUpperBound && upperBoundClosed) {
+            return Range.closed(lowerBound, upperBound);
+        }
+        if (hasLowerBound && lowerBoundClosed && hasUpperBound && !upperBoundClosed) {
+            return Range.closedOpen(lowerBound, upperBound);
+        }
+        if (hasLowerBound && !lowerBoundClosed && hasUpperBound && upperBoundClosed) {
+            return Range.openClosed(lowerBound, upperBound);
+        }
+        if (hasLowerBound && !lowerBoundClosed && hasUpperBound && !upperBoundClosed) {
+            return Range.open(lowerBound, upperBound);
+        }
+        if (hasLowerBound && lowerBoundClosed && !hasUpperBound) {
+            return Range.atLeast(lowerBound);
+        }
+        if (hasLowerBound && !lowerBoundClosed && !hasUpperBound) {
+            return Range.greaterThan(lowerBound);
+        }
+        if (!hasLowerBound && hasUpperBound && upperBoundClosed) {
+            return Range.atMost(upperBound);
+        }
+        if (!hasLowerBound && hasUpperBound && !upperBoundClosed) {
+            return Range.lessThan(upperBound);
+        }
+        // Neither lower bound nor upper bound exists; return null. This means there is just one partition.
+        return null;
+    }
+
+    // check if any ranges in "rangesToBeChecked" conflict with ranges in "baseRanges".
+    public static void checkRangeConflict(List<Range<PartitionKey>> baseRanges,
+            List<Range<PartitionKey>> rangesToBeChecked) throws DdlException {
+
+        RangeMap<PartitionKey, Long> baseRangeMap = TreeRangeMap.create();
+        long idx = 0;
+        for (Range<PartitionKey> baseRange : baseRanges) {
+            baseRangeMap.put(baseRange, idx++);
+        }
+
+        for (Range<PartitionKey> range : rangesToBeChecked) {
+            if (!baseRangeMap.subRangeMap(range).asMapOfRanges().isEmpty()) {
+                throw new DdlException("Range: " + range + " conflicts with existing range");
+            }
+        }
+    }
+}
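
The algorithm in `checkRangeListsMatch` can be hard to follow from the comment alone. Below is an executable illustration of the same split-and-compare check (not part of this patch): `PartitionKey` endpoints are replaced with plain `Integer` values so the sketch runs standalone, and the method returns a boolean instead of throwing `DdlException`.

```java
import com.google.common.collect.Lists;
import com.google.common.collect.Range;

import java.util.Comparator;
import java.util.List;

// Illustration of the split-and-compare algorithm used by RangeUtils.checkRangeListsMatch,
// with Integer endpoints instead of PartitionKey.
public class RangeMatchSketch {
    private static final Comparator<Range<Integer>> RANGE_COMPARATOR =
            Comparator.comparing(o -> o.lowerEndpoint());

    static boolean exactlyMatched(List<Range<Integer>> list1, List<Range<Integer>> list2) {
        list1.sort(RANGE_COMPARATOR);
        list2.sort(RANGE_COMPARATOR);
        int idx1 = 0;
        int idx2 = 0;
        Range<Integer> range1 = list1.get(idx1);
        Range<Integer> range2 = list2.get(idx2);
        while (true) {
            if (!range1.lowerEndpoint().equals(range2.lowerEndpoint())) {
                return false;
            }
            int res = range1.upperEndpoint().compareTo(range2.upperEndpoint());
            if (res == 0) {
                ++idx1;
                ++idx2;
                if (idx1 == list1.size() || idx2 == list2.size()) {
                    break;
                }
                range1 = list1.get(idx1);
                range2 = list2.get(idx2);
            } else if (res > 0) {
                // range1 extends past range2: keep the remainder of range1 for the next round.
                if (++idx2 == list2.size()) {
                    break;
                }
                range1 = Range.closedOpen(range2.upperEndpoint(), range1.upperEndpoint());
                range2 = list2.get(idx2);
            } else {
                if (++idx1 == list1.size()) {
                    break;
                }
                range2 = Range.closedOpen(range1.upperEndpoint(), range2.upperEndpoint());
                range1 = list1.get(idx1);
            }
        }
        // Any leftover range on either side means the lists do not match exactly.
        return idx1 >= list1.size() && idx2 >= list2.size();
    }

    public static void main(String[] args) {
        List<Range<Integer>> a = Lists.newArrayList(Range.closedOpen(0, 10), Range.closedOpen(15, 20));
        List<Range<Integer>> b = Lists.newArrayList(
                Range.closedOpen(0, 10), Range.closedOpen(15, 18), Range.closedOpen(18, 20));
        System.out.println(exactlyMatched(a, b));                                           // true
        System.out.println(exactlyMatched(a, Lists.newArrayList(Range.closedOpen(0, 20)))); // false
    }
}
```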
diff --git a/fe/src/main/java/org/apache/doris/http/action/SystemAction.java b/fe/src/main/java/org/apache/doris/http/action/SystemAction.java
index 4966d4f..e4347b2 100644
--- a/fe/src/main/java/org/apache/doris/http/action/SystemAction.java
+++ b/fe/src/main/java/org/apache/doris/http/action/SystemAction.java
@@ -18,6 +18,7 @@
 package org.apache.doris.http.action;
 
 import org.apache.doris.analysis.RedirectStatus;
+import org.apache.doris.analysis.UserIdentity;
 import org.apache.doris.catalog.Catalog;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.proc.ProcDirInterface;
@@ -86,6 +87,7 @@ public class SystemAction extends WebBaseAction {
             context.setCatalog(Catalog.getCurrentCatalog());
             context.setCluster(SystemInfoService.DEFAULT_CLUSTER);
             context.setQualifiedUser(PaloAuth.ADMIN_USER);
+            context.setCurrentUserIdentity(UserIdentity.ADMIN);
             MasterOpExecutor masterOpExecutor = new MasterOpExecutor(showProcStmt, context,
                     RedirectStatus.FORWARD_NO_SYNC);
             try {
diff --git a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
index 17a5c3c..2b4cf5d 100644
--- a/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
+++ b/fe/src/main/java/org/apache/doris/journal/JournalEntity.java
@@ -63,6 +63,7 @@ import org.apache.doris.persist.OperationType;
 import org.apache.doris.persist.PartitionPersistInfo;
 import org.apache.doris.persist.PrivInfo;
 import org.apache.doris.persist.RecoverInfo;
+import org.apache.doris.persist.ReplacePartitionOperationLog;
 import org.apache.doris.persist.ReplicaPersistInfo;
 import org.apache.doris.persist.RoutineLoadOperation;
 import org.apache.doris.persist.TableInfo;
@@ -172,8 +173,7 @@ public class JournalEntity implements Writable {
                 break;
             }
             case OperationType.OP_DROP_PARTITION: {
-                data = new DropPartitionInfo();
-                ((DropPartitionInfo) data).readFields(in);
+                data = DropPartitionInfo.read(in);
                 isRead = true;
                 break;
             }
@@ -517,6 +517,11 @@ public class JournalEntity implements Writable {
                 isRead = true;
                 break;
             }
+            case OperationType.OP_REPLACE_TEMP_PARTITION: {
+                data = ReplacePartitionOperationLog.read(in);
+                isRead = true;
+                break;
+            }
             default: {
                 IOException e = new IOException();
                 LOG.error("UNKNOWN Operation Type {}", opCode, e);
diff --git a/fe/src/main/java/org/apache/doris/journal/local/LocalJournalCursor.java b/fe/src/main/java/org/apache/doris/journal/local/LocalJournalCursor.java
index 8b5fa71..4d7c403 100644
--- a/fe/src/main/java/org/apache/doris/journal/local/LocalJournalCursor.java
+++ b/fe/src/main/java/org/apache/doris/journal/local/LocalJournalCursor.java
@@ -241,8 +241,7 @@ public final class LocalJournalCursor implements JournalCursor {
                 break;
             }
             case OperationType.OP_DROP_PARTITION: {
-                DropPartitionInfo info = new DropPartitionInfo();
-                info.readFields(in);
+                DropPartitionInfo info = DropPartitionInfo.read(in);
                 ret.setData(info);
                 break;
             }
diff --git a/fe/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java b/fe/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
index 0914524..513d800 100644
--- a/fe/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
+++ b/fe/src/main/java/org/apache/doris/load/routineload/RoutineLoadJob.java
@@ -1155,7 +1155,6 @@ public abstract class RoutineLoadJob extends AbstractTxnStateChangeCallback impl
 
     abstract String customPropertiesJsonToString();
 
-
     public boolean needRemove() {
         if (!isFinal()) {
             return false;
diff --git a/fe/src/main/java/org/apache/doris/persist/DropPartitionInfo.java b/fe/src/main/java/org/apache/doris/persist/DropPartitionInfo.java
index 9077b5e..4587074 100644
--- a/fe/src/main/java/org/apache/doris/persist/DropPartitionInfo.java
+++ b/fe/src/main/java/org/apache/doris/persist/DropPartitionInfo.java
@@ -17,25 +17,36 @@
 
 package org.apache.doris.persist;
 
-import org.apache.doris.common.io.Writable;
+import org.apache.doris.catalog.Catalog;
+import org.apache.doris.common.FeMetaVersion;
 import org.apache.doris.common.io.Text;
+import org.apache.doris.common.io.Writable;
+import org.apache.doris.persist.gson.GsonUtils;
+
+import com.google.gson.annotations.SerializedName;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
 public class DropPartitionInfo implements Writable {
+    @SerializedName(value = "dbId")
     private Long dbId;
+    @SerializedName(value = "tableId")
     private Long tableId;
+    @SerializedName(value = "partitionName")
     private String partitionName;
+    @SerializedName(value = "isTempPartition")
+    private boolean isTempPartition = false;
     
-    public DropPartitionInfo() {
+    private DropPartitionInfo() {
     }
 
-    public DropPartitionInfo(Long dbId, Long tableId, String partitionName) {
+    public DropPartitionInfo(Long dbId, Long tableId, String partitionName, boolean isTempPartition) {
         this.dbId = dbId;
         this.tableId = tableId;
         this.partitionName = partitionName;
+        this.isTempPartition = isTempPartition;
     }
     
     public Long getDbId() {
@@ -50,17 +61,34 @@ public class DropPartitionInfo implements Writable {
         return partitionName;
     }
 
-    public void write(DataOutput out) throws IOException {
-        out.writeLong(dbId);
-        out.writeLong(tableId);
-        Text.writeString(out, partitionName);
+    public boolean isTempPartition() {
+        return isTempPartition;
     }
-    public void readFields(DataInput in) throws IOException {
+
+    private void readFields(DataInput in) throws IOException {
         dbId = in.readLong();
         tableId = in.readLong();
         partitionName = Text.readString(in);
     }
-    
+
+    public static DropPartitionInfo read(DataInput in) throws IOException {
+        if (Catalog.getCurrentCatalogJournalVersion() < FeMetaVersion.VERSION_74) {
+            DropPartitionInfo info = new DropPartitionInfo();
+            info.readFields(in);
+            return info;
+        } else {
+            String json = Text.readString(in);
+            return GsonUtils.GSON.fromJson(json, DropPartitionInfo.class);
+        }
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        String json = GsonUtils.GSON.toJson(this);
+        Text.writeString(out, json);
+    }
+
+    @Override
     public boolean equals(Object obj) {
         if (this == obj) {
             return true;
diff --git a/fe/src/main/java/org/apache/doris/persist/EditLog.java b/fe/src/main/java/org/apache/doris/persist/EditLog.java
index b98c654..327144f 100644
--- a/fe/src/main/java/org/apache/doris/persist/EditLog.java
+++ b/fe/src/main/java/org/apache/doris/persist/EditLog.java
@@ -720,6 +720,11 @@ public class EditLog {
                     catalog.replayModifyTableProperty(opCode, modifyTablePropertyOperationLog);
                     break;
                 }
+                case OperationType.OP_REPLACE_TEMP_PARTITION: {
+                    ReplacePartitionOperationLog replaceTempPartitionLog = (ReplacePartitionOperationLog) journal.getData();
+                    catalog.replayReplaceTempPartition(replaceTempPartitionLog);
+                    break;
+                }
                 default: {
                     IOException e = new IOException();
                     LOG.error("UNKNOWN Operation Type {}", opCode, e);
@@ -1241,4 +1246,8 @@ public class EditLog {
     public void logModifyInMemory(ModifyTablePropertyOperationLog info) {
         logEdit(OperationType.OP_MODIFY_IN_MEMORY, info);
     }
+
+    public void logReplaceTempPartition(ReplacePartitionOperationLog info) {
+        logEdit(OperationType.OP_REPLACE_TEMP_PARTITION, info);
+    }
 }
diff --git a/fe/src/main/java/org/apache/doris/persist/OperationType.java b/fe/src/main/java/org/apache/doris/persist/OperationType.java
index aa61ffc..87b6984 100644
--- a/fe/src/main/java/org/apache/doris/persist/OperationType.java
+++ b/fe/src/main/java/org/apache/doris/persist/OperationType.java
@@ -43,6 +43,7 @@ public class OperationType {
     public static final short OP_RESTORE_JOB = 117;
     public static final short OP_TRUNCATE_TABLE = 118;
     public static final short OP_MODIFY_VIEW_DEF = 119;
+    public static final short OP_REPLACE_TEMP_PARTITION = 210;
 
     // 20~29 120~129 220~229 ...
     public static final short OP_START_ROLLUP = 20;
diff --git a/fe/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java b/fe/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java
index 49f4fcd..5b4fd3f 100644
--- a/fe/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java
+++ b/fe/src/main/java/org/apache/doris/persist/PartitionPersistInfo.java
@@ -21,9 +21,9 @@ import org.apache.doris.catalog.Catalog;
 import org.apache.doris.catalog.DataProperty;
 import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.PartitionKey;
-import org.apache.doris.catalog.RangePartitionInfo;
 import org.apache.doris.common.FeMetaVersion;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.common.util.RangeUtils;
 
 import com.google.common.collect.Range;
 
@@ -39,14 +39,15 @@ public class PartitionPersistInfo implements Writable {
     private Range<PartitionKey> range;
     private DataProperty dataProperty;
     private short replicationNum;
-    private boolean isInMemory;
+    private boolean isInMemory = false;
+    private boolean isTempPartition = false;
     
     public PartitionPersistInfo() {
     }
 
     public PartitionPersistInfo(long dbId, long tableId, Partition partition, Range<PartitionKey> range,
                                 DataProperty dataProperty, short replicationNum,
-                                boolean isInMemory) {
+                                boolean isInMemory, boolean isTempPartition) {
         this.dbId = dbId;
         this.tableId = tableId;
         this.partition = partition;
@@ -56,6 +57,7 @@ public class PartitionPersistInfo implements Writable {
 
         this.replicationNum = replicationNum;
         this.isInMemory = isInMemory;
+        this.isTempPartition = isTempPartition;
     }
     
     public Long getDbId() {
@@ -86,27 +88,37 @@ public class PartitionPersistInfo implements Writable {
         return isInMemory;
     }
 
+    public boolean isTempPartition() {
+        return isTempPartition;
+    }
+
     public void write(DataOutput out) throws IOException {
         out.writeLong(dbId);
         out.writeLong(tableId);
         partition.write(out);
 
-        RangePartitionInfo.writeRange(out, range);
+        RangeUtils.writeRange(out, range);
         dataProperty.write(out);
         out.writeShort(replicationNum);
         out.writeBoolean(isInMemory);
+        out.writeBoolean(isTempPartition);
     }
+
     public void readFields(DataInput in) throws IOException {
         dbId = in.readLong();
         tableId = in.readLong();
         partition = Partition.read(in);
 
-        range = RangePartitionInfo.readRange(in);
+        range = RangeUtils.readRange(in);
         dataProperty = DataProperty.read(in);
         replicationNum = in.readShort();
         if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_72) {
             isInMemory = in.readBoolean();
         }
+
+        if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_74) {
+            isTempPartition = in.readBoolean();
+        }
     }
     
     public boolean equals(Object obj) {
diff --git a/fe/src/main/java/org/apache/doris/persist/ReplacePartitionOperationLog.java b/fe/src/main/java/org/apache/doris/persist/ReplacePartitionOperationLog.java
new file mode 100644
index 0000000..9571634
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/persist/ReplacePartitionOperationLog.java
@@ -0,0 +1,93 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.persist;
+
+import org.apache.doris.common.io.Text;
+import org.apache.doris.common.io.Writable;
+import org.apache.doris.persist.gson.GsonUtils;
+
+import com.google.gson.annotations.SerializedName;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+/*
+ * For serializing the "replace temp partition" operation log
+ */
+public class ReplacePartitionOperationLog implements Writable {
+
+    @SerializedName(value = "dbId")
+    private long dbId;
+    @SerializedName(value = "tblId")
+    private long tblId;
+    @SerializedName(value = "partitions")
+    private List<String> partitions;
+    @SerializedName(value = "tempPartitions")
+    private List<String> tempPartitions;
+    @SerializedName(value = "strictRange")
+    private boolean strictRange;
+    @SerializedName(value = "useTempPartitionName")
+    private boolean useTempPartitionName;
+
+    public ReplacePartitionOperationLog(long dbId, long tblId, List<String> partitionNames,
+            List<String> tempPartitionNames, boolean strictRange, boolean useTempPartitionName) {
+        this.dbId = dbId;
+        this.tblId = tblId;
+        this.partitions = partitionNames;
+        this.tempPartitions = tempPartitionNames;
+        this.strictRange = strictRange;
+        this.useTempPartitionName = useTempPartitionName;
+    }
+
+    public long getDbId() {
+        return dbId;
+    }
+
+    public long getTblId() {
+        return tblId;
+    }
+
+    public List<String> getPartitions() {
+        return partitions;
+    }
+
+    public List<String> getTempPartitions() {
+        return tempPartitions;
+    }
+
+    public boolean isStrictRange() {
+        return strictRange;
+    }
+
+    public boolean useTempPartitionName() {
+        return useTempPartitionName;
+    }
+
+    public static ReplacePartitionOperationLog read(DataInput in) throws IOException {
+        String json = Text.readString(in);
+        return GsonUtils.GSON.fromJson(json, ReplacePartitionOperationLog.class);
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        String json = GsonUtils.GSON.toJson(this);
+        Text.writeString(out, json);
+    }
+}
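
Since the whole log is persisted as a single GSON JSON string, replay only needs `read(DataInput)`. A round-trip sketch (not part of this patch; the ids and partition names below are made up):

```java
import org.apache.doris.persist.ReplacePartitionOperationLog;

import com.google.common.collect.Lists;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReplacePartitionLogRoundTrip {
    public static void main(String[] args) throws IOException {
        // Hypothetical ids: replace partitions p1/p2 with temp partitions tp1/tp2.
        ReplacePartitionOperationLog log = new ReplacePartitionOperationLog(
                10001L, 20001L,
                Lists.newArrayList("p1", "p2"),
                Lists.newArrayList("tp1", "tp2"),
                true /* strictRange */, false /* useTempPartitionName */);

        // write() serializes the log as one JSON string via GsonUtils.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        log.write(new DataOutputStream(bos));

        // read() restores an equivalent object, as done when replaying the edit log.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        ReplacePartitionOperationLog replayed = ReplacePartitionOperationLog.read(in);
        System.out.println(replayed.getTempPartitions()); // [tp1, tp2]
    }
}
```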
diff --git a/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java b/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java
index 870eac2..3032123 100644
--- a/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java
+++ b/fe/src/main/java/org/apache/doris/persist/TruncateTableInfo.java
@@ -17,10 +17,15 @@
 
 package org.apache.doris.persist;
 
+import org.apache.doris.catalog.Catalog;
 import org.apache.doris.catalog.Partition;
+import org.apache.doris.common.FeMetaVersion;
+import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
+import org.apache.doris.persist.gson.GsonUtils;
 
 import com.google.common.collect.Lists;
+import com.google.gson.annotations.SerializedName;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -29,18 +34,24 @@ import java.util.List;
 
 public class TruncateTableInfo implements Writable {
 
+    @SerializedName(value = "dbId")
     private long dbId;
+    @SerializedName(value = "tblId")
     private long tblId;
+    @SerializedName(value = "partitions")
     private List<Partition> partitions = Lists.newArrayList();
+    @SerializedName(value = "isEntireTable")
+    private boolean isEntireTable = false;
 
     private TruncateTableInfo() {
 
     }
 
-    public TruncateTableInfo(long dbId, long tblId, List<Partition> partitions) {
+    public TruncateTableInfo(long dbId, long tblId, List<Partition> partitions, boolean isEntireTable) {
         this.dbId = dbId;
         this.tblId = tblId;
         this.partitions = partitions;
+        this.isEntireTable = isEntireTable;
     }
 
     public long getDbId() {
@@ -55,23 +66,28 @@ public class TruncateTableInfo implements Writable {
         return partitions;
     }
 
+    public boolean isEntireTable() {
+        return isEntireTable;
+    }
+
     public static TruncateTableInfo read(DataInput in) throws IOException {
-        TruncateTableInfo info = new TruncateTableInfo();
-        info.readFields(in);
-        return info;
+        if (Catalog.getCurrentCatalogJournalVersion() < FeMetaVersion.VERSION_74) {
+            TruncateTableInfo info = new TruncateTableInfo();
+            info.readFields(in);
+            return info;
+        } else {
+            String json = Text.readString(in);
+            return GsonUtils.GSON.fromJson(json, TruncateTableInfo.class);
+        }
     }
 
     @Override
     public void write(DataOutput out) throws IOException {
-        out.writeLong(dbId);
-        out.writeLong(tblId);
-        out.writeInt(partitions.size());
-        for (Partition partition : partitions) {
-            partition.write(out);
-        }
+        String json = GsonUtils.GSON.toJson(this);
+        Text.writeString(out, json);
     }
 
-    public void readFields(DataInput in) throws IOException {
+    private void readFields(DataInput in) throws IOException {
         dbId = in.readLong();
         tblId = in.readLong();
         int size = in.readInt();
@@ -80,5 +96,4 @@ public class TruncateTableInfo implements Writable {
             partitions.add(partition);
         }
     }
-
 }
diff --git a/fe/src/main/java/org/apache/doris/persist/gson/GsonPostProcessable.java b/fe/src/main/java/org/apache/doris/persist/gson/GsonPostProcessable.java
new file mode 100644
index 0000000..c09f2ad
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/persist/gson/GsonPostProcessable.java
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.persist.gson;
+
+public interface GsonPostProcessable {
+    public void gsonPostProcess();
+}
diff --git a/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java b/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
index b1e0228..d136304 100644
--- a/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
+++ b/fe/src/main/java/org/apache/doris/persist/gson/GsonUtils.java
@@ -17,6 +17,9 @@
 
 package org.apache.doris.persist.gson;
 
+import org.apache.doris.catalog.DistributionInfo;
+import org.apache.doris.catalog.HashDistributionInfo;
+import org.apache.doris.catalog.RandomDistributionInfo;
 import org.apache.doris.catalog.ScalarType;
 
 import com.google.common.base.Preconditions;
@@ -27,7 +30,6 @@ import com.google.common.collect.LinkedHashMultimap;
 import com.google.common.collect.LinkedListMultimap;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Table;
-import com.google.common.reflect.TypeToken;
 import com.google.gson.ExclusionStrategy;
 import com.google.gson.FieldAttributes;
 import com.google.gson.Gson;
@@ -40,8 +42,14 @@ import com.google.gson.JsonObject;
 import com.google.gson.JsonParseException;
 import com.google.gson.JsonSerializationContext;
 import com.google.gson.JsonSerializer;
+import com.google.gson.TypeAdapter;
+import com.google.gson.TypeAdapterFactory;
 import com.google.gson.annotations.SerializedName;
+import com.google.gson.reflect.TypeToken;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.stream.JsonWriter;
 
+import java.io.IOException;
 import java.lang.reflect.Method;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
@@ -72,6 +80,12 @@ public class GsonUtils {
             // TODO: register other sub type after Doris support more types.
             .registerSubtype(ScalarType.class, ScalarType.class.getSimpleName());
 
+    // runtime adapter for class "DistributionInfo"
+    private static RuntimeTypeAdapterFactory<DistributionInfo> distributionInfoTypeAdapterFactory = RuntimeTypeAdapterFactory
+            .of(DistributionInfo.class, "clazz")
+            .registerSubtype(HashDistributionInfo.class, HashDistributionInfo.class.getSimpleName())
+            .registerSubtype(RandomDistributionInfo.class, RandomDistributionInfo.class.getSimpleName());
+
     // the builder of GSON instance.
     // Add any other adapters if necessary.
     private static final GsonBuilder GSON_BUILDER = new GsonBuilder()
@@ -79,7 +93,9 @@ public class GsonUtils {
             .enableComplexMapKeySerialization()
             .registerTypeHierarchyAdapter(Table.class, new GuavaTableAdapter())
             .registerTypeHierarchyAdapter(Multimap.class, new GuavaMultimapAdapter())
-            .registerTypeAdapterFactory(columnTypeAdapterFactory);
+            .registerTypeAdapterFactory(new PostProcessTypeAdapterFactory())
+            .registerTypeAdapterFactory(columnTypeAdapterFactory)
+            .registerTypeAdapterFactory(distributionInfoTypeAdapterFactory);
 
     // this instance is thread-safe.
     public static final Gson GSON = GSON_BUILDER.create();
@@ -224,7 +240,7 @@ public class GsonUtils {
         private static final Type asMapReturnType = getAsMapMethod().getGenericReturnType();
 
         private static Type asMapType(Type multimapType) {
-            return TypeToken.of(multimapType).resolveType(asMapReturnType).getType();
+            return com.google.common.reflect.TypeToken.of(multimapType).resolveType(asMapReturnType).getType();
         }
 
         private static Method getAsMapMethod() {
@@ -280,4 +296,30 @@ public class GsonUtils {
             return map;
         }
     }
+
+    public static class PostProcessTypeAdapterFactory implements TypeAdapterFactory {
+
+        public PostProcessTypeAdapterFactory() {
+        }
+
+        @Override
+        public <T> TypeAdapter<T> create(Gson gson, TypeToken<T> type) {
+            TypeAdapter<T> delegate = gson.getDelegateAdapter(this, type);
+
+            return new TypeAdapter<T>() {
+                public void write(JsonWriter out, T value) throws IOException {
+                    delegate.write(out, value);
+                }
+
+                public T read(JsonReader reader) throws IOException {
+                    T obj = delegate.read(reader);
+                    if (obj instanceof GsonPostProcessable) {
+                        ((GsonPostProcessable) obj).gsonPostProcess();
+                    }
+                    return obj;
+                }
+            };
+        }
+    }
+
 }
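
The new `GsonPostProcessable` hook, wired in through `PostProcessTypeAdapterFactory`, lets a deserialized object rebuild derived state that is not serialized. A sketch of how a class could plug into it (the class and its fields are hypothetical, not part of this patch):

```java
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;
import org.apache.doris.persist.gson.GsonPostProcessable;
import org.apache.doris.persist.gson.GsonUtils;

import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.gson.annotations.SerializedName;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.List;
import java.util.Set;

// Hypothetical class: only "names" is serialized; the lookup set is rebuilt after deserialization.
public class PartitionNameIndex implements Writable, GsonPostProcessable {
    @SerializedName(value = "names")
    private List<String> names = Lists.newArrayList();

    // Derived state; transient so GSON never serializes it.
    private transient Set<String> nameSet = Sets.newHashSet();

    @Override
    public void gsonPostProcess() {
        // Invoked automatically by PostProcessTypeAdapterFactory once GSON finishes reading.
        nameSet = Sets.newHashSet(names);
    }

    @Override
    public void write(DataOutput out) throws IOException {
        Text.writeString(out, GsonUtils.GSON.toJson(this));
    }

    public static PartitionNameIndex read(DataInput in) throws IOException {
        return GsonUtils.GSON.fromJson(Text.readString(in), PartitionNameIndex.class);
    }
}
```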
diff --git a/fe/src/main/java/org/apache/doris/planner/OlapScanNode.java b/fe/src/main/java/org/apache/doris/planner/OlapScanNode.java
index 153949e..1c5dfab 100644
--- a/fe/src/main/java/org/apache/doris/planner/OlapScanNode.java
+++ b/fe/src/main/java/org/apache/doris/planner/OlapScanNode.java
@@ -588,13 +588,9 @@ public class OlapScanNode extends ScanNode {
         OlapScanNode olapScanNode = new OlapScanNode(id, desc, planNodeName);
         olapScanNode.numInstances = 1;
 
-        Collection<Long> partitionIds = new ArrayList<Long>();
         ArrayList<Partition> partitions = Lists.newArrayList(olapScanNode.olapTable.getPartitions());
         Preconditions.checkState(!partitions.isEmpty());
-        if (!partitions.isEmpty()) {
-            olapScanNode.selectedIndexId = partitions.get(0).getBaseIndex().getId();
-        }
-
+        olapScanNode.selectedIndexId = partitions.get(0).getBaseIndex().getId();
         olapScanNode.selectedPartitionNum = 1;
         olapScanNode.selectedTabletsNum = 1;
         olapScanNode.totalTabletsNum = 1;
diff --git a/fe/src/main/java/org/apache/doris/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
index eb6a70a..5b7b012 100644
--- a/fe/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/doris/planner/SingleNodePlanner.java
@@ -448,7 +448,7 @@ public class SingleNodePlanner {
                                 && child.getChild(0).getType().isNumericType()) {
                             returnColumns.add(((SlotRef) child.getChild(0)).getDesc().getColumn());
                         } else {
-                            turnOffReason = "aggExpr.getChild(0)[aggExpr.getChild(0).toSql()] is not Numeric CastExpr";
+                            turnOffReason = "aggExpr.getChild(0)[" + aggExpr.getChild(0).toSql() + "] is not Numeric CastExpr";
                             aggExprValidate = false;
                             break;
                         }
diff --git a/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java
index d380a1f..17ac878 100644
--- a/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java
+++ b/fe/src/main/java/org/apache/doris/qe/ShowExecutor.java
@@ -1215,12 +1215,17 @@ public class ShowExecutor {
                 }
                 boolean stop = false;
                 Collection<Partition> partitions = new ArrayList<Partition>();
-                List<String> partitionNames = showStmt.getPartitionNames();
                 if (showStmt.hasPartition()) {
-                    for (Partition partition : olapTable.getPartitions()) {
-                        if (partitionNames.contains(partition.getName())) {
-                            partitions.add(partition);
+                    List<String> partitionNames = showStmt.getPartitionNames();
+                    for (String partName : partitionNames) {
+                        Partition partition = olapTable.getPartition(partName);
+                        if (partition == null) {
+                            partition = olapTable.getPartition(partName, true);
                         }
+                        if (partition == null) {
+                            throw new AnalysisException("Unknown partition: " + partName);
+                        }
+                        partitions.add(partition);
                     }
                 } else {
                     partitions = olapTable.getPartitions();
diff --git a/fe/src/main/java/org/apache/doris/task/LoadEtlTask.java b/fe/src/main/java/org/apache/doris/task/LoadEtlTask.java
index 4f37819..81b700f 100644
--- a/fe/src/main/java/org/apache/doris/task/LoadEtlTask.java
+++ b/fe/src/main/java/org/apache/doris/task/LoadEtlTask.java
@@ -185,7 +185,7 @@ public abstract class LoadEtlTask extends MasterTask {
                         Partition partition = table.getPartition(partitionId);
                         if (partition == null) {
                             throw new MetaNotFoundException("partition does not exist. id: " + partitionId);
-                        } 
+                        }
                         // yiguolei: real time load do not need get version here
                     } finally {
                         db.readUnlock();
diff --git a/fe/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java b/fe/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java
index 406cf43..67861ab 100644
--- a/fe/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java
+++ b/fe/src/main/java/org/apache/doris/task/UpdateTabletMetaInfoTask.java
@@ -17,10 +17,6 @@
 
 package org.apache.doris.task;
 
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.lang3.tuple.Triple;
 import org.apache.doris.catalog.Catalog;
 import org.apache.doris.catalog.TabletMeta;
 import org.apache.doris.common.MarkedCountDownLatch;
@@ -31,10 +27,15 @@ import org.apache.doris.thrift.TTabletMetaInfo;
 import org.apache.doris.thrift.TTabletMetaType;
 import org.apache.doris.thrift.TTaskType;
 import org.apache.doris.thrift.TUpdateTabletMetaInfoReq;
+
+import com.google.common.collect.Lists;
+
+import org.apache.commons.lang3.tuple.Triple;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
-import com.google.common.collect.Lists;
+import java.util.List;
+import java.util.Set;
 
 public class UpdateTabletMetaInfoTask extends AgentTask {
 
diff --git a/fe/src/main/jflex/sql_scanner.flex b/fe/src/main/jflex/sql_scanner.flex
index aba90b4..2cdbc95 100644
--- a/fe/src/main/jflex/sql_scanner.flex
+++ b/fe/src/main/jflex/sql_scanner.flex
@@ -334,6 +334,7 @@ import org.apache.doris.qe.SqlModeHelper;
         keywordMap.put("tables", new Integer(SqlParserSymbols.KW_TABLES));
         keywordMap.put("tablet", new Integer(SqlParserSymbols.KW_TABLET));
         keywordMap.put("task", new Integer(SqlParserSymbols.KW_TASK));
+        keywordMap.put("temporary", new Integer(SqlParserSymbols.KW_TEMPORARY));
         keywordMap.put("terminated", new Integer(SqlParserSymbols.KW_TERMINATED));
         keywordMap.put("than", new Integer(SqlParserSymbols.KW_THAN));
         keywordMap.put("then", new Integer(SqlParserSymbols.KW_THEN));
diff --git a/fe/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java b/fe/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java
index 86860ae..3965066 100644
--- a/fe/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java
+++ b/fe/src/test/java/org/apache/doris/analysis/ShowPartitionsStmtTest.java
@@ -65,7 +65,7 @@ public class ShowPartitionsStmtTest {
 
     @Test
     public void testNormal() throws UserException {
-        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), null, null, null);
+        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), null, null, null, false);
         stmt.analyzeImpl(analyzer);
         Assert.assertEquals("SHOW PARTITIONS FROM `testCluster:testDb`.`testTable`", stmt.toString());
     }
@@ -75,7 +75,7 @@ public class ShowPartitionsStmtTest {
         SlotRef slotRef = new SlotRef(null, "LastConsistencyCheckTime");
         StringLiteral stringLiteral = new StringLiteral("2019-12-22 10:22:11");
         BinaryPredicate binaryPredicate = new BinaryPredicate(BinaryPredicate.Operator.GT, slotRef, stringLiteral);
-        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), binaryPredicate, null, null);
+        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), binaryPredicate, null, null, false);
         stmt.analyzeImpl(analyzer);
         Assert.assertEquals("SHOW PARTITIONS FROM `testCluster:testDb`.`testTable` WHERE `LastConsistencyCheckTime` > '2019-12-22 10:22:11'", stmt.toString());
     }
@@ -85,7 +85,7 @@ public class ShowPartitionsStmtTest {
         SlotRef slotRef = new SlotRef(null, "PartitionName");
         StringLiteral stringLiteral = new StringLiteral("%p2019%");
         LikePredicate likePredicate = new LikePredicate(LikePredicate.Operator.LIKE, slotRef, stringLiteral);
-        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), likePredicate, null, null);
+        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), likePredicate, null, null, false);
         stmt.analyzeImpl(analyzer);
         Assert.assertEquals("SHOW PARTITIONS FROM `testCluster:testDb`.`testTable` WHERE `PartitionName` LIKE '%p2019%'", stmt.toString());
     }
@@ -95,7 +95,7 @@ public class ShowPartitionsStmtTest {
         SlotRef slotRef = new SlotRef(null, "PartitionId");
         OrderByElement orderByElement = new OrderByElement(slotRef, true, false);
         LimitElement limitElement = new LimitElement(10);
-        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), null, Arrays.asList(orderByElement), limitElement);
+        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), null, Arrays.asList(orderByElement), limitElement, false);
         stmt.analyzeImpl(analyzer);
         Assert.assertEquals("SHOW PARTITIONS FROM `testCluster:testDb`.`testTable` ORDER BY `PartitionId` ASC LIMIT 10", stmt.toString());
     }
@@ -105,7 +105,7 @@ public class ShowPartitionsStmtTest {
         SlotRef slotRef = new SlotRef(null, "DataSize");
         StringLiteral stringLiteral = new StringLiteral("3.2 GB");
         BinaryPredicate binaryPredicate = new BinaryPredicate(BinaryPredicate.Operator.EQ, slotRef, stringLiteral);
-        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), binaryPredicate, null, null);
+        ShowPartitionsStmt stmt = new ShowPartitionsStmt(new TableName("testDb", "testTable"), binaryPredicate, null, null, false);
         expectedEx.expect(AnalysisException.class);
         expectedEx.expectMessage("Only the columns of PartitionId/PartitionName/" +
                 "State/Buckets/ReplicationNum/LastConsistencyCheckTime are supported.");
diff --git a/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java b/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java
new file mode 100644
index 0000000..fcd83de
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java
@@ -0,0 +1,604 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.alter.AlterJobV2;
+import org.apache.doris.analysis.AlterTableStmt;
+import org.apache.doris.analysis.CreateDbStmt;
+import org.apache.doris.analysis.CreateTableStmt;
+import org.apache.doris.analysis.RecoverPartitionStmt;
+import org.apache.doris.analysis.ShowPartitionsStmt;
+import org.apache.doris.analysis.ShowStmt;
+import org.apache.doris.analysis.ShowTabletStmt;
+import org.apache.doris.analysis.TruncateTableStmt;
+import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
+import org.apache.doris.common.AnalysisException;
+import org.apache.doris.common.FeMetaVersion;
+import org.apache.doris.common.jmockit.Deencapsulation;
+import org.apache.doris.meta.MetaContext;
+import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.qe.ShowExecutor;
+import org.apache.doris.qe.ShowResultSet;
+import org.apache.doris.utframe.UtFrameUtils;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+
+public class TempPartitionTest {
+
+    private static String tempPartitionFile = "./TempPartitionTest";
+    private static String tblFile = "./tblFile";
+    private static String runningDir = "fe/mocked/TempPartitionTest/" + UUID.randomUUID().toString() + "/";
+
+    private static ConnectContext ctx;
+
+    @BeforeClass
+    public static void setup() throws Exception {
+        UtFrameUtils.createMinDorisCluster(runningDir);
+        ctx = UtFrameUtils.createDefaultCtx();
+    }
+
+    @AfterClass
+    public static void tearDown() {
+        File file = new File(runningDir);
+        file.delete();
+        File file2 = new File(tempPartitionFile);
+        file2.delete();
+        File file3 = new File(tblFile);
+        file3.delete();
+    }
+
+    @Before
+    public void before() {
+
+    }
+
+    private List<List<String>> checkShowPartitionsResultNum(String tbl, boolean isTemp, int expected) throws Exception {
+        String showStr = "show " + (isTemp ? "temporary" : "") + " partitions from " + tbl;
+        ShowPartitionsStmt showStmt = (ShowPartitionsStmt) UtFrameUtils.parseAndAnalyzeStmt(showStr, ctx);
+        ShowExecutor executor = new ShowExecutor(ctx, (ShowStmt) showStmt);
+        ShowResultSet showResultSet = executor.execute();
+        List<List<String>> rows = showResultSet.getResultRows();
+        Assert.assertEquals(expected, rows.size());
+        return rows;
+    }
+
+    private void alterTable(String sql, boolean expectedException) throws Exception {
+        try {
+            AlterTableStmt alterTableStmt = (AlterTableStmt) UtFrameUtils.parseAndAnalyzeStmt(sql, ctx);
+            Catalog.getCurrentCatalog().getAlterInstance().processAlterTable(alterTableStmt);
+            if (expectedException) {
+                Assert.fail("expected exception not thrown");
+            }
+        } catch (Exception e) {
+            if (expectedException) {
+                System.out.println("got exception: " + e.getMessage());
+            } else {
+                throw e;
+            }
+        }
+    }
+
+    private List<List<String>> checkTablet(String tbl, String partitions, int expected) throws Exception {
+        String showStr = "show tablet from " + tbl + " partition (" + partitions + ");";
+        ShowTabletStmt showStmt = (ShowTabletStmt) UtFrameUtils.parseAndAnalyzeStmt(showStr, ctx);
+        ShowExecutor executor = new ShowExecutor(ctx, (ShowStmt) showStmt);
+        ShowResultSet showResultSet = executor.execute();
+        List<List<String>> rows = showResultSet.getResultRows();
+        if (expected != -1) {
+            Assert.assertEquals(expected, rows.size());
+        }
+        return rows;
+    }
+
+    private long getPartitionIdByTabletId(long tabletId) {
+        TabletInvertedIndex index = Catalog.getCurrentInvertedIndex();
+        TabletMeta tabletMeta = index.getTabletMeta(tabletId);
+        if (tabletMeta == null) {
+            return -1;
+        }
+        return tabletMeta.getPartitionId();
+    }
+    
+    private void getPartitionNameToTabletIdMap(String tbl, boolean isTemp, Map<String, Long> partNameToTabletId) throws Exception {
+        partNameToTabletId.clear();
+        String showStr = "show " + (isTemp ? "temporary" : "") + " partitions from " + tbl;
+        ShowPartitionsStmt showStmt = (ShowPartitionsStmt) UtFrameUtils.parseAndAnalyzeStmt(showStr, ctx);
+        ShowExecutor executor = new ShowExecutor(ctx, (ShowStmt) showStmt);
+        ShowResultSet showResultSet = executor.execute();
+        List<List<String>> rows = showResultSet.getResultRows();
+        Map<Long, String> partIdToName = Maps.newHashMap();
+        for (List<String> row : rows) {
+            partIdToName.put(Long.valueOf(row.get(0)), row.get(1));
+        }
+
+        rows = checkTablet(tbl, Joiner.on(",").join(partIdToName.values()), -1);
+        for (List<String> row : rows) {
+            long tabletId = Long.valueOf(row.get(0));
+            long partitionId = getPartitionIdByTabletId(tabletId);
+            String partName = partIdToName.get(partitionId);
+            partNameToTabletId.put(partName, tabletId);
+        }
+    }
+
+    private void checkTabletExists(Collection<Long> tabletIds, boolean checkExist) {
+        TabletInvertedIndex invertedIndex = Catalog.getCurrentInvertedIndex();
+        for (Long tabletId : tabletIds) {
+            if (checkExist) {
+                Assert.assertNotNull(invertedIndex.getTabletMeta(tabletId));
+            } else {
+                Assert.assertNull(invertedIndex.getTabletMeta(tabletId));
+            }
+        }
+    }
+
+    private void checkPartitionExist(OlapTable tbl, String partName, boolean isTemp, boolean checkExist) {
+        if (checkExist) {
+            Assert.assertNotNull(tbl.getPartition(partName, isTemp));
+        } else {
+            Assert.assertNull(tbl.getPartition(partName, isTemp));
+        }
+    }
+
+    @Test
+    public void testForSinglePartitionTable() throws Exception {
+        // create database db1
+        String createDbStmtStr = "create database db1;";
+        CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx);
+        Catalog.getCurrentCatalog().createDb(createDbStmt);
+        System.out.println(Catalog.getCurrentCatalog().getDbNames());
+        // create table tbl1
+        String createTblStmtStr1 = "create table db1.tbl1(k1 int) distributed by hash(k1) buckets 3 properties('replication_num' = '1');";
+        CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx);
+        Catalog.getCurrentCatalog().createTable(createTableStmt);
+
+        // add temp partition
+        String stmtStr = "alter table db1.tbl1 add temporary partition p1 values less than ('10');";
+        alterTable(stmtStr, true);
+
+        // drop temp partition
+        stmtStr = "alter table db1.tbl1 drop temporary partition tbl1;";
+        alterTable(stmtStr, true);
+
+        // show temp partition
+        checkShowPartitionsResultNum("db1.tbl1", true, 0);
+    }
+
+    @Test
+    public void testForMultiPartitionTable() throws Exception {
+        // create database db2
+        String createDbStmtStr = "create database db2;";
+        CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx);
+        Catalog.getCurrentCatalog().createDb(createDbStmt);
+        System.out.println(Catalog.getCurrentCatalog().getDbNames());
+
+        // create table tbl2
+        String createTblStmtStr1 = "create table db2.tbl2 (k1 int, k2 int)\n" +
+                "partition by range(k1)\n" +
+                "(\n" +
+                "partition p1 values less than('10'),\n" +
+                "partition p2 values less than('20'),\n" +
+                "partition p3 values less than('30')\n" +
+                ")\n" +
+                "distributed by hash(k2) buckets 1\n" +
+                "properties('replication_num' = '1');";
+        CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx);
+        Catalog.getCurrentCatalog().createTable(createTableStmt);
+
+        Database db2 = Catalog.getCurrentCatalog().getDb("default_cluster:db2");
+        OlapTable tbl2 = (OlapTable) db2.getTable("tbl2");
+
+        testSerializeOlapTable(tbl2);
+
+        Map<String, Long> originPartitionTabletIds = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", false, originPartitionTabletIds);
+        Assert.assertEquals(3, originPartitionTabletIds.keySet().size());
+
+        // show temp partition
+        checkShowPartitionsResultNum("db2.tbl2", true, 0);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+
+        // add temp partition with duplicate name
+        String stmtStr = "alter table db2.tbl2 add temporary partition p1 values less than('10');";
+        alterTable(stmtStr, true);
+
+        // add temp partition
+        stmtStr = "alter table db2.tbl2 add temporary partition tp1 values less than('10');";
+        alterTable(stmtStr, false);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp2 values less than('10');";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp1 values less than('20');";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp2 values less than('20');";
+        alterTable(stmtStr, false);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp3 values [('18'), ('30'));";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp3 values [('20'), ('30'));";
+        alterTable(stmtStr, false);
+
+        Map<String, Long> tempPartitionTabletIds = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", true, tempPartitionTabletIds);
+        Assert.assertEquals(3, tempPartitionTabletIds.keySet().size());
+
+        System.out.println("partition tablets: " + originPartitionTabletIds);
+        System.out.println("temp partition tablets: " + tempPartitionTabletIds);
+
+        testSerializeOlapTable(tbl2);
+
+        // drop non exist temp partition
+        stmtStr = "alter table db2.tbl2 drop temporary partition tp4;";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 drop temporary partition if exists tp4;";
+        alterTable(stmtStr, false);
+
+        stmtStr = "alter table db2.tbl2 drop temporary partition tp3;";
+        alterTable(stmtStr, false);
+        
+        Map<String, Long> originPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", false, originPartitionTabletIds2);
+        Assert.assertEquals(originPartitionTabletIds2, originPartitionTabletIds);
+
+        Map<String, Long> tempPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", true, tempPartitionTabletIds2);
+        Assert.assertEquals(2, tempPartitionTabletIds2.keySet().size());
+        Assert.assertTrue(!tempPartitionTabletIds2.containsKey("tp3"));
+
+        checkShowPartitionsResultNum("db2.tbl2", true, 2);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp3 values less than('30');";
+        alterTable(stmtStr, false);
+        checkShowPartitionsResultNum("db2.tbl2", true, 3);
+
+        stmtStr = "alter table db2.tbl2 drop partition p1;";
+        alterTable(stmtStr, false);
+        checkShowPartitionsResultNum("db2.tbl2", true, 3);
+        checkShowPartitionsResultNum("db2.tbl2", false, 2);
+
+        originPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", false, originPartitionTabletIds2);
+        Assert.assertEquals(2, originPartitionTabletIds2.size());
+        Assert.assertTrue(!originPartitionTabletIds2.containsKey("p1"));
+
+        String recoverStr = "recover partition p1 from db2.tbl2;";
+        RecoverPartitionStmt recoverStmt = (RecoverPartitionStmt) UtFrameUtils.parseAndAnalyzeStmt(recoverStr, ctx);
+        Catalog.getCurrentCatalog().recoverPartition(recoverStmt);
+        checkShowPartitionsResultNum("db2.tbl2", true, 3);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+
+        originPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", false, originPartitionTabletIds2);
+        Assert.assertEquals(originPartitionTabletIds2, originPartitionTabletIds);
+
+        tempPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", true, tempPartitionTabletIds2);
+        Assert.assertEquals(3, tempPartitionTabletIds2.keySet().size());
+
+        // Here, we should have 3 partitions p1,p2,p3, and 3 temp partitions tp1,tp2,tp3
+        System.out.println("we have partition tablets: " + originPartitionTabletIds2);
+        System.out.println("we have temp partition tablets: " + tempPartitionTabletIds2);
+
+        stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp2, tp3);";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('invalid' = 'invalid');";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp2, tp3) properties('strict_range' = 'false');";
+        alterTable(stmtStr, true);
+
+        stmtStr = "alter table db2.tbl2 replace partition(p1, p2) with temporary partition(tp1, tp2) properties('strict_range' = 'false', 'use_temp_partition_name' = 'true');";
+        alterTable(stmtStr, false);
+        checkShowPartitionsResultNum("db2.tbl2", true, 1);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+
+        checkTabletExists(tempPartitionTabletIds2.values(), true);
+        checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p3")), true);
+        checkTabletExists(Lists.newArrayList(originPartitionTabletIds2.get("p1"), originPartitionTabletIds2.get("p2")), false);
+        
+        String truncateStr = "truncate table db2.tbl2 partition (p3);";
+        TruncateTableStmt truncateTableStmt = (TruncateTableStmt)UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx);
+        Catalog.getCurrentCatalog().truncateTable(truncateTableStmt);
+        checkShowPartitionsResultNum("db2.tbl2", true, 1);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+        checkPartitionExist(tbl2, "tp1", false, true);
+        checkPartitionExist(tbl2, "tp2", false, true);
+        checkPartitionExist(tbl2, "p3", false, true);
+        checkPartitionExist(tbl2, "tp3", true, true);
+
+        stmtStr = "alter table db2.tbl2 drop partition p3;";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 add partition p31 values less than('25');";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 add partition p32 values less than('35');";
+        alterTable(stmtStr, false);
+
+        // for now, we have 4 partitions: tp1, tp2, p31, p32, and 1 temp partition: tp3
+        checkShowPartitionsResultNum("db2.tbl2", false, 4);
+        checkShowPartitionsResultNum("db2.tbl2", true, 1);
+
+        stmtStr = "alter table db2.tbl2 replace partition(p31) with temporary partition(tp3);";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db2.tbl2 replace partition(p31, p32) with temporary partition(tp3);";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db2.tbl2 replace partition(p31, p32) with temporary partition(tp3) properties('strict_range' = 'false');";
+        alterTable(stmtStr, false);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+        checkShowPartitionsResultNum("db2.tbl2", true, 0);
+        checkPartitionExist(tbl2, "tp1", false, true);
+        checkPartitionExist(tbl2, "tp2", false, true);
+        checkPartitionExist(tbl2, "tp3", false, true);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition p1 values less than('10');";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 add temporary partition p2 values less than('20');";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 add temporary partition p3 values less than('30');";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 replace partition(tp1, tp2) with temporary partition(p1, p2);";
+        alterTable(stmtStr, false);
+        checkPartitionExist(tbl2, "tp1", false, true);
+        checkPartitionExist(tbl2, "tp2", false, true);
+        checkPartitionExist(tbl2, "tp3", false, true);
+        checkPartitionExist(tbl2, "p1", true, false);
+        checkPartitionExist(tbl2, "p2", true, false);
+        checkPartitionExist(tbl2, "p3", true, true);
+
+        stmtStr = "alter table db2.tbl2 replace partition(tp3) with temporary partition(p3) properties('use_temp_partition_name' = 'true');";
+        alterTable(stmtStr, false);
+        checkPartitionExist(tbl2, "tp1", false, true);
+        checkPartitionExist(tbl2, "tp2", false, true);
+        checkPartitionExist(tbl2, "p3", false, true);
+        checkPartitionExist(tbl2, "p1", true, false);
+        checkPartitionExist(tbl2, "p2", true, false);
+        checkPartitionExist(tbl2, "p3", true, false);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+        checkShowPartitionsResultNum("db2.tbl2", true, 0);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition tp1 values less than('10');"; // name conflict
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db2.tbl2 rename partition p3 tp3;";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db2.tbl2 add temporary partition p1 values less than('10');";
+        alterTable(stmtStr, false);
+
+        originPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", false, originPartitionTabletIds2);
+        Assert.assertEquals(3, originPartitionTabletIds2.size());
+
+        tempPartitionTabletIds2 = Maps.newHashMap();
+        getPartitionNameToTabletIdMap("db2.tbl2", true, tempPartitionTabletIds2);
+        Assert.assertEquals(1, tempPartitionTabletIds2.keySet().size());
+        
+        // for now, we have 3 partitions: tp1, tp2, tp3, and 1 temp partition: p1
+        System.out.println("we have partition tablets: " + originPartitionTabletIds2);
+        System.out.println("we have temp partition tablets: " + tempPartitionTabletIds2);
+
+        stmtStr = "alter table db2.tbl2 add rollup r1(k1);";
+        alterTable(stmtStr, true);
+
+        truncateStr = "truncate table db2.tbl2";
+        truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(truncateStr, ctx);
+        Catalog.getCurrentCatalog().truncateTable(truncateTableStmt);
+        checkShowPartitionsResultNum("db2.tbl2", false, 3);
+        checkShowPartitionsResultNum("db2.tbl2", true, 0);
+
+        stmtStr = "alter table db2.tbl2 add rollup r1(k1);";
+        alterTable(stmtStr, false);
+
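+        // while the rollup job created above is still running, adding a temp partition
+        // to the table is expected to fail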
+        stmtStr = "alter table db2.tbl2 add temporary partition p2 values less than('20');";
+        alterTable(stmtStr, true);
+
+        // wait for the rollup job to finish
+        Map<Long, AlterJobV2> alterJobs = Catalog.getCurrentCatalog().getRollupHandler().getAlterJobsV2();
+        for (AlterJobV2 alterJobV2 : alterJobs.values()) {
+            while (!alterJobV2.getJobState().isFinalState()) {
+                System.out.println(
+                        "alter job " + alterJobV2.getDbId() + " is running. state: " + alterJobV2.getJobState());
+                Thread.sleep(5000);
+            }
+            System.out.println("alter job " + alterJobV2.getDbId() + " is done. state: " + alterJobV2.getJobState());
+            Assert.assertEquals(AlterJobV2.JobState.FINISHED, alterJobV2.getJobState());
+        }
+        // wait for the table state to return to NORMAL
+        Thread.sleep(500);
+
+        stmtStr = "alter table db2.tbl2 add temporary partition p2 values less than('20');";
+        alterTable(stmtStr, false);
+
+        TempPartitions tempPartitions = Deencapsulation.getField(tbl2, "tempPartitions");
+        testSerializeTempPartitions(tempPartitions);
+
+        stmtStr = "alter table db2.tbl2 replace partition (tp1, tp2) with temporary partition (p2) properties('strict_range' = 'false');";
+        alterTable(stmtStr, false);
+        checkShowPartitionsResultNum("db2.tbl2", false, 2);
+        checkShowPartitionsResultNum("db2.tbl2", true, 0);
+        checkPartitionExist(tbl2, "p2", false, true);
+        checkPartitionExist(tbl2, "tp3", false, true);
+        checkPartitionExist(tbl2, "tp1", false, false);
+        checkPartitionExist(tbl2, "tp2", false, false);
+        checkPartitionExist(tbl2, "p2", true, false);
+
+        checkTablet("db2.tbl2", "p2", 2);
+        checkTablet("db2.tbl2", "tp3", 2);
+
+        // for now, we have 2 partitions: p2 [min, 20) and tp3 [20, 30), and 0 temp partitions.
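+        // A temp partition may declare its own properties and distribution. The first two
+        // statements below are expected to fail (presumably because the hash column must match
+        // the table's and 'replication_num' = '2' cannot be satisfied in this single-backend
+        // test environment); the third succeeds, and its bucket number and 'in_memory' setting
+        // carry over to the formal partition after the replace further down.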
+        stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true') distributed by hash(k1) buckets 3";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true', 'replication_num' = '2') distributed by hash(k2) buckets 3";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db2.tbl2 add temporary partition tp4 values less than('20') ('in_memory' = 'true', 'replication_num' = '1') distributed by hash(k2) buckets 3";
+        alterTable(stmtStr, false);
+        
+        Partition p2 = tbl2.getPartition("p2");
+        Assert.assertNotNull(p2);
+        Assert.assertFalse(tbl2.getPartitionInfo().getIsInMemory(p2.getId()));
+        Assert.assertEquals(1, p2.getDistributionInfo().getBucketNum());
+
+        stmtStr = "alter table db2.tbl2 replace partition (p2) with temporary partition (tp4)";
+        alterTable(stmtStr, false);
+        
+        // for now, we have 2 partitions: p2 [min, 20) and tp3 [20, 30), and 0 temp partitions.
+        // p2 now has 3 buckets and 'in_memory' = true.
+        p2 = tbl2.getPartition("p2");
+        Assert.assertNotNull(p2);
+        Assert.assertTrue(tbl2.getPartitionInfo().getIsInMemory(p2.getId()));
+        Assert.assertEquals(3, p2.getDistributionInfo().getBucketNum());
+    }
+
+    @Test
+    public void testForStrictRangeCheck() throws Exception {
+        // create database db3
+        String createDbStmtStr = "create database db3;";
+        CreateDbStmt createDbStmt = (CreateDbStmt) UtFrameUtils.parseAndAnalyzeStmt(createDbStmtStr, ctx);
+        Catalog.getCurrentCatalog().createDb(createDbStmt);
+        System.out.println(Catalog.getCurrentCatalog().getDbNames());
+
+        // create table tbl3
+        String createTblStmtStr1 = "create table db3.tbl3 (k1 int, k2 int)\n" +
+                "partition by range(k1)\n" +
+                "(\n" +
+                "partition p1 values less than('10'),\n" +
+                "partition p2 values less than('20'),\n" +
+                "partition p3 values less than('30')\n" +
+                ")\n" +
+                "distributed by hash(k2) buckets 1\n" +
+                "properties('replication_num' = '1');";
+        CreateTableStmt createTableStmt = (CreateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(createTblStmtStr1, ctx);
+        Catalog.getCurrentCatalog().createTable(createTableStmt);
+
+        Database db3 = Catalog.getCurrentCatalog().getDb("default_cluster:db3");
+        OlapTable tbl3 = (OlapTable) db3.getTable("tbl3");
+
+        // base range is [min, 10), [10, 20), [20, 30)
+
+        // 1. add temp ranges: [10, 15), [15, 25), [25, 30), and replace the [10, 20), [20, 30)
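+        // this replace succeeds under the default 'strict_range' = 'true' because
+        // [10, 15) + [15, 25) + [25, 30) covers exactly the same range as [10, 20) + [20, 30)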
+        String stmtStr = "alter table db3.tbl3 add temporary partition tp1 values [('10'), ('15'))";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db3.tbl3 add temporary partition tp2 values [('15'), ('25'))";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db3.tbl3 add temporary partition tp3 values [('25'), ('30'))";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db3.tbl3 replace partition (p2, p3) with temporary partition(tp1, tp2, tp3)";
+        alterTable(stmtStr, false);
+
+        // now base range is [min, 10), [10, 15), [15, 25), [25, 30) -> p1,tp1,tp2,tp3
+        stmtStr = "truncate table db3.tbl3";
+        TruncateTableStmt truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(stmtStr, ctx);
+        Catalog.getCurrentCatalog().truncateTable(truncateTableStmt);
+        // 2. add temp ranges: [10, 31), and replace the [10, 15), [15, 25), [25, 30)
+        stmtStr = "alter table db3.tbl3 add temporary partition tp4 values [('10'), ('31'))";
+        alterTable(stmtStr, false);
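+        // [10, 31) extends past the union [10, 30) of tp1/tp2/tp3, so this replace is
+        // expected to fail under the default 'strict_range' = 'true'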
+        stmtStr = "alter table db3.tbl3 replace partition (tp1, tp2, tp3) with temporary partition(tp4)";
+        alterTable(stmtStr, true);
+        // drop tp4, then add a new temp partition tp4 [10, 30) to replace tp1, tp2, tp3
+        stmtStr = "alter table db3.tbl3 drop temporary partition tp4";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db3.tbl3 add temporary partition tp4 values [('10'), ('30'))";
+        alterTable(stmtStr, false);
+        stmtStr = "alter table db3.tbl3 replace partition (tp1, tp2, tp3) with temporary partition(tp4)";
+        alterTable(stmtStr, false);
+
+        // now base range is [min, 10), [10, 30) -> p1,tp4
+        stmtStr = "truncate table db3.tbl3";
+        truncateTableStmt = (TruncateTableStmt) UtFrameUtils.parseAndAnalyzeStmt(stmtStr, ctx);
+        Catalog.getCurrentCatalog().truncateTable(truncateTableStmt);
+        // 3. add temp partition tp5 [50, 60) and replace partition tp4
+        stmtStr = "alter table db3.tbl3 add temporary partition tp5 values [('50'), ('60'))";
+        alterTable(stmtStr, false);
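+        // [50, 60) does not cover the replaced range [10, 30), so the replace only
+        // succeeds once 'strict_range' is set to 'false'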
+        stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5)";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5) properties('strict_range' = 'true', 'use_temp_partition_name' = 'true')";
+        alterTable(stmtStr, true);
+        stmtStr = "alter table db3.tbl3 replace partition (tp4) with temporary partition(tp5) properties('strict_range' = 'false', 'use_temp_partition_name' = 'true')";
+        alterTable(stmtStr, false);
+
+        // now base range is [min, 10), [50, 60) -> p1,tp5
+        checkShowPartitionsResultNum("db3.tbl3", false, 2);
+        checkShowPartitionsResultNum("db3.tbl3", true, 0);
+    }
+    
+    private void testSerializeOlapTable(OlapTable tbl) throws IOException, AnalysisException {
+        MetaContext metaContext = new MetaContext();
+        metaContext.setMetaVersion(FeMetaVersion.VERSION_74);
+        metaContext.setThreadLocalInfo();
+
+        // 1. Write objects to file
+        File file = new File(tempPartitionFile);
+        file.createNewFile();
+        DataOutputStream out = new DataOutputStream(new FileOutputStream(file));
+
+        tbl.write(out);
+        out.flush();
+        out.close();
+
+        // 2. Read objects from file
+        DataInputStream in = new DataInputStream(new FileInputStream(file));
+
+        OlapTable readTbl = (OlapTable) Table.read(in);
+        Assert.assertEquals(tbl.getId(), readTbl.getId());
+        Assert.assertEquals(tbl.getAllTempPartitions().size(), readTbl.getAllTempPartitions().size());
+        file.delete();
+    }
+
+    private void testSerializeTempPartitions(TempPartitions tempPartitionsInstance) throws IOException, AnalysisException {
+        MetaContext metaContext = new MetaContext();
+        metaContext.setMetaVersion(FeMetaVersion.VERSION_74);
+        metaContext.setThreadLocalInfo();
+
+        // 1. Write objects to file
+        File file = new File(tempPartitionFile);
+        file.createNewFile();
+        DataOutputStream out = new DataOutputStream(new FileOutputStream(file));
+    
+        tempPartitionsInstance.write(out);
+        out.flush();
+        out.close();
+    
+        // 2. Read objects from file
+        DataInputStream in = new DataInputStream(new FileInputStream(file));
+    
+        TempPartitions readTempPartition = TempPartitions.read(in);
+        List<Partition> partitions = readTempPartition.getAllPartitions();
+        Assert.assertEquals(1, partitions.size());
+        Assert.assertEquals(2, partitions.get(0).getMaterializedIndices(IndexExtState.VISIBLE).size());
+    }
+}
diff --git a/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java b/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
index 1ebf412..84a47fe 100644
--- a/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
+++ b/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
@@ -17,10 +17,6 @@
 
 package org.apache.doris.http;
 
-import mockit.Expectations;
-import mockit.Mock;
-import mockit.MockUp;
-import mockit.Mocked;
 import org.apache.doris.alter.MaterializedViewHandler;
 import org.apache.doris.alter.SchemaChangeHandler;
 import org.apache.doris.catalog.Catalog;
@@ -52,9 +48,7 @@ import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.thrift.TStorageMedium;
 
 import com.google.common.collect.Lists;
-import okhttp3.Credentials;
-import okhttp3.MediaType;
-import okhttp3.OkHttpClient;
+
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -66,7 +60,15 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
+
 import junit.framework.AssertionFailedError;
+import mockit.Expectations;
+import mockit.Mock;
+import mockit.MockUp;
+import mockit.Mocked;
+import okhttp3.Credentials;
+import okhttp3.MediaType;
+import okhttp3.OkHttpClient;
 
 abstract public class DorisHttpTestCase {
 
diff --git a/fe/src/test/java/org/apache/doris/persist/gson/GsonDerivedClassSerializationTest.java b/fe/src/test/java/org/apache/doris/persist/gson/GsonDerivedClassSerializationTest.java
index 35f75d8..e39938b 100644
--- a/fe/src/test/java/org/apache/doris/persist/gson/GsonDerivedClassSerializationTest.java
+++ b/fe/src/test/java/org/apache/doris/persist/gson/GsonDerivedClassSerializationTest.java
@@ -3,6 +3,7 @@ package org.apache.doris.persist.gson;
 import org.apache.doris.common.io.Text;
 import org.apache.doris.common.io.Writable;
 import org.apache.doris.persist.gson.GsonUtils.HiddenAnnotationExclusionStrategy;
+import org.apache.doris.persist.gson.GsonUtils.PostProcessTypeAdapterFactory;
 
 import com.google.common.collect.Maps;
 import com.google.gson.Gson;
@@ -34,6 +35,8 @@ import java.util.Map;
  * register 2 derived classes to the factory. And then register the factory
  * to the GsonBuilder to create GSON instance.
  * 
+ * 
+ * 
  */
 public class GsonDerivedClassSerializationTest {
     private static String fileName = "./GsonDerivedClassSerializationTest";
@@ -66,15 +69,23 @@ public class GsonDerivedClassSerializationTest {
         }
     }
 
-    public static class ChildClassA extends ParentClass {
+    public static class ChildClassA extends ParentClass implements GsonPostProcessable {
         @SerializedName(value = "tag")
         public String tagA;
 
+        public String postTagA;
+
         public ChildClassA(int flag, String tag) {
             // pass "ChildClassA.class.getSimpleName()" to field "clazz"
             super(flag, ChildClassA.class.getSimpleName());
             this.tagA = tag;
         }
+
+        @Override
+        public void gsonPostProcess() {
+            this.postTagA = "after post";
+
+        }
     }
 
     public static class ChildClassB extends ParentClass {
@@ -123,6 +134,7 @@ public class GsonDerivedClassSerializationTest {
             .enableComplexMapKeySerialization()
             // register the RuntimeTypeAdapterFactory
             .registerTypeAdapterFactory(runtimeTypeAdapterFactory)
+            .registerTypeAdapterFactory(new PostProcessTypeAdapterFactory())
             .create();
 
     @Test
@@ -143,6 +155,7 @@ public class GsonDerivedClassSerializationTest {
         Assert.assertTrue(parentClass instanceof ChildClassA);
         Assert.assertEquals(1, ((ChildClassA) parentClass).flag);
         Assert.assertEquals("A", ((ChildClassA) parentClass).tagA);
+        Assert.assertEquals("after post", ((ChildClassA) parentClass).postTagA);
     }
 
     @Test
diff --git a/fe/src/test/java/org/apache/doris/utframe/UtFrameUtils.java b/fe/src/test/java/org/apache/doris/utframe/UtFrameUtils.java
index 58de9a4..10b7f4d 100644
--- a/fe/src/test/java/org/apache/doris/utframe/UtFrameUtils.java
+++ b/fe/src/test/java/org/apache/doris/utframe/UtFrameUtils.java
@@ -23,6 +23,7 @@ import org.apache.doris.analysis.SqlScanner;
 import org.apache.doris.analysis.StatementBase;
 import org.apache.doris.analysis.UserIdentity;
 import org.apache.doris.catalog.Catalog;
+import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.Pair;
 import org.apache.doris.mysql.privilege.PaloAuth;
@@ -76,9 +77,19 @@ public class UtFrameUtils {
         System.out.println("begin to parse stmt: " + originStmt);
         SqlScanner input = new SqlScanner(new StringReader(originStmt), ctx.getSessionVariable().getSqlMode());
         SqlParser parser = new SqlParser(input);
-
-        StatementBase statementBase = (StatementBase) parser.parse().value;
         Analyzer analyzer = new Analyzer(ctx.getCatalog(), ctx);
+        StatementBase statementBase = null;
+        try {
+            statementBase = (StatementBase) parser.parse().value;
+        } catch (AnalysisException e) {
+            String errorMessage = parser.getErrorMsg(originStmt);
+            System.err.println("parse failed: " + errorMessage);
+            if (errorMessage == null) {
+                throw e;
+            } else {
+                throw new AnalysisException(errorMessage, e);
+            }
+        }
         statementBase.analyze(analyzer);
         return statementBase;
     }

