Posted to commits@inlong.apache.org by do...@apache.org on 2022/02/15 06:08:35 UTC

[incubator-inlong] branch master updated: [INLONG-2492][Manager] Plug-in support for DataStorage (#2494)

This is an automated email from the ASF dual-hosted git repository.

dockerzhang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-inlong.git


The following commit(s) were added to refs/heads/master by this push:
     new b549cec  [INLONG-2492][Manager] Plug-in support for DataStorage (#2494)
b549cec is described below

commit b549cecb87ed354a3608df88203b2fea59495ae4
Author: healchow <he...@gmail.com>
AuthorDate: Tue Feb 15 14:08:29 2022 +0800

    [INLONG-2492][Manager] Plug-in support for DataStorage (#2494)
---
 .../inlong/manager/common/enums/BizConstant.java   |  15 +-
 .../manager/common/enums/BizErrorCodeEnum.java     |  33 +-
 .../common/exceptions/BusinessException.java       |   2 +-
 ...rageApproveInfo.java => StorageApproveDTO.java} |  14 +-
 ...eSummaryInfo.java => StorageBriefResponse.java} |  10 +-
 ...HiveFieldInfo.java => StorageFieldRequest.java} |  16 +-
 ...iveFieldInfo.java => StorageFieldResponse.java} |  11 +-
 ...{StorageHiveDTO.java => StorageForSortDTO.java} |  36 +-
 ...eListResponse.java => StorageListResponse.java} |  20 +-
 .../pojo/datastorage/StoragePageRequest.java       |  10 +-
 ...aseStorageResponse.java => StorageRequest.java} |  28 +-
 ...aseStorageRequest.java => StorageResponse.java} |  51 +-
 .../HiveStorageDTO.java}                           |  90 ++--
 .../HiveStorageListResponse.java}                  |   8 +-
 .../HiveStorageRequest.java}                       |  16 +-
 .../HiveStorageResponse.java}                      |  42 +-
 .../pojo/datastream/DataStreamApproveInfo.java     |   9 +-
 .../common/pojo/datastream/FullStreamRequest.java  |   7 +-
 .../common/pojo/datastream/FullStreamResponse.java |   7 +-
 ...amSummaryInfo.java => StreamBriefResponse.java} |  15 +-
 .../manager/common/util/CommonBeanUtils.java       |  31 +-
 .../manager/common/util/JsonTypeDefine.java}       |  25 +-
 .../inlong/manager/common/util/Preconditions.java  |  16 +-
 inlong-manager/manager-dao/pom.xml                 |   4 -
 .../{StorageHiveEntity.java => StorageEntity.java} |  33 +-
 ...iveFieldEntity.java => StorageFieldEntity.java} |   9 +-
 ...eEntityMapper.java => StorageEntityMapper.java} |  88 ++--
 ...tyMapper.java => StorageFieldEntityMapper.java} |  43 +-
 .../src/main/resources/generatorConfig.xml         |   4 +-
 .../main/resources/mappers/StorageEntityMapper.xml | 380 +++++++++++++++
 .../resources/mappers/StorageFieldEntityMapper.xml | 113 +++++
 .../resources/mappers/StorageHiveEntityMapper.xml  | 524 ---------------------
 .../mappers/StorageHiveFieldEntityMapper.xml       | 206 --------
 .../test/resources/sql/apache_inlong_manager.sql   | 253 +++++-----
 inlong-manager/manager-service/pom.xml             |  12 +
 .../manager/service/CommandLineRunnerImpl.java     |  81 ++++
 .../manager/service/core/DataStreamService.java    |   9 +-
 .../core/impl/BusinessProcessOperation.java        |   9 +-
 .../service/core/impl/DataStreamServiceImpl.java   |  69 ++-
 .../service/core/impl/StorageBaseOperation.java    | 189 --------
 .../service/core/impl/StorageHiveOperation.java    | 366 --------------
 .../service/core/impl/StorageServiceImpl.java      | 342 --------------
 .../manager/service/storage/StorageOperation.java  | 105 +++++
 .../service/storage/StorageOperationFactory.java   |  51 ++
 .../service/{core => storage}/StorageService.java  |  86 ++--
 .../service/storage/StorageServiceImpl.java        | 444 +++++++++++++++++
 .../service/storage/hive/HiveStorageOperation.java | 239 ++++++++++
 .../hive/CreateHiveTableEventSelector.java         |   9 +-
 .../hive/CreateHiveTableForStreamListener.java     |  11 +-
 .../thirdpart/hive/CreateHiveTableListener.java    |  11 +-
 .../service/thirdpart/hive/HiveTableOperator.java  |  61 +--
 .../mq/CreatePulsarGroupForStreamTaskListener.java |   5 +-
 .../thirdpart/sort/PushHiveConfigTaskListener.java |  83 ++--
 .../workflow/business/NewBusinessWorkflowForm.java |  13 +-
 .../stream/CreateStreamWorkflowDefinition.java     |  17 +-
 .../apache/inlong/manager/service/BaseConfig.java  |   7 +
 .../src/test/resources/application-test.properties |  10 +
 .../test/resources/sql/apache_inlong_manager.sql   | 293 ++++++------
 .../manager-web/sql/apache_inlong_manager.sql      | 320 ++++++-------
 .../web/controller/DataStreamController.java       |  11 +-
 .../manager/web/controller/StorageController.java  |  38 +-
 .../manager/service/core/BusinessServiceTest.java  |   6 +-
 ...ageServiceTest.java => StorageServiceTest.java} |  31 +-
 .../src/test/resources/application-test.properties |  10 +
 .../test/resources/sql/apache_inlong_manager.sql   | 254 +++++-----
 inlong-manager/pom.xml                             |  12 +
 66 files changed, 2583 insertions(+), 2790 deletions(-)
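
The file list above introduces a plug-in layer for storages: a generic StorageOperation contract, a StorageOperationFactory that selects the implementation by storageType, and a Hive plug-in (HiveStorageOperation). A minimal sketch of the dispatch this layout implies, assuming the interface and factory shapes (the real signatures live in the files of this commit and may differ):

    import java.util.List;

    import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
    import org.apache.inlong.manager.common.exceptions.BusinessException;
    import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Service;

    // Hypothetical shape of the plug-in contract: one implementation per storage type
    interface StorageOperation {
        boolean accept(String storageType);                       // can this plug-in handle the type?
        Integer saveOpt(StorageRequest request, String operator); // type-specific save logic
    }

    @Service
    class StorageOperationFactory {

        @Autowired
        private List<StorageOperation> operationList; // Spring injects every StorageOperation bean

        // Pick the plug-in for the given type, or fail with the new 1401 error
        public StorageOperation getInstance(String storageType) {
            return operationList.stream()
                    .filter(operation -> operation.accept(storageType))
                    .findFirst()
                    .orElseThrow(() -> new BusinessException(String.format(
                            BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORT.getMessage(), storageType)));
        }
    }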

diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizConstant.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizConstant.java
index 8de8a2a..aa76288 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizConstant.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizConstant.java
@@ -50,12 +50,18 @@ public class BizConstant {
 
     public static final String CLUSTER_HIVE_TOPO = "HIVE_TOPO";
 
-    public static final String ID_IS_EMPTY = "id cannot empty during the update/delete operation";
+    public static final String ID_IS_EMPTY = "primary key is empty";
 
-    public static final String GROUP_ID_IS_EMPTY = "business group id is empty";
+    public static final String GROUP_ID_IS_EMPTY = "data group id is empty";
 
     public static final String STREAM_ID_IS_EMPTY = "data stream id is empty";
 
+    public static final String REQUEST_IS_EMPTY = "request is empty";
+
+    public static final String STORAGE_TYPE_IS_EMPTY = "storageType is empty";
+
+    public static final String STORAGE_TYPE_NOT_SAME = "Expected storageType is %s, but found %s";
+
     public static final String PULSAR_TOPIC_TYPE_SERIAL = "SERIAL";
 
     public static final String PULSAR_TOPIC_TYPE_PARALLEL = "PARALLEL";
@@ -66,7 +72,8 @@ public class BizConstant {
 
     public static final String PREFIX_RLQ = "rlq"; // prefix of the Topic of the retry letter queue
 
-    public static final Integer ENABLE_CREATE_TABLE = 1; // Enable create table
+    public static final Integer ENABLE_CREATE_RESOURCE = 1; // Enable create resource
+
+    public static final Integer DISABLE_CREATE_RESOURCE = 0; // Disable create resource
 
-    public static final Integer DISABLE_CREATE_TABLE = 0; // Disable create table
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizErrorCodeEnum.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizErrorCodeEnum.java
index b7a2290..4e6f5b6 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizErrorCodeEnum.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/enums/BizErrorCodeEnum.java
@@ -23,6 +23,8 @@ public enum BizErrorCodeEnum {
     PERMISSION_REQUIRED(2003, "The current user does not have operation authority"),
     AUTHENTICATION_REQUIRED(2004, "Authentication failed"),
 
+    USER_IS_NOT_MANAGER(110, "%s is not the manager, please contact %s"),
+
     BUSINESS_NOT_FOUND(1001, "Business does not exist/no operation authority"),
     BUSINESS_DUPLICATE(1002, "Business already exists"),
     BUSINESS_SAVE_FAILED(1003, "Failed to save/update business information"),
@@ -64,18 +66,17 @@ public enum BizErrorCodeEnum {
 
     HIVE_OPERATION_FAILED(1311, "Hive operation failed"),
 
-    STORAGE_TYPE_NOT_SUPPORTED(1401, "Storage type is not supported"),
+    STORAGE_TYPE_IS_NULL(1400, "Storage type is null"),
+    STORAGE_TYPE_NOT_SUPPORT(1401, "Storage type '%s' is not supported"),
     STORAGE_INFO_NOT_FOUND(1402, "Storage information does not exist/no operation authority"),
-    STORAGE_SAVE_FAILED(1403, "Failed to save/update business information"),
-    STORAGE_HIVE_FIELD_SAVE_FAILED(1404, "Failed to save/update HIVE data storage field"),
-    STORAGE_OPT_NOT_ALLOWED(1405,
-            "The current business status does not allow adding/modifying/deleting data storage information"),
-    STORAGE_DB_NAME_UPDATE_NOT_ALLOWED(1408,
-            "The current business status does not allow modification of the storage target database name"),
-    STORAGE_TB_NAME_UPDATE_NOT_ALLOWED(1409,
-            "The current business status does not allow modification of the storage target table name"),
-    STORAGE_HIVE_FIELD_UPDATE_NOT_ALLOWED(1410,
-            "It is not allowed to modify/delete field information in the current business state"),
+    STORAGE_INFO_INCORRECT(1403, "Storage information is incorrect"),
+    STORAGE_ALREADY_EXISTS(1404, "Storage already exists for the given groupId and streamId"),
+    STORAGE_SAVE_FAILED(1405, "Failed to save or update storage info"),
+    STORAGE_FIELD_SAVE_FAILED(1406, "Failed to save or update storage field"),
+    STORAGE_OPT_NOT_ALLOWED(1407, "Current status does not allow adding, modifying, or deleting storage info"),
+    STORAGE_DB_NAME_UPDATE_NOT_ALLOWED(1408, "Current status does not allow modifying the database name"),
+    STORAGE_TB_NAME_UPDATE_NOT_ALLOWED(1409, "Current status does not allow modifying the table name"),
+    STORAGE_FIELD_UPDATE_NOT_ALLOWED(1410, "Current status does not allow modifying or deleting fields"),
 
     WORKFLOW_EXE_FAILED(4000, "Workflow execution exception"),
 
@@ -94,18 +95,18 @@ public enum BizErrorCodeEnum {
     ;
 
     private final int code;
-    private final String defaultMessage;
+    private final String message;
 
-    BizErrorCodeEnum(int code, String defaultMessage) {
+    BizErrorCodeEnum(int code, String message) {
         this.code = code;
-        this.defaultMessage = defaultMessage;
+        this.message = message;
     }
 
     public int getCode() {
         return code;
     }
 
-    public String getDefaultMessage() {
-        return defaultMessage;
+    public String getMessage() {
+        return message;
     }
 }
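
Several of the new messages are format templates; the caller fills the placeholders before raising the exception. A usage example under that assumption:

    // Assumed call site: format the template, then raise it
    String storageType = "KAFKA";
    throw new BusinessException(String.format(
            BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORT.getMessage(), storageType));
    // message: "Storage type 'KAFKA' is not supported"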
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/exceptions/BusinessException.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/exceptions/BusinessException.java
index 55e0e10..c1108c6 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/exceptions/BusinessException.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/exceptions/BusinessException.java
@@ -28,7 +28,7 @@ import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
 public class BusinessException extends BaseException {
 
     public BusinessException(BizErrorCodeEnum errorCodeEnum) {
-        super(errorCodeEnum.getCode(), errorCodeEnum.getDefaultMessage());
+        super(errorCodeEnum.getCode(), errorCodeEnum.getMessage());
     }
 
     public BusinessException(String message) {
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveDTO.java
similarity index 80%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveInfo.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveDTO.java
index 0809ee4..34453e7 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageApproveDTO.java
@@ -19,24 +19,18 @@ package org.apache.inlong.manager.common.pojo.datastorage;
 
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
 import lombok.Data;
-import lombok.NoArgsConstructor;
 
 /**
- * Data storage approve info
+ * Storage approve info
  */
 @Data
-@Builder
-@NoArgsConstructor
-@AllArgsConstructor
-@ApiModel("Data storage approve info")
-public class StorageApproveInfo {
+@ApiModel("Storage approve info")
+public class StorageApproveDTO {
 
     private Integer id;
 
-    @ApiModelProperty("Storage type, support: HIVE")
+    @ApiModelProperty("Storage type, such as HIVE")
     private String storageType;
 
     @ApiModelProperty("Storage cluster URL")
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageSummaryInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageBriefResponse.java
similarity index 86%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageSummaryInfo.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageBriefResponse.java
index 98a3aa3..668eb9d 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageSummaryInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageBriefResponse.java
@@ -22,21 +22,21 @@ import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
 
 /**
- * Basic data storage information
+ * Brief response of the storage
  */
 @Data
-@ApiModel("Basic data storage information")
-public class StorageSummaryInfo {
+@ApiModel("Brief response of the storage")
+public class StorageBriefResponse {
 
     private Integer id;
 
-    @ApiModelProperty("Business group id")
+    @ApiModelProperty("Data group id")
     private String inlongGroupId;
 
     @ApiModelProperty("Data stream id")
     private String inlongStreamId;
 
-    @ApiModelProperty("Storage type, support:HIVE")
+    @ApiModelProperty("Storage type, such as HIVE")
     private String storageType;
 
     @ApiModelProperty("Storage cluster ID")
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldRequest.java
similarity index 81%
copy from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java
copy to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldRequest.java
index 9aa1c9d..c826f46 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldRequest.java
@@ -22,16 +22,11 @@ import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
 
 /**
- * Hive storage field info
+ * Storage field info
  */
 @Data
-@ApiModel("Hive storage field info")
-public class StorageHiveFieldInfo {
-
-    private Integer id;
-
-    @ApiModelProperty("Storage ID")
-    private Integer storageId;
+@ApiModel("Storage field info")
+public class StorageFieldRequest {
 
     @ApiModelProperty("Field name")
     private String fieldName;
@@ -51,10 +46,7 @@ public class StorageHiveFieldInfo {
     @ApiModelProperty("Source field type")
     private String sourceFieldType;
 
-    @ApiModelProperty("Field order ")
+    @ApiModelProperty("Field order")
     private Short rankNum;
 
-    @ApiModelProperty("is deleted? 0: deleted, 1: not deleted")
-    private Integer isDeleted = 0;
-
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldResponse.java
similarity index 86%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldResponse.java
index 9aa1c9d..aa0f201 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveFieldInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageFieldResponse.java
@@ -22,11 +22,11 @@ import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
 
 /**
- * Hive storage field info
+ * Storage field response
  */
 @Data
-@ApiModel("Hive storage field info")
-public class StorageHiveFieldInfo {
+@ApiModel("Storage field response")
+public class StorageFieldResponse {
 
     private Integer id;
 
@@ -51,10 +51,7 @@ public class StorageHiveFieldInfo {
     @ApiModelProperty("Source field type")
     private String sourceFieldType;
 
-    @ApiModelProperty("Field order ")
+    @ApiModelProperty("Field order")
     private Short rankNum;
 
-    @ApiModelProperty("is deleted? 0: deleted, 1: not deleted")
-    private Integer isDeleted = 0;
-
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveDTO.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageForSortDTO.java
similarity index 62%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveDTO.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageForSortDTO.java
index 9906ea0..e3f8958 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveDTO.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageForSortDTO.java
@@ -17,39 +17,23 @@
 
 package org.apache.inlong.manager.common.pojo.datastorage;
 
+import io.swagger.annotations.ApiModel;
 import lombok.Data;
 
 /**
- * Hive info
+ * Storage info for Sort
  */
 @Data
-public class StorageHiveDTO {
+@ApiModel("Storage info for Sort")
+public class StorageForSortDTO {
 
     private Integer id;
     private String inlongGroupId;
     private String inlongStreamId;
-    private Integer enableCreateTable;
-
-    // Hive server info
-    private String jdbcUrl;
-    private String username;
-    private String password;
-
-    // Hive db and table info
-    private String dbName;
-    private String tableName;
-    private String hdfsDefaultFs;
-    private String warehouseDir;
-
-    private Integer partitionInterval;
-    private String partitionUnit;
-    private String primaryPartition;
-    private String secondaryPartition;
-    private String partitionCreationStrategy;
-
-    private String fileFormat;
-    private String dataEncoding;
-    private String targetSeparator; // Target separator configured in the storage info
+    private String storageType;
+    private Integer storagePeriod;
+    private Integer enableCreateResource;
+    private String extParams;
     private Integer status;
     private String creator;
 
@@ -58,7 +42,7 @@ public class StorageHiveDTO {
     private String dataSourceType;
     private String dataType;
     private String description;
-    private String sourceSeparator; // Target separator configured in the stream info
+    private String sourceSeparator; // Source separator configured in the stream info
     private String dataEscapeChar;
 
-}
\ No newline at end of file
+}
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageListResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageListResponse.java
similarity index 74%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageListResponse.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageListResponse.java
index 80d221d..71f940f 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageListResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageListResponse.java
@@ -19,14 +19,15 @@ package org.apache.inlong.manager.common.pojo.datastorage;
 
 import com.fasterxml.jackson.annotation.JsonFormat;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.Date;
 import lombok.Data;
 
+import java.util.Date;
+
 /**
- * Response of data storage list
+ * Response of the storage list
  */
 @Data
-public class BaseStorageListResponse {
+public class StorageListResponse {
 
     @ApiModelProperty(value = "Primary key")
     private Integer id;
@@ -40,6 +41,19 @@ public class BaseStorageListResponse {
     @ApiModelProperty(value = "Data stream id")
     private String inlongStreamId;
 
+    @ApiModelProperty("Storage type, including: HIVE, ES, etc.")
+    private String storageType;
+
+    @ApiModelProperty("storage period, unit: day")
+    private Integer storagePeriod;
+
+    @ApiModelProperty(value = "Whether to enable create storage resource? 0: disable, 1: enable. default is 1")
+    private Integer enableCreateResource;
+
+    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
+    private Date createTime;
+
     @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
     private Date modifyTime;
+
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StoragePageRequest.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StoragePageRequest.java
index 083eb83..cbd9e9f 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StoragePageRequest.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StoragePageRequest.java
@@ -23,21 +23,25 @@ import lombok.Data;
 import lombok.EqualsAndHashCode;
 import org.apache.inlong.manager.common.beans.PageRequest;
 
+import javax.validation.constraints.NotNull;
+
 /**
- * Paging query conditions for data storage information
+ * Paging query conditions for storage
  */
 @Data
 @EqualsAndHashCode(callSuper = false)
-@ApiModel("Paging query conditions for data storage information")
+@ApiModel("Paging query conditions for storage")
 public class StoragePageRequest extends PageRequest {
 
-    @ApiModelProperty(value = "Business group id", required = true)
+    @ApiModelProperty(value = "Data group id", required = true)
+    @NotNull
     private String inlongGroupId;
 
     @ApiModelProperty(value = "Data stream id")
     private String inlongStreamId;
 
     @ApiModelProperty(value = "Storage type, such as HIVE", required = true)
+    @NotNull
     private String storageType;
 
     @ApiModelProperty(value = "Key word")
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageRequest.java
similarity index 72%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageResponse.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageRequest.java
index 649b369..5b62659 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageRequest.java
@@ -17,38 +17,44 @@
 
 package org.apache.inlong.manager.common.pojo.datastorage;
 
-import com.fasterxml.jackson.annotation.JsonSubTypes;
-import com.fasterxml.jackson.annotation.JsonSubTypes.Type;
 import com.fasterxml.jackson.annotation.JsonTypeInfo;
 import com.fasterxml.jackson.annotation.JsonTypeInfo.Id;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
-import org.apache.inlong.manager.common.enums.BizConstant;
+
+import javax.validation.constraints.NotNull;
+import java.util.List;
 
 /**
- * Basic response of data storage
+ * Request of storage
  */
 @Data
-@ApiModel("Basic response of data storage")
+@ApiModel("Request of storage")
 @JsonTypeInfo(use = Id.NAME, visible = true, property = "storageType")
-@JsonSubTypes({
-        @Type(value = StorageHiveResponse.class, name = BizConstant.STORAGE_HIVE)
-})
-public class BaseStorageResponse {
+public class StorageRequest {
 
     private Integer id;
 
-    @ApiModelProperty("Business group id")
+    @ApiModelProperty("Data group id")
+    @NotNull
     private String inlongGroupId;
 
     @ApiModelProperty("Data stream id")
+    @NotNull
     private String inlongStreamId;
 
-    @ApiModelProperty("Storage type, including: HDFS, HIVE, ES, etc.")
+    @ApiModelProperty("Storage type, including: HIVE, ES, etc.")
+    @NotNull
     private String storageType;
 
     @ApiModelProperty("Data storage period, unit: day")
     private Integer storagePeriod;
 
+    @ApiModelProperty(value = "Whether to enable create storage resource? 0: disable, 1: enable. default is 1")
+    private Integer enableCreateResource = 1;
+
+    @ApiModelProperty("Storage field list")
+    private List<StorageFieldRequest> fieldList;
+
 }
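
StorageRequest keeps @JsonTypeInfo(property = "storageType") but drops the hard-coded @JsonSubTypes list, so subtypes must now be registered at runtime (see the new JsonTypeDefine annotation later in this diff). A minimal sketch of the resulting deserialization, assuming the HIVE subtype has been registered:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.jsontype.NamedType;

    ObjectMapper mapper = new ObjectMapper();
    // Register the subtype by name, as the removed @JsonSubTypes used to do statically
    mapper.registerSubtypes(new NamedType(HiveStorageRequest.class, BizConstant.STORAGE_HIVE));

    String json = "{\"storageType\":\"HIVE\",\"inlongGroupId\":\"g1\",\"inlongStreamId\":\"s1\"}";
    StorageRequest request = mapper.readValue(json, StorageRequest.class); // throws JsonProcessingException
    // request is a HiveStorageRequest, chosen by the storageType property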
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageRequest.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageResponse.java
similarity index 53%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageRequest.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageResponse.java
index ed0b185..c9a21bf 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/BaseStorageRequest.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageResponse.java
@@ -17,38 +17,61 @@
 
 package org.apache.inlong.manager.common.pojo.datastorage;
 
-import com.fasterxml.jackson.annotation.JsonSubTypes;
-import com.fasterxml.jackson.annotation.JsonSubTypes.Type;
-import com.fasterxml.jackson.annotation.JsonTypeInfo;
-import com.fasterxml.jackson.annotation.JsonTypeInfo.Id;
+import com.fasterxml.jackson.annotation.JsonFormat;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
-import org.apache.inlong.manager.common.enums.BizConstant;
+
+import java.util.Date;
+import java.util.List;
 
 /**
- * Basic request of data storage
+ * Response of the storage
  */
 @Data
-@ApiModel("Basic request of data storage")
-@JsonTypeInfo(use = Id.NAME, visible = true, property = "storageType")
-@JsonSubTypes({
-        @Type(value = StorageHiveRequest.class, name = BizConstant.STORAGE_HIVE)
-})
-public class BaseStorageRequest {
+@ApiModel("Response of the storage")
+public class StorageResponse {
 
     private Integer id;
 
-    @ApiModelProperty("Business group id")
+    @ApiModelProperty("Data group id")
     private String inlongGroupId;
 
     @ApiModelProperty("Data stream id")
     private String inlongStreamId;
 
-    @ApiModelProperty("Storage type, including: HDFS, HIVE, ES, etc.")
+    @ApiModelProperty("Storage type, including: HIVE, ES, etc.")
     private String storageType;
 
     @ApiModelProperty("Data storage period, unit: day")
     private Integer storagePeriod;
 
+    @ApiModelProperty(value = "Whether to enable create storage resource? 0: disable, 1: enable. default is 1",
+            notes = "Such as create Hive table")
+    private Integer enableCreateResource = 1;
+
+    @ApiModelProperty("Backend operation log")
+    private String operateLog;
+
+    @ApiModelProperty("Status")
+    private Integer status;
+
+    @ApiModelProperty("Previous State")
+    private Integer previousStatus;
+
+    @ApiModelProperty("Creator")
+    private String creator;
+
+    @ApiModelProperty("Modifier")
+    private String modifier;
+
+    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
+    private Date createTime;
+
+    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
+    private Date modifyTime;
+
+    @ApiModelProperty("Storage field list")
+    private List<StorageFieldResponse> fieldList;
+
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageDTO.java
similarity index 51%
copy from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java
copy to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageDTO.java
index 4a81f63..cb16d39 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageDTO.java
@@ -15,31 +15,30 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.common.pojo.datastorage;
+package org.apache.inlong.manager.common.pojo.datastorage.hive;
 
-import com.fasterxml.jackson.annotation.JsonFormat;
-import io.swagger.annotations.ApiModel;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.Date;
-import java.util.List;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
 import lombok.Data;
-import lombok.EqualsAndHashCode;
-import lombok.ToString;
-import org.apache.inlong.manager.common.enums.BizConstant;
+import lombok.NoArgsConstructor;
+import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
+import org.apache.inlong.manager.common.exceptions.BusinessException;
+
+import javax.validation.constraints.NotNull;
 
 /**
- * Response of the Hive storage info
+ * Hive storage info
  */
 @Data
-@ToString(callSuper = true)
-@EqualsAndHashCode(callSuper = true)
-@ApiModel(value = "Response of the Hive storage info")
-public class StorageHiveResponse extends BaseStorageResponse {
-
-    private String storageType = BizConstant.STORAGE_HIVE;
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class HiveStorageDTO {
 
-    @ApiModelProperty("Whether to enable create table")
-    private Integer enableCreateTable;
+    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // thread safe
 
     @ApiModelProperty("Hive JDBC URL")
     private String jdbcUrl;
@@ -86,31 +85,36 @@ public class StorageHiveResponse extends BaseStorageResponse {
     @ApiModelProperty("Data field separator")
     private String dataSeparator;
 
-    @ApiModelProperty("Backend operation log")
-    private String optLog;
-
-    @ApiModelProperty("Status")
-    private Integer status;
-
-    @ApiModelProperty("Previous State")
-    private Integer previousStatus;
-
-    @ApiModelProperty("Creator")
-    private String creator;
-
-    @ApiModelProperty("modifier")
-    private String modifier;
-
-    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
-    private Date createTime;
-
-    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
-    private Date modifyTime;
-
-    @ApiModelProperty("hive table field list")
-    private List<StorageHiveFieldInfo> hiveFieldList;
-
-    @ApiModelProperty("other ext info list")
-    private List<StorageExtInfo> extList;
+    /**
+     * Get the dto instance from the request
+     */
+    public static HiveStorageDTO getFromRequest(HiveStorageRequest request) {
+        return HiveStorageDTO.builder()
+                .jdbcUrl(request.getJdbcUrl())
+                .username(request.getUsername())
+                .password(request.getPassword())
+                .dbName(request.getDbName())
+                .tableName(request.getTableName())
+                .hdfsDefaultFs(request.getHdfsDefaultFs())
+                .warehouseDir(request.getWarehouseDir())
+                .partitionInterval(request.getPartitionInterval())
+                .partitionUnit(request.getPartitionUnit())
+                .primaryPartition(request.getPrimaryPartition())
+                .secondaryPartition(request.getSecondaryPartition())
+                .partitionCreationStrategy(request.getPartitionCreationStrategy())
+                .fileFormat(request.getFileFormat())
+                .dataEncoding(request.getDataEncoding())
+                .dataSeparator(request.getDataSeparator())
+                .build();
+    }
+
+    public static HiveStorageDTO getFromJson(@NotNull String extParams) {
+        try {
+            OBJECT_MAPPER.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+            return OBJECT_MAPPER.readValue(extParams, HiveStorageDTO.class);
+        } catch (Exception e) {
+            throw new BusinessException(BizErrorCodeEnum.STORAGE_INFO_INCORRECT);
+        }
+    }
 
 }
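
The DTO bridges the typed Hive request and the generic extParams field on StorageEntity: getFromRequest packs the Hive-only settings, getFromJson unpacks them. A usage sketch with illustrative values:

    // Pack Hive-specific settings for storage in the entity's extParams field
    HiveStorageRequest request = new HiveStorageRequest();
    request.setJdbcUrl("jdbc:hive2://127.0.0.1:10000");
    request.setDbName("test_db");
    request.setTableName("test_table");

    HiveStorageDTO dto = HiveStorageDTO.getFromRequest(request);
    String extParams = new ObjectMapper().writeValueAsString(dto); // throws JsonProcessingException

    // Later, rebuild the typed view from the stored JSON
    HiveStorageDTO restored = HiveStorageDTO.getFromJson(extParams);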
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveListResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageListResponse.java
similarity index 87%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveListResponse.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageListResponse.java
index 78873c6..35c5f5e 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveListResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageListResponse.java
@@ -15,12 +15,13 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.common.pojo.datastorage;
+package org.apache.inlong.manager.common.pojo.datastorage.hive;
 
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
 
 /**
  * Response of Hive storage list
@@ -28,7 +29,7 @@ import lombok.EqualsAndHashCode;
 @Data
 @EqualsAndHashCode(callSuper = true)
 @ApiModel("Response of Hive storage paging list")
-public class StorageHiveListResponse extends BaseStorageListResponse {
+public class HiveStorageListResponse extends StorageListResponse {
 
     @ApiModelProperty("target database name")
     private String dbName;
@@ -48,7 +49,4 @@ public class StorageHiveListResponse extends BaseStorageListResponse {
     @ApiModelProperty("partition type, like: H-hour, D-day, W-week, M-month, O-once, R-regulation")
     private String partitionType;
 
-    @ApiModelProperty("storage period, unit: day")
-    private Integer storagePeriod;
-
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveRequest.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageRequest.java
similarity index 87%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveRequest.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageRequest.java
index f562f33..72b44f7 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveRequest.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageRequest.java
@@ -15,15 +15,16 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.common.pojo.datastorage;
+package org.apache.inlong.manager.common.pojo.datastorage.hive;
 
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.List;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
 import lombok.ToString;
 import org.apache.inlong.manager.common.enums.BizConstant;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.util.JsonTypeDefine;
 
 /**
  * Request of the Hive storage info
@@ -32,9 +33,8 @@ import org.apache.inlong.manager.common.enums.BizConstant;
 @ToString(callSuper = true)
 @EqualsAndHashCode(callSuper = true)
 @ApiModel(value = "Request of the Hive storage info")
-public class StorageHiveRequest extends BaseStorageRequest {
-
-    private String storageType = BizConstant.STORAGE_HIVE;
+@JsonTypeDefine(value = BizConstant.STORAGE_HIVE)
+public class HiveStorageRequest extends StorageRequest {
 
     @ApiModelProperty("Whether to enable create table, 1: enable, 0: disable, default is 1")
     private Integer enableCreateTable = 1;
@@ -84,10 +84,4 @@ public class StorageHiveRequest extends BaseStorageRequest {
     @ApiModelProperty("Data field separator")
     private String dataSeparator;
 
-    @ApiModelProperty("Hive table field list")
-    private List<StorageHiveFieldInfo> hiveFieldList;
-
-    @ApiModelProperty("Other ext info list")
-    private List<StorageExtInfo> extList;
-
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageResponse.java
similarity index 69%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageResponse.java
index 4a81f63..5ac2f4c 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/StorageHiveResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastorage/hive/HiveStorageResponse.java
@@ -15,32 +15,27 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.common.pojo.datastorage;
+package org.apache.inlong.manager.common.pojo.datastorage.hive;
 
-import com.fasterxml.jackson.annotation.JsonFormat;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.Date;
-import java.util.List;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
 import lombok.ToString;
 import org.apache.inlong.manager.common.enums.BizConstant;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
 
 /**
- * Response of the Hive storage info
+ * Response of the Hive storage
  */
 @Data
 @ToString(callSuper = true)
 @EqualsAndHashCode(callSuper = true)
-@ApiModel(value = "Response of the Hive storage info")
-public class StorageHiveResponse extends BaseStorageResponse {
+@ApiModel(value = "Response of the Hive storage")
+public class HiveStorageResponse extends StorageResponse {
 
     private String storageType = BizConstant.STORAGE_HIVE;
 
-    @ApiModelProperty("Whether to enable create table")
-    private Integer enableCreateTable;
-
     @ApiModelProperty("Hive JDBC URL")
     private String jdbcUrl;
 
@@ -86,31 +81,4 @@ public class StorageHiveResponse extends BaseStorageResponse {
     @ApiModelProperty("Data field separator")
     private String dataSeparator;
 
-    @ApiModelProperty("Backend operation log")
-    private String optLog;
-
-    @ApiModelProperty("Status")
-    private Integer status;
-
-    @ApiModelProperty("Previous State")
-    private Integer previousStatus;
-
-    @ApiModelProperty("Creator")
-    private String creator;
-
-    @ApiModelProperty("modifier")
-    private String modifier;
-
-    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
-    private Date createTime;
-
-    @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
-    private Date modifyTime;
-
-    @ApiModelProperty("hive table field list")
-    private List<StorageHiveFieldInfo> hiveFieldList;
-
-    @ApiModelProperty("other ext info list")
-    private List<StorageExtInfo> extList;
-
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamApproveInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamApproveInfo.java
index f237b7c..353f01a 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamApproveInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamApproveInfo.java
@@ -17,11 +17,12 @@
 
 package org.apache.inlong.manager.common.pojo.datastream;
 
+import java.util.List;
+
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.List;
 import lombok.Data;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveDTO;
 
 /**
  * Data stream approval information
@@ -39,7 +40,7 @@ public class DataStreamApproveInfo {
     @ApiModelProperty(value = "Data stream id")
     private String inlongStreamId;
 
-    @ApiModelProperty(value = "Data storage information list")
-    private List<StorageApproveInfo> storageList;
+    @ApiModelProperty(value = "Data storage approve list")
+    private List<StorageApproveDTO> storageList;
 
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamRequest.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamRequest.java
index 2e31cfb..bcfc55a 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamRequest.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamRequest.java
@@ -19,13 +19,14 @@ package org.apache.inlong.manager.common.pojo.datastream;
 
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.List;
 import lombok.Data;
 import org.apache.inlong.manager.common.pojo.datasource.SourceDbBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceDbDetailInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileDetailInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+
+import java.util.List;
 
 /**
  * All request info on the data stream page, including data stream, data source, and data storage
@@ -50,6 +51,6 @@ public class FullStreamRequest {
     private List<SourceDbDetailInfo> dbDetailInfoList;
 
     @ApiModelProperty("Data storage information")
-    private List<BaseStorageRequest> storageInfo;
+    private List<StorageRequest> storageInfo;
 
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamResponse.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamResponse.java
index 26e742d..e7027a0 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamResponse.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/FullStreamResponse.java
@@ -19,13 +19,14 @@ package org.apache.inlong.manager.common.pojo.datastream;
 
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.List;
 import lombok.Data;
 import org.apache.inlong.manager.common.pojo.datasource.SourceDbBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceDbDetailInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileDetailInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+
+import java.util.List;
 
 /**
  * All response info on the data stream page, including data stream, data source, and data storage
@@ -50,6 +51,6 @@ public class FullStreamResponse {
     private List<SourceDbDetailInfo> dbDetailInfoList;
 
     @ApiModelProperty("Data storage information")
-    private List<BaseStorageResponse> storageInfo;
+    private List<StorageResponse> storageInfo;
 
 }
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamSummaryInfo.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/StreamBriefResponse.java
similarity index 87%
rename from inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamSummaryInfo.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/StreamBriefResponse.java
index 68b3c10..cad1605 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/DataStreamSummaryInfo.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/pojo/datastream/StreamBriefResponse.java
@@ -20,17 +20,18 @@ package org.apache.inlong.manager.common.pojo.datastream;
 import com.fasterxml.jackson.annotation.JsonFormat;
 import io.swagger.annotations.ApiModel;
 import io.swagger.annotations.ApiModelProperty;
+import lombok.Data;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse;
+
 import java.util.Date;
 import java.util.List;
-import lombok.Data;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
 
 /**
- * Storage summary of the data stream
+ * Summary response of the data stream
  */
 @Data
-@ApiModel("Storage summary of the data stream")
-public class DataStreamSummaryInfo {
+@ApiModel("Summary response of the data stream")
+public class StreamBriefResponse {
 
     @ApiModelProperty(value = "Primary key")
     private Integer id;
@@ -47,8 +48,8 @@ public class DataStreamSummaryInfo {
     @ApiModelProperty(value = "Data source type, support: FILE/DB/AUTO_PUSH")
     private String dataSourceType;
 
-    @ApiModelProperty(value = "Storage information list")
-    private List<StorageSummaryInfo> storageList;
+    @ApiModelProperty(value = "Storage summary list")
+    private List<StorageBriefResponse> storageList;
 
     @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss")
     private Date modifyTime;
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/CommonBeanUtils.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/CommonBeanUtils.java
index 61a1bb5..d866806 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/CommonBeanUtils.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/CommonBeanUtils.java
@@ -17,14 +17,15 @@
 
 package org.apache.inlong.manager.common.util;
 
+import org.springframework.beans.BeanUtils;
+import org.springframework.beans.BeanWrapper;
+import org.springframework.beans.BeanWrapperImpl;
+
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.function.Supplier;
-import org.springframework.beans.BeanUtils;
-import org.springframework.beans.BeanWrapper;
-import org.springframework.beans.BeanWrapperImpl;
 
 /**
  * Copy the tool class of the Bean property in the List
@@ -35,9 +36,9 @@ public class CommonBeanUtils extends BeanUtils {
      * Usage scenario: Loop replication for each Java entity in the List
      *
      * @param sources Source entity list
-     * @param target target entity list
-     * @param <S> The type of the source entity list
-     * @param <T> The type of the target entity list
+     * @param target  target entity list
+     * @param <S>     The type of the source entity list
+     * @param <T>     The type of the target entity list
      * @return target entity list
      */
     public static <S, T> List<T> copyListProperties(List<S> sources, Supplier<T> target) {
@@ -58,8 +59,8 @@ public class CommonBeanUtils extends BeanUtils {
      *
      * @param source source data content
      * @param target target type
-     * @param <S> source type
-     * @param <T> target type
+     * @param <S>    source type
+     * @param <T>    target type
      * @return the target type object after copying
      */
     public static <S, T> T copyProperties(S source, Supplier<T> target) {
@@ -74,22 +75,24 @@ public class CommonBeanUtils extends BeanUtils {
     /**
      * Copy the content of the source instance to the target instance
      *
-     * @param source source data content
-     * @param target target data
+     * @param source     source data content
+     * @param target     target data
      * @param ignoreNull Whether to ignore null values
-     * @param <S> source type
-     * @param <T> target type
+     * @param <S>        source type
+     * @param <T>        target type
      * @apiNote If ignoreNull = false, non-null attributes in the target instance may be overwritten
      */
-    public static <S, T> void copyProperties(S source, T target, boolean ignoreNull) {
+    public static <S, T> T copyProperties(S source, T target, boolean ignoreNull) {
         if (source == null) {
-            return;
+            return target;
         }
         if (ignoreNull) {
             copyProperties(source, target, getNullPropertyNames(source));
         } else {
             copyProperties(source, target);
         }
+
+        return target;
     }
 
     /**
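
Because copyProperties(source, target, ignoreNull) now returns the target, call sites can copy and assign in a single expression. For example (the entity and request types are from this commit; the request variable is illustrative):

    // One-liner enabled by the new return value; null fields in the request are skipped
    StorageEntity entity = CommonBeanUtils.copyProperties(request, new StorageEntity(), true);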
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageEsService.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/JsonTypeDefine.java
similarity index 61%
rename from inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageEsService.java
rename to inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/JsonTypeDefine.java
index c8ee23c..e34873f 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageEsService.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/JsonTypeDefine.java
@@ -15,7 +15,26 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.service.core;
+package org.apache.inlong.manager.common.util;
 
-public interface StorageEsService {
-}
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Define the Json type for JsonTypeInfo
+ *
+ * @see JsonTypeInfo
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface JsonTypeDefine {
+
+    String value() default "";
+
+    String desc() default "";
+
+}
\ No newline at end of file
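
JsonTypeDefine replaces the static @JsonSubTypes lists, and the diffstat adds a CommandLineRunnerImpl that presumably registers annotated classes with Jackson at startup. A sketch of such a registration, assuming Spring's class-path scanner (the scanning code here is an assumption, not taken from this diff):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.jsontype.NamedType;
    import org.springframework.beans.factory.config.BeanDefinition;
    import org.springframework.context.annotation.ClassPathScanningCandidateComponentProvider;
    import org.springframework.core.type.filter.AnnotationTypeFilter;

    ObjectMapper objectMapper = new ObjectMapper();
    ClassPathScanningCandidateComponentProvider scanner =
            new ClassPathScanningCandidateComponentProvider(false);
    scanner.addIncludeFilter(new AnnotationTypeFilter(JsonTypeDefine.class));

    for (BeanDefinition candidate : scanner.findCandidateComponents("org.apache.inlong.manager")) {
        Class<?> clazz = Class.forName(candidate.getBeanClassName()); // throws ClassNotFoundException
        JsonTypeDefine define = clazz.getAnnotation(JsonTypeDefine.class);
        if (define != null && !define.value().isEmpty()) {
            objectMapper.registerSubtypes(new NamedType(clazz, define.value()));
        }
    }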
diff --git a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/Preconditions.java b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/Preconditions.java
index bba9078..4bbe861 100644
--- a/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/Preconditions.java
+++ b/inlong-manager/manager-common/src/main/java/org/apache/inlong/manager/common/util/Preconditions.java
@@ -50,19 +50,19 @@ public class Preconditions {
         checkTrue(str == null || str.isEmpty(), errMsg);
     }
 
-    public static void checkEmpty(Collection str, String errMsg) {
+    public static void checkEmpty(Collection<?> str, String errMsg) {
         checkTrue(str == null || str.isEmpty(), errMsg);
     }
 
-    public static void checkEmpty(Collection collection, Supplier<String> errMsg) {
+    public static void checkEmpty(Collection<?> collection, Supplier<String> errMsg) {
         checkTrue(collection == null || collection.isEmpty(), errMsg);
     }
 
-    public static void checkEmpty(Map map, String errMsg) {
+    public static void checkEmpty(Map<?, ?> map, String errMsg) {
         checkTrue(map == null || map.isEmpty(), errMsg);
     }
 
-    public static void checkEmpty(Map map, Supplier<String> errMsg) {
+    public static void checkEmpty(Map<?, ?> map, Supplier<String> errMsg) {
         checkTrue(map == null || map.isEmpty(), errMsg);
     }
 
@@ -74,19 +74,19 @@ public class Preconditions {
         checkTrue(str != null && !str.isEmpty(), errMsg);
     }
 
-    public static void checkNotEmpty(Collection collection, String errMsg) {
+    public static void checkNotEmpty(Collection<?> collection, String errMsg) {
         checkTrue(collection != null && !collection.isEmpty(), errMsg);
     }
 
-    public static void checkNotEmpty(Collection collection, Supplier<String> errMsg) {
+    public static void checkNotEmpty(Collection<?> collection, Supplier<String> errMsg) {
         checkTrue(collection != null && !collection.isEmpty(), errMsg);
     }
 
-    public static void checkNotEmpty(Map map, String errMsg) {
+    public static void checkNotEmpty(Map<?, ?> map, String errMsg) {
         checkTrue(map != null && !map.isEmpty(), errMsg);
     }
 
-    public static void checkNotEmpty(Map map, Supplier<String> errMsg) {
+    public static void checkNotEmpty(Map<?, ?> map, Supplier<String> errMsg) {
         checkTrue(map != null && !map.isEmpty(), errMsg);
     }
 
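
With the raw Collection and Map parameters replaced by Collection<?> and Map<?, ?>, call sites keep their element types and compile without raw-type warnings. For example:

    List<StorageFieldRequest> fields = request.getFieldList();
    Preconditions.checkNotEmpty(fields, "storage field list cannot be empty");
    // The Supplier variant defers message construction until the check actually fails
    Preconditions.checkNotEmpty(fields, () -> "no fields for stream " + request.getInlongStreamId());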
diff --git a/inlong-manager/manager-dao/pom.xml b/inlong-manager/manager-dao/pom.xml
index 16348a7..f168b59 100644
--- a/inlong-manager/manager-dao/pom.xml
+++ b/inlong-manager/manager-dao/pom.xml
@@ -54,10 +54,6 @@
             </exclusions>
         </dependency>
         <dependency>
-            <groupId>org.apache.logging.log4j</groupId>
-            <artifactId>log4j-api</artifactId>
-        </dependency>
-        <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-jdbc</artifactId>
         </dependency>
diff --git a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveEntity.java b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageEntity.java
similarity index 86%
rename from inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveEntity.java
rename to inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageEntity.java
index d3b3542..587d53e 100644
--- a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveEntity.java
+++ b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageEntity.java
@@ -17,20 +17,35 @@
 
 package org.apache.inlong.manager.dao.entity;
 
+import lombok.Data;
+
 import java.io.Serializable;
 import java.util.Date;
-import lombok.Data;
 
 @Data
-public class StorageHiveEntity implements Serializable {
+public class StorageEntity implements Serializable {
 
     private static final long serialVersionUID = 1L;
     private Integer id;
     private String inlongGroupId;
     private String inlongStreamId;
+    private String storageType;
+    private Integer storagePeriod;
+    private Integer enableCreateResource;
 
-    private Integer enableCreateTable;
+    private String operateLog;
+    private Integer status;
+    private Integer previousStatus;
+    private Integer isDeleted;
+    private String creator;
+    private String modifier;
+    private Date createTime;
+    private Date modifyTime;
 
+    // Other type-specific fields are saved as a JSON string in extParams
+    private String extParams;
+
+    /*
     private String jdbcUrl;
     private String username;
     private String password;
@@ -47,16 +62,6 @@ public class StorageHiveEntity implements Serializable {
 
     private String fileFormat;
     private String dataEncoding;
-    private String dataSeparator;
-    private Integer storagePeriod;
-    private String optLog;
-
-    private Integer status;
-    private Integer previousStatus;
-    private Integer isDeleted;
-    private String creator;
-    private String modifier;
-    private Date createTime;
-    private Date modifyTime;
+    private String dataSeparator;*/
 
 }
\ No newline at end of file
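
The type-specific columns removed above now travel through ext_params as JSON. A minimal sketch of the round trip, assuming Jackson's ObjectMapper handles the (de)serialization; HiveStorageDTO is the DTO added by this patch (its import path is assumed), and the "HIVE" literal stands in for the storage-type constant:

    import com.fasterxml.jackson.databind.ObjectMapper;

    import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageDTO; // package assumed
    import org.apache.inlong.manager.dao.entity.StorageEntity;

    public class ExtParamsSketch {

        private static final ObjectMapper MAPPER = new ObjectMapper();

        // Write side: flatten the type-specific DTO into the generic entity
        public static StorageEntity toEntity(HiveStorageDTO dto) throws Exception {
            StorageEntity entity = new StorageEntity();
            entity.setStorageType("HIVE");
            entity.setExtParams(MAPPER.writeValueAsString(dto));
            return entity;
        }

        // Read side: restore the concrete config once storage_type is known
        public static HiveStorageDTO fromEntity(StorageEntity entity) throws Exception {
            return MAPPER.readValue(entity.getExtParams(), HiveStorageDTO.class);
        }
    }
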
diff --git a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveFieldEntity.java b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageFieldEntity.java
similarity index 88%
rename from inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveFieldEntity.java
rename to inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageFieldEntity.java
index 361473e..65a7d79 100644
--- a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageHiveFieldEntity.java
+++ b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/entity/StorageFieldEntity.java
@@ -17,15 +17,20 @@
 
 package org.apache.inlong.manager.dao.entity;
 
-import java.io.Serializable;
 import lombok.Data;
 
+import java.io.Serializable;
+
 @Data
-public class StorageHiveFieldEntity implements Serializable {
+public class StorageFieldEntity implements Serializable {
 
     private static final long serialVersionUID = 1L;
     private Integer id;
+    private String inlongGroupId;
+    private String inlongStreamId;
     private Integer storageId;
+    private String storageType;
+
     private String fieldName;
     private String fieldType;
     private String fieldComment;
diff --git a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveEntityMapper.java b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageEntityMapper.java
similarity index 50%
rename from inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveEntityMapper.java
rename to inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageEntityMapper.java
index c30e712..de80711 100644
--- a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveEntityMapper.java
+++ b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageEntityMapper.java
@@ -17,28 +17,32 @@
 
 package org.apache.inlong.manager.dao.mapper;
 
-import java.util.List;
 import org.apache.ibatis.annotations.Param;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO;
 import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
-import org.apache.inlong.manager.dao.entity.StorageHiveEntity;
+import org.apache.inlong.manager.dao.entity.StorageEntity;
 import org.springframework.stereotype.Repository;
 
-@Repository
-public interface StorageHiveEntityMapper {
-
-    int deleteByPrimaryKey(Integer id);
+import java.util.List;
 
-    int insert(StorageHiveEntity record);
+@Repository
+public interface StorageEntityMapper {
 
-    int insertSelective(StorageHiveEntity record);
+    int insert(StorageEntity record);
 
-    StorageHiveEntity selectByPrimaryKey(Integer id);
+    int insertSelective(StorageEntity record);
 
-    int updateByPrimaryKeySelective(StorageHiveEntity record);
+    StorageEntity selectByPrimaryKey(Integer id);
 
-    int updateByPrimaryKey(StorageHiveEntity record);
+    /**
+     * According to the business group id and data stream id, query the number of valid storages
+     *
+     * @param groupId business group id
+     * @param streamId data stream id
+     * @return storage entity count
+     */
+    int selectCount(@Param("groupId") String groupId, @Param("streamId") String streamId);
 
     /**
      * Paging query storage list based on conditions
@@ -46,7 +50,13 @@ public interface StorageHiveEntityMapper {
      * @param request Paging query conditions
-     * @return Hive storage entity list
+     * @return storage entity list
      */
-    List<StorageHiveEntity> selectByCondition(@Param("request") StoragePageRequest request);
+    List<StorageEntity> selectByCondition(@Param("request") StoragePageRequest request);
+
+    /**
+     * Query the storage summary from the given groupId and streamId
+     */
+    List<StorageBriefResponse> selectSummary(@Param("groupId") String groupId,
+            @Param("streamId") String streamId);
 
     /**
      * According to the business group id and data stream id, query valid storage information
@@ -55,42 +65,50 @@ public interface StorageHiveEntityMapper {
      * @param streamId data stream id
-     * @return Hive storage entity list
+     * @return storage entity list
      */
-    List<StorageHiveEntity> selectByIdentifier(@Param("groupId") String groupId, @Param("streamId") String streamId);
+    List<StorageEntity> selectByIdentifier(@Param("groupId") String groupId, @Param("streamId") String streamId);
 
     /**
-     * According to the business group id and data stream id, query the number of valid Hive storage
+     * According to the group id, stream id and storage type, query valid storage entity list.
      *
-     * @param groupId business group id
-     * @param streamId data stream id
-     * @return Hive storage entity size
+     * @param groupId business group id.
+     * @param streamId data stream id.
+     * @param storageType storage type.
+     * @return storage entity list.
      */
-    int selectCountByIdentifier(@Param("groupId") String groupId, @Param("streamId") String streamId);
-
-    int updateStorageStatusById(StorageHiveEntity entity);
+    List<StorageEntity> selectByIdAndType(@Param("groupId") String groupId, @Param("streamId") String streamId,
+            @Param("storageType") String storageType);
 
     /**
-     * Given a list of data stream ids, filter out data stream id list with Hive storage
+     * Filter stream ids with the specified groupId and storageType from the given stream id list.
      *
-     * @param groupId business group id
-     * @param streamIdList data stream id list
-     * @return a list of data stream ids with Hive storage
+     * @param groupId InLong group id.
+     * @param storageType Storage type.
+     * @param streamIdList InLong stream id list.
+     * @return List of InLong stream id with the given storage type
      */
-    List<String> selectDataStreamExists(@Param("groupId") String groupId,
+    List<String> selectExistsStreamId(@Param("groupId") String groupId, @Param("storageType") String storageType,
             @Param("streamIdList") List<String> streamIdList);
 
     /**
-     * According to the business group id and data stream id, query Hive storage summary information
+     * Get the distinct storage types under the given groupId and streamId
      */
-    List<StorageSummaryInfo> selectSummary(@Param("groupId") String groupId, @Param("streamId") String streamId);
+    List<String> selectStorageType(@Param("groupId") String groupId, @Param("streamId") String streamId);
 
     /**
-     * Select Hive configs for Sort under the business group id and stream id
+     * Select all configs for Sort under the group id and stream id.
      *
-     * @param groupId Business group id
-     * @param streamId Data stream id, if is null, get all configs under the group id
-     * @return Hive Sort config
+     * @param groupId Business group id.
+     * @param streamId Data stream id; if null, get all configs under the group id.
+     * @return Sort config list.
      */
-    List<StorageHiveDTO> selectAllHiveConfig(@Param("groupId") String groupId,
-            @Param("streamId") String streamId);
+    List<StorageForSortDTO> selectAllConfig(@Param("groupId") String groupId, @Param("streamId") String streamId);
+
+    int updateByPrimaryKeySelective(StorageEntity record);
+
+    int updateByPrimaryKey(StorageEntity record);
+
+    int updateStorageStatus(StorageEntity entity);
+
+    int deleteByPrimaryKey(Integer id);
 
 }
\ No newline at end of file
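
For orientation, a hedged sketch of how a type-specific operation might drive this generic mapper (the Spring wiring and class are hypothetical; the mapper methods are the ones declared above):

    import java.util.List;

    import org.apache.inlong.manager.dao.entity.StorageEntity;
    import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Service;

    @Service
    public class HiveStorageQuerySketch {

        @Autowired
        private StorageEntityMapper storageMapper;

        // All Hive storages of one stream; ext_params still carries the Hive-specific JSON
        public List<StorageEntity> listHiveStorages(String groupId, String streamId) {
            return storageMapper.selectByIdAndType(groupId, streamId, "HIVE");
        }

        // True when the stream already has at least one storage of any type
        public boolean storageExists(String groupId, String streamId) {
            return storageMapper.selectCount(groupId, streamId) > 0;
        }
    }
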
diff --git a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveFieldEntityMapper.java b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageFieldEntityMapper.java
similarity index 54%
rename from inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveFieldEntityMapper.java
rename to inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageFieldEntityMapper.java
index 0d1e492..967772c 100644
--- a/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageHiveFieldEntityMapper.java
+++ b/inlong-manager/manager-dao/src/main/java/org/apache/inlong/manager/dao/mapper/StorageFieldEntityMapper.java
@@ -17,50 +17,45 @@
 
 package org.apache.inlong.manager.dao.mapper;
 
-import java.util.List;
 import org.apache.ibatis.annotations.Param;
-import org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity;
+import org.apache.inlong.manager.dao.entity.StorageFieldEntity;
 import org.springframework.stereotype.Repository;
 
-@Repository
-public interface StorageHiveFieldEntityMapper {
-
-    int deleteByPrimaryKey(Integer id);
-
-    int insert(StorageHiveFieldEntity record);
+import java.util.List;
 
-    int insertSelective(StorageHiveFieldEntity record);
+@Repository
+public interface StorageFieldEntityMapper {
 
-    StorageHiveFieldEntity selectByPrimaryKey(Integer id);
+    int insert(StorageFieldEntity record);
 
-    int updateByPrimaryKeySelective(StorageHiveFieldEntity record);
+    void insertAll(@Param("list") List<StorageFieldEntity> fieldList);
 
-    int updateByPrimaryKey(StorageHiveFieldEntity record);
+    StorageFieldEntity selectByPrimaryKey(Integer id);
 
-    List<StorageHiveFieldEntity> selectHiveFields(@Param("groupId") String groupId, @Param("streamId") String streamId);
+    List<StorageFieldEntity> selectFields(@Param("groupId") String groupId, @Param("streamId") String streamId);
 
     /**
-     * According to the storage primary key, logically delete the corresponding field information
+     * According to the storage id, query the storage field list
      *
      * @param storageId storage id
-     * @return rows deleted
+     * @return storage field list
      */
-    int logicDeleteAll(@Param("storageId") Integer storageId);
+    List<StorageFieldEntity> selectByStorageId(@Param("storageId") Integer storageId);
 
     /**
-     * According to the storage primary key, physically delete the corresponding field information
+     * According to the storage id, logically delete the corresponding field information.
      *
-     * @param storageId storage id
-     * @return rows deleted
+     * @param storageId Storage id.
+     * @return rows deleted.
      */
-    int deleteAllByStorageId(@Param("storageId") Integer storageId);
+    int logicDeleteAll(@Param("storageId") Integer storageId);
 
     /**
-     * According to the storage id, query the Hive field
+     * According to the storage id, physically delete the corresponding field information
      *
-     * @param storageId storage id
-     * @return Hive field list
+     * @param storageId Storage id.
+     * @return rows deleted.
      */
-    List<StorageHiveFieldEntity> selectByStorageId(@Param("storageId") Integer storageId);
+    int deleteAll(@Param("storageId") Integer storageId);
 
 }
\ No newline at end of file
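
A hedged sketch of the replace-fields pattern these methods enable (the service class is hypothetical; the loop fills the denormalized columns added above):

    import java.util.List;

    import org.apache.inlong.manager.dao.entity.StorageEntity;
    import org.apache.inlong.manager.dao.entity.StorageFieldEntity;
    import org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper;

    public class StorageFieldSaveSketch {

        private final StorageFieldEntityMapper fieldMapper;

        public StorageFieldSaveSketch(StorageFieldEntityMapper fieldMapper) {
            this.fieldMapper = fieldMapper;
        }

        // Logically delete the old field rows, then batch-insert the new ones
        public void replaceFields(StorageEntity storage, List<StorageFieldEntity> fields) {
            fieldMapper.logicDeleteAll(storage.getId());
            for (StorageFieldEntity field : fields) {
                field.setStorageId(storage.getId());
                field.setInlongGroupId(storage.getInlongGroupId());
                field.setInlongStreamId(storage.getInlongStreamId());
                field.setStorageType(storage.getStorageType());
            }
            fieldMapper.insertAll(fields);
        }
    }
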
diff --git a/inlong-manager/manager-dao/src/main/resources/generatorConfig.xml b/inlong-manager/manager-dao/src/main/resources/generatorConfig.xml
index 942c638..3ab64ae 100644
--- a/inlong-manager/manager-dao/src/main/resources/generatorConfig.xml
+++ b/inlong-manager/manager-dao/src/main/resources/generatorConfig.xml
@@ -177,14 +177,14 @@
                 enableCountByExample="false" enableDeleteByExample="false"
                 enableSelectByExample="false" enableUpdateByExample="false"/>
 
-        <table tableName="storage_hive" domainObjectName="StorageHiveEntity"
+        <table tableName="storage" domainObjectName="StorageEntity"
                 enableSelectByPrimaryKey="true"
                 enableUpdateByPrimaryKey="true"
                 enableDeleteByPrimaryKey="true" enableInsert="true"
                 enableCountByExample="false" enableDeleteByExample="false"
                 enableSelectByExample="false" enableUpdateByExample="false"/>
 
-        <table tableName="storage_hive_field" domainObjectName="StorageHiveFieldEntity"
+        <table tableName="storage_field" domainObjectName="StorageFieldEntity"
                 enableSelectByPrimaryKey="true"
                 enableUpdateByPrimaryKey="true"
                 enableDeleteByPrimaryKey="true" enableInsert="true"
diff --git a/inlong-manager/manager-dao/src/main/resources/mappers/StorageEntityMapper.xml b/inlong-manager/manager-dao/src/main/resources/mappers/StorageEntityMapper.xml
new file mode 100644
index 0000000..bcd2f65
--- /dev/null
+++ b/inlong-manager/manager-dao/src/main/resources/mappers/StorageEntityMapper.xml
@@ -0,0 +1,380 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
+<mapper namespace="org.apache.inlong.manager.dao.mapper.StorageEntityMapper">
+    <resultMap id="BaseResultMap" type="org.apache.inlong.manager.dao.entity.StorageEntity">
+        <id column="id" jdbcType="INTEGER" property="id"/>
+        <result column="inlong_group_id" jdbcType="VARCHAR" property="inlongGroupId"/>
+        <result column="inlong_stream_id" jdbcType="VARCHAR" property="inlongStreamId"/>
+        <result column="storage_type" jdbcType="VARCHAR" property="storageType"/>
+        <result column="storage_period" jdbcType="INTEGER" property="storagePeriod"/>
+        <result column="enable_create_resource" jdbcType="TINYINT" property="enableCreateResource"/>
+        <result column="ext_params" jdbcType="VARCHAR" property="extParams"/>
+        <result column="operate_log" jdbcType="VARCHAR" property="operateLog"/>
+
+        <result column="status" jdbcType="INTEGER" property="status"/>
+        <result column="previous_status" jdbcType="INTEGER" property="previousStatus"/>
+        <result column="is_deleted" jdbcType="INTEGER" property="isDeleted"/>
+        <result column="creator" jdbcType="VARCHAR" property="creator"/>
+        <result column="modifier" jdbcType="VARCHAR" property="modifier"/>
+        <result column="create_time" jdbcType="TIMESTAMP" property="createTime"/>
+        <result column="modify_time" jdbcType="TIMESTAMP" property="modifyTime"/>
+    </resultMap>
+
+    <sql id="Base_Column_List">
+        id, inlong_group_id, inlong_stream_id, storage_type, storage_period, enable_create_resource, ext_params,
+        operate_log, status, previous_status, is_deleted, creator, modifier, create_time, modify_time
+    </sql>
+
+    <select id="selectByPrimaryKey" parameterType="java.lang.Integer" resultMap="BaseResultMap">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage
+        where id = #{id,jdbcType=INTEGER}
+    </select>
+    <select id="selectCount" resultType="java.lang.Integer">
+        select count(1)
+        from data_storage
+        <where>
+            is_deleted = 0
+            <if test="groupId != null and groupId != ''">
+                and inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            </if>
+            <if test="streamId != null and streamId != ''">
+                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+            </if>
+        </where>
+    </select>
+    <select id="selectByCondition"
+            parameterType="org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest"
+            resultMap="BaseResultMap">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage
+        <where>
+            is_deleted = 0
+            and storage_type = #{request.storageType, jdbcType=VARCHAR}
+            and inlong_group_id = #{request.inlongGroupId, jdbcType=VARCHAR}
+            <if test="request.inlongStreamId != null and request.inlongStreamId != ''">
+                and inlong_stream_id = #{request.inlongStreamId, jdbcType=VARCHAR}
+            </if>
+            <if test="request.keyWord != null and request.keyWord != ''">
+                and (
+                inlong_group_id like CONCAT('%', #{request.keyWord}, '%')
+                or inlong_stream_id like CONCAT('%', #{request.keyWord}, '%')
+                )
+            </if>
+            <if test="request.status != null and request.status != ''">
+                and status = #{request.status, jdbcType=INTEGER}
+            </if>
+        </where>
+        order by modify_time desc
+    </select>
+    <select id="selectSummary"
+            resultType="org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse">
+        select s.id,
+               s.inlong_group_id,
+               s.inlong_stream_id,
+               s.storage_type
+        from data_storage s
+        where s.is_deleted = 0
+          and s.inlong_group_id = #{groupId, jdbcType=VARCHAR}
+          and s.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+    </select>
+    <select id="selectByIdentifier" resultType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage
+        <where>
+            is_deleted = 0
+            and inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            <if test="streamId != null and streamId != ''">
+                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+            </if>
+        </where>
+    </select>
+    <select id="selectByIdAndType" resultType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage
+        <where>
+            is_deleted = 0
+            and inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            <if test="streamId != null and streamId != ''">
+                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+            </if>
+            <if test="storageType != null and storageType != ''">
+                and storage_type = #{storageType, jdbcType=VARCHAR}
+            </if>
+        </where>
+    </select>
+    <select id="selectExistsStreamId" resultType="java.lang.String">
+        select inlong_stream_id
+        from data_storage
+        <where>
+            inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            and storage_type = #{storageType, jdbcType=VARCHAR}
+            and is_deleted = 0
+            and inlong_stream_id in
+            <foreach collection="streamIdList" open="(" close=")" separator="," index="index" item="item">
+                #{item}
+            </foreach>
+        </where>
+    </select>
+    <select id="selectStorageType" resultType="java.lang.String">
+        select distinct storage_type
+        from data_storage
+        <where>
+            is_deleted = 0
+            <if test="groupId != null and groupId != ''">
+                and inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            </if>
+            <if test="streamId != null and streamId != ''">
+                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+            </if>
+        </where>
+    </select>
+    <select id="selectAllConfig" resultType="org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO">
+        select storage.id,
+        storage.inlong_group_id,
+        storage.inlong_stream_id,
+        storage.storage_type,
+        storage.storage_period,
+        storage.enable_create_resource,
+        storage.ext_params,
+        storage.status,
+        storage.creator,
+
+        stream.mq_resource_obj,
+        stream.data_source_type,
+        stream.data_type,
+        stream.description,
+        stream.data_separator as sourceSeparator,
+        stream.data_escape_char
+        from data_stream stream, data_storage storage
+        <where>
+            stream.is_deleted = 0
+            and storage.is_deleted = 0
+            and stream.inlong_group_id = storage.inlong_group_id
+            and stream.inlong_stream_id = storage.inlong_stream_id
+            and stream.inlong_group_id = #{groupId, jdbcType=VARCHAR}
+            <if test="streamId != null and streamId != ''">
+                and stream.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+            </if>
+        </where>
+    </select>
+
+    <delete id="deleteByPrimaryKey" parameterType="java.lang.Integer">
+        delete
+        from data_storage
+        where id = #{id,jdbcType=INTEGER}
+    </delete>
+
+    <insert id="insert" useGeneratedKeys="true" keyProperty="id"
+            parameterType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        insert into data_storage (id, inlong_group_id, inlong_stream_id,
+                                  storage_type, storage_period,
+                                  enable_create_resource, ext_params,
+                                  operate_log, status,
+                                  previous_status, is_deleted, creator,
+                                  modifier, create_time, modify_time)
+        values (#{id,jdbcType=INTEGER}, #{inlongGroupId,jdbcType=VARCHAR}, #{inlongStreamId,jdbcType=VARCHAR},
+                #{storageType,jdbcType=VARCHAR}, #{storagePeriod,jdbcType=INTEGER},
+                #{enableCreateResource,jdbcType=TINYINT}, #{extParams,jdbcType=VARCHAR},
+                #{operateLog,jdbcType=VARCHAR}, #{status,jdbcType=INTEGER},
+                #{previousStatus,jdbcType=INTEGER}, #{isDeleted,jdbcType=INTEGER}, #{creator,jdbcType=VARCHAR},
+                #{modifier,jdbcType=VARCHAR}, #{createTime,jdbcType=TIMESTAMP}, #{modifyTime,jdbcType=TIMESTAMP})
+    </insert>
+    <insert id="insertSelective" useGeneratedKeys="true" keyProperty="id"
+            parameterType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        insert into data_storage
+        <trim prefix="(" suffix=")" suffixOverrides=",">
+            <if test="id != null">
+                id,
+            </if>
+            <if test="inlongGroupId != null">
+                inlong_group_id,
+            </if>
+            <if test="inlongStreamId != null">
+                inlong_stream_id,
+            </if>
+            <if test="storageType != null">
+                storage_type,
+            </if>
+            <if test="storagePeriod != null">
+                storage_period,
+            </if>
+            <if test="enableCreateResource != null">
+                enable_create_resource,
+            </if>
+            <if test="extParams != null">
+                ext_params,
+            </if>
+            <if test="operateLog != null">
+                operate_log,
+            </if>
+            <if test="status != null">
+                status,
+            </if>
+            <if test="previousStatus != null">
+                previous_status,
+            </if>
+            <if test="isDeleted != null">
+                is_deleted,
+            </if>
+            <if test="creator != null">
+                creator,
+            </if>
+            <if test="modifier != null">
+                modifier,
+            </if>
+            <if test="createTime != null">
+                create_time,
+            </if>
+            <if test="modifyTime != null">
+                modify_time,
+            </if>
+        </trim>
+        <trim prefix="values (" suffix=")" suffixOverrides=",">
+            <if test="id != null">
+                #{id,jdbcType=INTEGER},
+            </if>
+            <if test="inlongGroupId != null">
+                #{inlongGroupId,jdbcType=VARCHAR},
+            </if>
+            <if test="inlongStreamId != null">
+                #{inlongStreamId,jdbcType=VARCHAR},
+            </if>
+            <if test="storageType != null">
+                #{storageType,jdbcType=VARCHAR},
+            </if>
+            <if test="storagePeriod != null">
+                #{storagePeriod,jdbcType=INTEGER},
+            </if>
+            <if test="enableCreateResource != null">
+                #{enableCreateResource,jdbcType=TINYINT},
+            </if>
+            <if test="extParams != null">
+                #{extParams,jdbcType=VARCHAR},
+            </if>
+            <if test="operateLog != null">
+                #{operateLog,jdbcType=VARCHAR},
+            </if>
+            <if test="status != null">
+                #{status,jdbcType=INTEGER},
+            </if>
+            <if test="previousStatus != null">
+                #{previousStatus,jdbcType=INTEGER},
+            </if>
+            <if test="isDeleted != null">
+                #{isDeleted,jdbcType=INTEGER},
+            </if>
+            <if test="creator != null">
+                #{creator,jdbcType=VARCHAR},
+            </if>
+            <if test="modifier != null">
+                #{modifier,jdbcType=VARCHAR},
+            </if>
+            <if test="createTime != null">
+                #{createTime,jdbcType=TIMESTAMP},
+            </if>
+            <if test="modifyTime != null">
+                #{modifyTime,jdbcType=TIMESTAMP},
+            </if>
+        </trim>
+    </insert>
+
+    <update id="updateByPrimaryKeySelective" parameterType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        update data_storage
+        <set>
+            <if test="inlongGroupId != null">
+                inlong_group_id = #{inlongGroupId,jdbcType=VARCHAR},
+            </if>
+            <if test="inlongStreamId != null">
+                inlong_stream_id = #{inlongStreamId,jdbcType=VARCHAR},
+            </if>
+            <if test="storagePeriod != null">
+                storage_period = #{storagePeriod,jdbcType=INTEGER},
+            </if>
+            <if test="storageType != null">
+                storage_type = #{storageType,jdbcType=VARCHAR},
+            </if>
+            <if test="enableCreateResource != null">
+                enable_create_resource = #{enableCreateResource,jdbcType=TINYINT},
+            </if>
+            <if test="extParams != null">
+                ext_params = #{extParams,jdbcType=VARCHAR},
+            </if>
+            <if test="operateLog != null">
+                operate_log = #{operateLog,jdbcType=VARCHAR},
+            </if>
+            <if test="status != null">
+                status = #{status,jdbcType=INTEGER},
+            </if>
+            <if test="previousStatus != null">
+                previous_status = #{previousStatus,jdbcType=INTEGER},
+            </if>
+            <if test="isDeleted != null">
+                is_deleted = #{isDeleted,jdbcType=INTEGER},
+            </if>
+            <if test="creator != null">
+                creator = #{creator,jdbcType=VARCHAR},
+            </if>
+            <if test="modifier != null">
+                modifier = #{modifier,jdbcType=VARCHAR},
+            </if>
+            <if test="createTime != null">
+                create_time = #{createTime,jdbcType=TIMESTAMP},
+            </if>
+            <if test="modifyTime != null">
+                modify_time = #{modifyTime,jdbcType=TIMESTAMP},
+            </if>
+        </set>
+        where id = #{id,jdbcType=INTEGER}
+    </update>
+    <update id="updateByPrimaryKey" parameterType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        update data_storage
+        set inlong_group_id        = #{inlongGroupId,jdbcType=VARCHAR},
+            inlong_stream_id       = #{inlongStreamId,jdbcType=VARCHAR},
+            storage_type           = #{storageType,jdbcType=VARCHAR},
+            storage_period         = #{storagePeriod,jdbcType=INTEGER},
+            enable_create_resource = #{enableCreateResource,jdbcType=TINYINT},
+            ext_params             = #{extParams,jdbcType=VARCHAR},
+            operate_log            = #{operateLog,jdbcType=VARCHAR},
+            status                 = #{status,jdbcType=INTEGER},
+            previous_status        = #{previousStatus,jdbcType=INTEGER},
+            is_deleted             = #{isDeleted,jdbcType=INTEGER},
+            creator                = #{creator,jdbcType=VARCHAR},
+            modifier               = #{modifier,jdbcType=VARCHAR},
+            create_time            = #{createTime,jdbcType=TIMESTAMP},
+            modify_time            = #{modifyTime,jdbcType=TIMESTAMP}
+        where id = #{id,jdbcType=INTEGER}
+    </update>
+    <update id="updateStorageStatus" parameterType="org.apache.inlong.manager.dao.entity.StorageEntity">
+        update data_storage
+        <!-- MySQL applies SET assignments left to right, so copy the old status into previous_status before overwriting it -->
+        set previous_status = status,
+            status          = #{status,jdbcType=INTEGER},
+            operate_log     = #{operateLog,jdbcType=VARCHAR},
+            modify_time     = now()
+        where id = #{id,jdbcType=INTEGER}
+    </update>
+
+</mapper>
\ No newline at end of file
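
The updateStorageStatus statement shifts the row's current status into previous_status inside the UPDATE itself; a hedged sketch of the service-side call (the helper class and status value are hypothetical):

    import org.apache.inlong.manager.dao.entity.StorageEntity;
    import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;

    public class StorageStatusSketch {

        // Move a storage to a new status; previous_status is captured by the SQL itself,
        // so the caller supplies only the new status code and the operate log
        public static void transit(StorageEntityMapper mapper, Integer storageId, Integer nextStatus) {
            StorageEntity entity = new StorageEntity();
            entity.setId(storageId);
            entity.setStatus(nextStatus);
            entity.setOperateLog("storage config successful");
            mapper.updateStorageStatus(entity);
        }
    }
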
diff --git a/inlong-manager/manager-dao/src/main/resources/mappers/StorageFieldEntityMapper.xml b/inlong-manager/manager-dao/src/main/resources/mappers/StorageFieldEntityMapper.xml
new file mode 100644
index 0000000..799ab0f
--- /dev/null
+++ b/inlong-manager/manager-dao/src/main/resources/mappers/StorageFieldEntityMapper.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements.  See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership.  The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing,
+    software distributed under the License is distributed on an
+    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+    KIND, either express or implied.  See the License for the
+    specific language governing permissions and limitations
+    under the License.
+-->
+
+<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
+<mapper namespace="org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper">
+    <resultMap id="BaseResultMap" type="org.apache.inlong.manager.dao.entity.StorageFieldEntity">
+        <id column="id" jdbcType="INTEGER" property="id"/>
+        <result column="inlong_group_id" jdbcType="VARCHAR" property="inlongGroupId"/>
+        <result column="inlong_stream_id" jdbcType="VARCHAR" property="inlongStreamId"/>
+        <result column="storage_id" jdbcType="INTEGER" property="storageId"/>
+        <result column="storage_type" jdbcType="VARCHAR" property="storageType"/>
+        <result column="field_name" jdbcType="VARCHAR" property="fieldName"/>
+        <result column="field_type" jdbcType="VARCHAR" property="fieldType"/>
+        <result column="field_comment" jdbcType="VARCHAR" property="fieldComment"/>
+        <result column="source_field_name" jdbcType="VARCHAR" property="sourceFieldName"/>
+        <result column="source_field_type" jdbcType="VARCHAR" property="sourceFieldType"/>
+        <result column="rank_num" jdbcType="SMALLINT" property="rankNum"/>
+        <result column="is_deleted" jdbcType="INTEGER" property="isDeleted"/>
+    </resultMap>
+    <sql id="Base_Column_List">
+        id, inlong_group_id, inlong_stream_id, storage_id, storage_type,
+        field_name, field_type, field_comment,
+        source_field_name, source_field_type, rank_num, is_deleted
+    </sql>
+
+    <insert id="insert" parameterType="org.apache.inlong.manager.dao.entity.StorageFieldEntity">
+        insert into data_storage_field (id, inlong_group_id, inlong_stream_id,
+                                        storage_id, storage_type,
+                                        field_name, field_type, field_comment,
+                                        source_field_name, source_field_type,
+                                        rank_num, is_deleted)
+        values (#{id,jdbcType=INTEGER}, #{inlongGroupId,jdbcType=VARCHAR}, #{inlongStreamId,jdbcType=VARCHAR},
+                #{storageId,jdbcType=INTEGER}, #{storageType,jdbcType=VARCHAR},
+                #{fieldName,jdbcType=VARCHAR}, #{fieldType,jdbcType=VARCHAR}, #{fieldComment,jdbcType=VARCHAR},
+                #{sourceFieldName,jdbcType=VARCHAR}, #{sourceFieldType,jdbcType=VARCHAR},
+                #{rankNum,jdbcType=SMALLINT}, #{isDeleted,jdbcType=INTEGER})
+    </insert>
+    <insert id="insertAll">
+        insert into data_storage_field (
+        id, inlong_group_id,
+        inlong_stream_id, storage_id,
+        storage_type, field_name,
+        field_type, field_comment,
+        source_field_name, source_field_type,
+        rank_num, is_deleted
+        )
+        values
+        <foreach collection="list" index="index" item="item" separator=",">
+            (
+            #{item.id,jdbcType=INTEGER}, #{item.inlongGroupId,jdbcType=VARCHAR},
+            #{item.inlongStreamId,jdbcType=VARCHAR}, #{item.storageId,jdbcType=INTEGER},
+            #{item.storageType,jdbcType=VARCHAR}, #{item.fieldName,jdbcType=VARCHAR},
+            #{item.fieldType,jdbcType=VARCHAR}, #{item.fieldComment,jdbcType=VARCHAR},
+            #{item.sourceFieldName,jdbcType=VARCHAR}, #{item.sourceFieldType,jdbcType=VARCHAR},
+            #{item.rankNum,jdbcType=SMALLINT}, #{item.isDeleted,jdbcType=INTEGER}
+            )
+        </foreach>
+    </insert>
+
+    <select id="selectByPrimaryKey" parameterType="java.lang.Integer" resultMap="BaseResultMap">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage_field
+        where id = #{id,jdbcType=INTEGER}
+    </select>
+    <select id="selectByStorageId" resultMap="BaseResultMap">
+        select
+        <include refid="Base_Column_List"/>
+        from data_storage_field
+        where storage_id = #{storageId, jdbcType=INTEGER}
+        and is_deleted = 0
+        order by rank_num asc
+    </select>
+    <select id="selectFields" resultMap="BaseResultMap">
+        select field.*
+        from data_storage_field field,
+             data_storage storage
+        where storage.inlong_group_id = #{groupId, jdbcType=VARCHAR}
+          and storage.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
+          and field.storage_id = storage.id
+          and field.is_deleted = 0
+          and storage.is_deleted = 0
+    </select>
+
+    <update id="logicDeleteAll">
+        update data_storage_field
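+        <!-- a non-zero is_deleted marks the row as logically deleted; using the row id keeps deleted rows distinct -->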
+        set is_deleted = id
+        where storage_id = #{storageId, jdbcType=INTEGER}
+          and is_deleted = 0
+    </update>
+
+    <delete id="deleteAll">
+        delete
+        from data_storage_field
+        where storage_id = #{storageId,jdbcType=INTEGER}
+    </delete>
+</mapper>
\ No newline at end of file
diff --git a/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveEntityMapper.xml b/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveEntityMapper.xml
deleted file mode 100644
index eb40ef9..0000000
--- a/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveEntityMapper.xml
+++ /dev/null
@@ -1,524 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
--->
-
-<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
-<mapper namespace="org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper">
-    <resultMap id="BaseResultMap" type="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        <id column="id" jdbcType="INTEGER" property="id"/>
-        <result column="inlong_group_id" jdbcType="VARCHAR" property="inlongGroupId"/>
-        <result column="inlong_stream_id" jdbcType="VARCHAR" property="inlongStreamId"/>
-        <result column="enable_create_table" jdbcType="TINYINT" property="enableCreateTable"/>
-        <result column="jdbc_url" jdbcType="VARCHAR" property="jdbcUrl"/>
-        <result column="username" jdbcType="VARCHAR" property="username"/>
-        <result column="password" jdbcType="VARCHAR" property="password"/>
-        <result column="db_name" jdbcType="VARCHAR" property="dbName"/>
-        <result column="table_name" jdbcType="VARCHAR" property="tableName"/>
-        <result column="hdfs_default_fs" jdbcType="VARCHAR" property="hdfsDefaultFs"/>
-        <result column="warehouse_dir" jdbcType="VARCHAR" property="warehouseDir"/>
-
-        <result column="partition_interval" jdbcType="INTEGER" property="partitionInterval"/>
-        <result column="partition_unit" jdbcType="VARCHAR" property="partitionUnit"/>
-        <result column="primary_partition" jdbcType="VARCHAR" property="primaryPartition"/>
-        <result column="secondary_partition" jdbcType="VARCHAR" property="secondaryPartition"/>
-        <result column="partition_creation_strategy" jdbcType="VARCHAR" property="partitionCreationStrategy"/>
-
-        <result column="file_format" jdbcType="VARCHAR" property="fileFormat"/>
-        <result column="data_encoding" jdbcType="VARCHAR" property="dataEncoding"/>
-        <result column="data_separator" jdbcType="VARCHAR" property="dataSeparator"/>
-        <result column="storage_period" jdbcType="INTEGER" property="storagePeriod"/>
-        <result column="opt_log" jdbcType="VARCHAR" property="optLog"/>
-
-        <result column="status" jdbcType="INTEGER" property="status"/>
-        <result column="previous_status" jdbcType="INTEGER" property="previousStatus"/>
-        <result column="is_deleted" jdbcType="INTEGER" property="isDeleted"/>
-        <result column="creator" jdbcType="VARCHAR" property="creator"/>
-        <result column="modifier" jdbcType="VARCHAR" property="modifier"/>
-        <result column="create_time" jdbcType="TIMESTAMP" property="createTime"/>
-        <result column="modify_time" jdbcType="TIMESTAMP" property="modifyTime"/>
-    </resultMap>
-
-    <sql id="Base_Column_List">
-        id, inlong_group_id, inlong_stream_id, enable_create_table, jdbc_url, username, password, db_name, table_name,
-        hdfs_default_fs, warehouse_dir, partition_interval, partition_unit, primary_partition, secondary_partition,
-        partition_creation_strategy, file_format, data_encoding, data_separator, storage_period,
-        opt_log, status, previous_status, is_deleted, creator, modifier, create_time, modify_time
-    </sql>
-
-    <select id="selectByPrimaryKey" parameterType="java.lang.Integer" resultMap="BaseResultMap">
-        select
-        <include refid="Base_Column_List"/>
-        from storage_hive
-        where id = #{id,jdbcType=INTEGER}
-    </select>
-    <select id="selectByCondition" parameterType="org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest"
-            resultMap="BaseResultMap">
-        select
-        <include refid="Base_Column_List"/>
-        from storage_hive
-        <where>
-            is_deleted = 0
-            <if test="request.inlongGroupId != null and request.inlongGroupId != ''">
-                and inlong_group_id = #{request.inlongGroupId, jdbcType=VARCHAR}
-            </if>
-            <if test="request.inlongStreamId != null and request.inlongStreamId != ''">
-                and inlong_stream_id = #{request.inlongStreamId, jdbcType=VARCHAR}
-            </if>
-            <if test="request.keyWord != null and request.keyWord != ''">
-                and (
-                inlong_group_id like CONCAT('%', #{request.keyWord}, '%')
-                or inlong_stream_id like CONCAT('%', #{request.keyWord}, '%')
-                )
-            </if>
-            <if test="request.status != null and request.status != ''">
-                and status = #{request.status, jdbcType=INTEGER}
-            </if>
-            order by modify_time desc
-        </where>
-    </select>
-    <select id="selectByIdentifier" resultType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        select
-        <include refid="Base_Column_List"/>
-        from storage_hive
-        <where>
-            is_deleted = 0
-            and inlong_group_id = #{groupId, jdbcType=VARCHAR}
-            <if test="streamId != null and streamId != ''">
-                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
-            </if>
-        </where>
-    </select>
-    <select id="selectCountByIdentifier" resultType="java.lang.Integer">
-        select count(1)
-        from storage_hive
-        <where>
-            is_deleted = 0
-            <if test="groupId != null and groupId != ''">
-                and inlong_group_id = #{groupId, jdbcType=VARCHAR}
-            </if>
-            <if test="streamId != null and streamId != ''">
-                and inlong_stream_id = #{streamId, jdbcType=VARCHAR}
-            </if>
-        </where>
-    </select>
-    <select id="selectDataStreamExists" resultType="java.lang.String">
-        select inlong_stream_id
-        from storage_hive
-        <where>
-            <if test="groupId != null and groupId != ''">
-                and inlong_group_id = #{groupId, jdbcType=VARCHAR}
-            </if>
-            and inlong_stream_id in
-            <foreach collection="streamIdList" open="(" close=")" separator="," index="index" item="item">
-                #{item}
-            </foreach>
-            and is_deleted = 0
-        </where>
-    </select>
-    <select id="selectSummary" resultType="org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo">
-        select s.id,
-               s.inlong_group_id,
-               s.inlong_stream_id,
-               "HIVE" as storage_type
-        from storage_hive s
-        where s.is_deleted = 0
-          and s.inlong_group_id = #{groupId, jdbcType=VARCHAR}
-          and s.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
-    </select>
-    <select id="selectAllHiveConfig" resultType="org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO">
-        select hive.id,
-               hive.inlong_group_id,
-               hive.inlong_stream_id,
-
-               hive.jdbc_url,
-               hive.username,
-               hive.password,
-               hive.db_name,
-               hive.table_name,
-               hive.hdfs_default_fs,
-               hive.warehouse_dir,
-
-               hive.partition_interval,
-               hive.partition_unit,
-               hive.primary_partition,
-               hive.secondary_partition,
-               hive.partition_creation_strategy,
-
-               hive.file_format,
-               hive.data_encoding,
-               hive.data_separator   as targetSeparator,
-               hive.status,
-               hive.creator,
-
-               stream.mq_resource_obj,
-               stream.data_source_type,
-               stream.data_type,
-               stream.description,
-               stream.data_separator as sourceSeparator,
-               stream.data_escape_char
-        from data_stream stream,
-             storage_hive hive
-        <where>
-            stream.is_deleted = 0
-            and hive.is_deleted = 0
-            and stream.inlong_group_id = hive.inlong_group_id
-            and stream.inlong_stream_id = hive.inlong_stream_id
-            and stream.inlong_group_id = #{groupId, jdbcType=VARCHAR}
-            <if test="streamId != null and streamId != ''">
-                and stream.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
-            </if>
-        </where>
-    </select>
-
-    <delete id="deleteByPrimaryKey" parameterType="java.lang.Integer">
-        delete
-        from storage_hive
-        where id = #{id,jdbcType=INTEGER}
-    </delete>
-
-    <insert id="insert" useGeneratedKeys="true" keyProperty="id"
-            parameterType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        insert into storage_hive (id, inlong_group_id,
-                                  inlong_stream_id, enable_create_table,
-                                  jdbc_url, username, password,
-                                  db_name, table_name, hdfs_default_fs,
-                                  warehouse_dir, partition_interval,
-                                  partition_unit, primary_partition,
-                                  secondary_partition, partition_creation_strategy,
-                                  file_format, data_encoding, data_separator,
-                                  storage_period, opt_log, status,
-                                  previous_status, is_deleted, creator,
-                                  modifier, create_time, modify_time)
-        values (#{id,jdbcType=INTEGER}, #{inlongGroupId,jdbcType=VARCHAR},
-                #{inlongStreamId,jdbcType=VARCHAR}, #{enableCreateTable,jdbcType=TINYINT},
-                #{jdbcUrl,jdbcType=VARCHAR}, #{username,jdbcType=VARCHAR}, #{password,jdbcType=VARCHAR},
-                #{dbName,jdbcType=VARCHAR}, #{tableName,jdbcType=VARCHAR}, #{hdfsDefaultFs,jdbcType=VARCHAR},
-                #{warehouseDir,jdbcType=VARCHAR}, #{partitionInterval,jdbcType=INTEGER},
-                #{partitionUnit,jdbcType=VARCHAR}, #{primaryPartition,jdbcType=VARCHAR},
-                #{secondaryPartition,jdbcType=VARCHAR}, #{partitionCreationStrategy,jdbcType=VARCHAR},
-                #{fileFormat,jdbcType=VARCHAR}, #{dataEncoding,jdbcType=VARCHAR}, #{dataSeparator,jdbcType=VARCHAR},
-                #{storagePeriod,jdbcType=INTEGER}, #{optLog,jdbcType=VARCHAR}, #{status,jdbcType=INTEGER},
-                #{previousStatus,jdbcType=INTEGER}, #{isDeleted,jdbcType=INTEGER}, #{creator,jdbcType=VARCHAR},
-                #{modifier,jdbcType=VARCHAR}, #{createTime,jdbcType=TIMESTAMP}, #{modifyTime,jdbcType=TIMESTAMP})
-    </insert>
-    <insert id="insertSelective" useGeneratedKeys="true" keyProperty="id"
-            parameterType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        insert into storage_hive
-        <trim prefix="(" suffix=")" suffixOverrides=",">
-            <if test="id != null">
-                id,
-            </if>
-            <if test="inlongGroupId != null">
-                inlong_group_id,
-            </if>
-            <if test="inlongStreamId != null">
-                inlong_stream_id,
-            </if>
-            <if test="enableCreateTable != null">
-                enable_create_table,
-            </if>
-            <if test="jdbcUrl != null">
-                jdbc_url,
-            </if>
-            <if test="username != null">
-                username,
-            </if>
-            <if test="password != null">
-                password,
-            </if>
-            <if test="dbName != null">
-                db_name,
-            </if>
-            <if test="tableName != null">
-                table_name,
-            </if>
-            <if test="hdfsDefaultFs != null">
-                hdfs_default_fs,
-            </if>
-            <if test="warehouseDir != null">
-                warehouse_dir,
-            </if>
-            <if test="partitionInterval != null">
-                partition_interval,
-            </if>
-            <if test="partitionUnit != null">
-                partition_unit,
-            </if>
-            <if test="primaryPartition != null">
-                primary_partition,
-            </if>
-            <if test="secondaryPartition != null">
-                secondary_partition,
-            </if>
-            <if test="partitionCreationStrategy != null">
-                partition_creation_strategy,
-            </if>
-            <if test="fileFormat != null">
-                file_format,
-            </if>
-            <if test="dataEncoding != null">
-                data_encoding,
-            </if>
-            <if test="dataSeparator != null">
-                data_separator,
-            </if>
-            <if test="storagePeriod != null">
-                storage_period,
-            </if>
-            <if test="optLog != null">
-                opt_log,
-            </if>
-            <if test="status != null">
-                status,
-            </if>
-            <if test="previousStatus != null">
-                previous_status,
-            </if>
-            <if test="isDeleted != null">
-                is_deleted,
-            </if>
-            <if test="creator != null">
-                creator,
-            </if>
-            <if test="modifier != null">
-                modifier,
-            </if>
-            <if test="createTime != null">
-                create_time,
-            </if>
-            <if test="modifyTime != null">
-                modify_time,
-            </if>
-        </trim>
-        <trim prefix="values (" suffix=")" suffixOverrides=",">
-            <if test="id != null">
-                #{id,jdbcType=INTEGER},
-            </if>
-            <if test="inlongGroupId != null">
-                #{inlongGroupId,jdbcType=VARCHAR},
-            </if>
-            <if test="inlongStreamId != null">
-                #{inlongStreamId,jdbcType=VARCHAR},
-            </if>
-            <if test="enableCreateTable != null">
-                #{enableCreateTable,jdbcType=TINYINT},
-            </if>
-            <if test="jdbcUrl != null">
-                #{jdbcUrl,jdbcType=VARCHAR},
-            </if>
-            <if test="username != null">
-                #{username,jdbcType=VARCHAR},
-            </if>
-            <if test="password != null">
-                #{password,jdbcType=VARCHAR},
-            </if>
-            <if test="dbName != null">
-                #{dbName,jdbcType=VARCHAR},
-            </if>
-            <if test="tableName != null">
-                #{tableName,jdbcType=VARCHAR},
-            </if>
-            <if test="hdfsDefaultFs != null">
-                #{hdfsDefaultFs,jdbcType=VARCHAR},
-            </if>
-            <if test="warehouseDir != null">
-                #{warehouseDir,jdbcType=VARCHAR},
-            </if>
-            <if test="partitionInterval != null">
-                #{partitionInterval,jdbcType=INTEGER},
-            </if>
-            <if test="partitionUnit != null">
-                #{partitionUnit,jdbcType=VARCHAR},
-            </if>
-            <if test="primaryPartition != null">
-                #{primaryPartition,jdbcType=VARCHAR},
-            </if>
-            <if test="secondaryPartition != null">
-                #{secondaryPartition,jdbcType=VARCHAR},
-            </if>
-            <if test="partitionCreationStrategy != null">
-                #{partitionCreationStrategy,jdbcType=VARCHAR},
-            </if>
-            <if test="fileFormat != null">
-                #{fileFormat,jdbcType=VARCHAR},
-            </if>
-            <if test="dataEncoding != null">
-                #{dataEncoding,jdbcType=VARCHAR},
-            </if>
-            <if test="dataSeparator != null">
-                #{dataSeparator,jdbcType=VARCHAR},
-            </if>
-            <if test="storagePeriod != null">
-                #{storagePeriod,jdbcType=INTEGER},
-            </if>
-            <if test="optLog != null">
-                #{optLog,jdbcType=VARCHAR},
-            </if>
-            <if test="status != null">
-                #{status,jdbcType=INTEGER},
-            </if>
-            <if test="previousStatus != null">
-                #{previousStatus,jdbcType=INTEGER},
-            </if>
-            <if test="isDeleted != null">
-                #{isDeleted,jdbcType=INTEGER},
-            </if>
-            <if test="creator != null">
-                #{creator,jdbcType=VARCHAR},
-            </if>
-            <if test="modifier != null">
-                #{modifier,jdbcType=VARCHAR},
-            </if>
-            <if test="createTime != null">
-                #{createTime,jdbcType=TIMESTAMP},
-            </if>
-            <if test="modifyTime != null">
-                #{modifyTime,jdbcType=TIMESTAMP},
-            </if>
-        </trim>
-    </insert>
-
-    <update id="updateByPrimaryKeySelective" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        update storage_hive
-        <set>
-            <if test="inlongGroupId != null">
-                inlong_group_id = #{inlongGroupId,jdbcType=VARCHAR},
-            </if>
-            <if test="inlongStreamId != null">
-                inlong_stream_id = #{inlongStreamId,jdbcType=VARCHAR},
-            </if>
-            <if test="enableCreateTable != null">
-                enable_create_table = #{enableCreateTable,jdbcType=TINYINT},
-            </if>
-            <if test="jdbcUrl != null">
-                jdbc_url = #{jdbcUrl,jdbcType=VARCHAR},
-            </if>
-            <if test="username != null">
-                username = #{username,jdbcType=VARCHAR},
-            </if>
-            <if test="password != null">
-                password = #{password,jdbcType=VARCHAR},
-            </if>
-            <if test="dbName != null">
-                db_name = #{dbName,jdbcType=VARCHAR},
-            </if>
-            <if test="tableName != null">
-                table_name = #{tableName,jdbcType=VARCHAR},
-            </if>
-            <if test="hdfsDefaultFs != null">
-                hdfs_default_fs = #{hdfsDefaultFs,jdbcType=VARCHAR},
-            </if>
-            <if test="warehouseDir != null">
-                warehouse_dir = #{warehouseDir,jdbcType=VARCHAR},
-            </if>
-            <if test="partitionInterval != null">
-                partition_interval = #{partitionInterval,jdbcType=INTEGER},
-            </if>
-            <if test="partitionUnit != null">
-                partition_unit = #{partitionUnit,jdbcType=VARCHAR},
-            </if>
-            <if test="primaryPartition != null">
-                primary_partition = #{primaryPartition,jdbcType=VARCHAR},
-            </if>
-            <if test="secondaryPartition != null">
-                secondary_partition = #{secondaryPartition,jdbcType=VARCHAR},
-            </if>
-            <if test="partitionCreationStrategy != null">
-                partition_creation_strategy = #{partitionCreationStrategy,jdbcType=VARCHAR},
-            </if>
-            <if test="fileFormat != null">
-                file_format = #{fileFormat,jdbcType=VARCHAR},
-            </if>
-            <if test="dataEncoding != null">
-                data_encoding = #{dataEncoding,jdbcType=VARCHAR},
-            </if>
-            <if test="dataSeparator != null">
-                data_separator = #{dataSeparator,jdbcType=VARCHAR},
-            </if>
-            <if test="storagePeriod != null">
-                storage_period = #{storagePeriod,jdbcType=INTEGER},
-            </if>
-            <if test="optLog != null">
-                opt_log = #{optLog,jdbcType=VARCHAR},
-            </if>
-            <if test="status != null">
-                status = #{status,jdbcType=INTEGER},
-            </if>
-            <if test="previousStatus != null">
-                previous_status = #{previousStatus,jdbcType=INTEGER},
-            </if>
-            <if test="isDeleted != null">
-                is_deleted = #{isDeleted,jdbcType=INTEGER},
-            </if>
-            <if test="creator != null">
-                creator = #{creator,jdbcType=VARCHAR},
-            </if>
-            <if test="modifier != null">
-                modifier = #{modifier,jdbcType=VARCHAR},
-            </if>
-            <if test="createTime != null">
-                create_time = #{createTime,jdbcType=TIMESTAMP},
-            </if>
-            <if test="modifyTime != null">
-                modify_time = #{modifyTime,jdbcType=TIMESTAMP},
-            </if>
-        </set>
-        where id = #{id,jdbcType=INTEGER}
-    </update>
-    <update id="updateByPrimaryKey" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        update storage_hive
-        set inlong_group_id             = #{inlongGroupId,jdbcType=VARCHAR},
-            inlong_stream_id            = #{inlongStreamId,jdbcType=VARCHAR},
-            enable_create_table         = #{enableCreateTable,jdbcType=TINYINT},
-            jdbc_url                    = #{jdbcUrl,jdbcType=VARCHAR},
-            username                    = #{username,jdbcType=VARCHAR},
-            password                    = #{password,jdbcType=VARCHAR},
-            db_name                     = #{dbName,jdbcType=VARCHAR},
-            table_name                  = #{tableName,jdbcType=VARCHAR},
-            hdfs_default_fs             = #{hdfsDefaultFs,jdbcType=VARCHAR},
-            warehouse_dir               = #{warehouseDir,jdbcType=VARCHAR},
-            partition_interval          = #{partitionInterval,jdbcType=INTEGER},
-            partition_unit              = #{partitionUnit,jdbcType=VARCHAR},
-            primary_partition           = #{primaryPartition,jdbcType=VARCHAR},
-            secondary_partition         = #{secondaryPartition,jdbcType=VARCHAR},
-            partition_creation_strategy = #{partitionCreationStrategy,jdbcType=VARCHAR},
-            file_format                 = #{fileFormat,jdbcType=VARCHAR},
-            data_encoding               = #{dataEncoding,jdbcType=VARCHAR},
-            data_separator              = #{dataSeparator,jdbcType=VARCHAR},
-            storage_period              = #{storagePeriod,jdbcType=INTEGER},
-            opt_log                     = #{optLog,jdbcType=VARCHAR},
-            status                      = #{status,jdbcType=INTEGER},
-            previous_status             = #{previousStatus,jdbcType=INTEGER},
-            is_deleted                  = #{isDeleted,jdbcType=INTEGER},
-            creator                     = #{creator,jdbcType=VARCHAR},
-            modifier                    = #{modifier,jdbcType=VARCHAR},
-            create_time                 = #{createTime,jdbcType=TIMESTAMP},
-            modify_time                 = #{modifyTime,jdbcType=TIMESTAMP}
-        where id = #{id,jdbcType=INTEGER}
-    </update>
-    <update id="updateStorageStatusById" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveEntity">
-        update storage_hive
-        set status          = #{status,jdbcType=INTEGER},
-            previous_status = status,
-            opt_log         = #{optLog,jdbcType=VARCHAR},
-            modify_time     = now()
-        where id = #{id,jdbcType=INTEGER}
-    </update>
-
-</mapper>
\ No newline at end of file
diff --git a/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveFieldEntityMapper.xml b/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveFieldEntityMapper.xml
deleted file mode 100644
index afe1798..0000000
--- a/inlong-manager/manager-dao/src/main/resources/mappers/StorageHiveFieldEntityMapper.xml
+++ /dev/null
@@ -1,206 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
--->
-
-<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
-<mapper namespace="org.apache.inlong.manager.dao.mapper.StorageHiveFieldEntityMapper">
-    <resultMap id="BaseResultMap" type="org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity">
-        <id column="id" jdbcType="INTEGER" property="id"/>
-        <result column="storage_id" jdbcType="VARCHAR" property="storageId"/>
-        <result column="field_name" jdbcType="VARCHAR" property="fieldName"/>
-        <result column="field_type" jdbcType="VARCHAR" property="fieldType"/>
-        <result column="field_comment" jdbcType="VARCHAR" property="fieldComment"/>
-        <result column="is_required" jdbcType="INTEGER" property="isRequired"/>
-        <result column="source_field_name" jdbcType="VARCHAR" property="sourceFieldName"/>
-        <result column="source_field_type" jdbcType="VARCHAR" property="sourceFieldType"/>
-        <result column="rank_num" jdbcType="SMALLINT" property="rankNum"/>
-        <result column="is_deleted" jdbcType="INTEGER" property="isDeleted"/>
-    </resultMap>
-    <sql id="Base_Column_List">
-        id, storage_id, field_name, field_type, field_comment, is_required,
-        source_field_name, source_field_type, rank_num, is_deleted
-    </sql>
-    <select id="selectByPrimaryKey" parameterType="java.lang.Integer" resultMap="BaseResultMap">
-        select
-        <include refid="Base_Column_List"/>
-        from storage_hive_field
-        where id = #{id,jdbcType=INTEGER}
-    </select>
-    <select id="selectByStorageId" resultMap="BaseResultMap">
-        select
-        <include refid="Base_Column_List"/>
-        from storage_hive_field
-        where storage_id = #{storageId, jdbcType=INTEGER}
-        and is_deleted = 0
-        order by rank_num asc
-    </select>
-
-    <delete id="deleteByPrimaryKey" parameterType="java.lang.Integer">
-        delete
-        from storage_hive_field
-        where id = #{id,jdbcType=INTEGER}
-    </delete>
-    <delete id="deleteAllByStorageId">
-        delete
-        from storage_hive_field
-        where storage_id = #{storageId,jdbcType=INTEGER}
-    </delete>
-
-    <insert id="insert" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity">
-        insert into storage_hive_field (id, storage_id, field_name,
-                                        field_type, field_comment, is_required,
-                                        source_field_name, source_field_type,
-                                        rank_num, is_deleted)
-        values (#{id,jdbcType=INTEGER}, #{storageId,jdbcType=INTEGER}, #{fieldName,jdbcType=VARCHAR},
-                #{fieldType,jdbcType=VARCHAR}, #{fieldComment,jdbcType=VARCHAR}, #{isRequired,jdbcType=INTEGER},
-                #{sourceFieldName,jdbcType=VARCHAR}, #{sourceFieldType,jdbcType=VARCHAR},
-                #{rankNum,jdbcType=SMALLINT}, #{isDeleted,jdbcType=INTEGER})
-    </insert>
-    <insert id="insertSelective" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity">
-        insert into storage_hive_field
-        <trim prefix="(" suffix=")" suffixOverrides=",">
-            <if test="id != null">
-                id,
-            </if>
-            <if test="storageId != null">
-                storage_id,
-            </if>
-            <if test="fieldName != null">
-                field_name,
-            </if>
-            <if test="fieldType != null">
-                field_type,
-            </if>
-            <if test="fieldComment != null">
-                field_comment,
-            </if>
-            <if test="isRequired != null">
-                is_required,
-            </if>
-            <if test="sourceFieldName != null">
-                source_field_name,
-            </if>
-            <if test="sourceFieldType != null">
-                source_field_type,
-            </if>
-            <if test="rankNum != null">
-                rank_num,
-            </if>
-            <if test="isDeleted != null">
-                is_deleted,
-            </if>
-        </trim>
-        <trim prefix="values (" suffix=")" suffixOverrides=",">
-            <if test="id != null">
-                #{id,jdbcType=INTEGER},
-            </if>
-            <if test="storageId != null">
-                #{storageId,jdbcType=INTEGER},
-            </if>
-            <if test="fieldName != null">
-                #{fieldName,jdbcType=VARCHAR},
-            </if>
-            <if test="fieldType != null">
-                #{fieldType,jdbcType=VARCHAR},
-            </if>
-            <if test="fieldComment != null">
-                #{fieldComment,jdbcType=VARCHAR},
-            </if>
-            <if test="isRequired != null">
-                #{isRequired,jdbcType=INTEGER},
-            </if>
-            <if test="sourceFieldName != null">
-                #{sourceFieldName,jdbcType=VARCHAR},
-            </if>
-            <if test="sourceFieldType != null">
-                #{sourceFieldType,jdbcType=VARCHAR},
-            </if>
-            <if test="rankNum != null">
-                #{rankNum,jdbcType=SMALLINT},
-            </if>
-            <if test="isDeleted != null">
-                #{isDeleted,jdbcType=INTEGER},
-            </if>
-        </trim>
-    </insert>
-    <update id="updateByPrimaryKeySelective"
-            parameterType="org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity">
-        update storage_hive_field
-        <set>
-            <if test="storageId != null">
-                storage_id = #{storageId,jdbcType=INTEGER},
-            </if>
-            <if test="fieldName != null">
-                field_name = #{fieldName,jdbcType=VARCHAR},
-            </if>
-            <if test="fieldType != null">
-                field_type = #{fieldType,jdbcType=VARCHAR},
-            </if>
-            <if test="fieldComment != null">
-                field_comment = #{fieldComment,jdbcType=VARCHAR},
-            </if>
-            <if test="isRequired != null">
-                is_required = #{isRequired,jdbcType=INTEGER},
-            </if>
-            <if test="sourceFieldName != null">
-                source_field_name = #{sourceFieldName,jdbcType=VARCHAR},
-            </if>
-            <if test="sourceFieldType != null">
-                source_field_type = #{sourceFieldType,jdbcType=VARCHAR},
-            </if>
-            <if test="rankNum != null">
-                rank_num = #{rankNum,jdbcType=SMALLINT},
-            </if>
-            <if test="isDeleted != null">
-                is_deleted = #{isDeleted,jdbcType=INTEGER},
-            </if>
-        </set>
-        where id = #{id,jdbcType=INTEGER}
-    </update>
-    <update id="updateByPrimaryKey" parameterType="org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity">
-        update storage_hive_field
-        set storage_id        = #{storageId,jdbcType=INTEGER},
-            field_name        = #{fieldName,jdbcType=VARCHAR},
-            field_type        = #{fieldType,jdbcType=VARCHAR},
-            field_comment     = #{fieldComment,jdbcType=VARCHAR},
-            is_required       = #{isRequired,jdbcType=INTEGER},
-            source_field_name = #{sourceFieldName,jdbcType=VARCHAR},
-            source_field_type = #{sourceFieldType,jdbcType=VARCHAR},
-            rank_num          = #{rankNum,jdbcType=SMALLINT},
-            is_deleted        = #{isDeleted,jdbcType=INTEGER}
-        where id = #{id,jdbcType=INTEGER}
-    </update>
-    <update id="logicDeleteAll">
-        update storage_hive_field
-        set is_deleted = 1
-        where storage_id = #{storageId, jdbcType=INTEGER}
-          and is_deleted = 0
-    </update>
-
-    <select id="selectHiveFields" resultMap="BaseResultMap">
-        select f.*
-        from storage_hive_field f,
-             storage_hive s
-        where s.inlong_group_id = #{groupId, jdbcType=VARCHAR}
-          and s.inlong_stream_id = #{streamId, jdbcType=VARCHAR}
-          and f.storage_id = s.id
-          and f.is_deleted = 0
-          and s.is_deleted = 0
-    </select>
-</mapper>
\ No newline at end of file
diff --git a/inlong-manager/manager-dao/src/test/resources/sql/apache_inlong_manager.sql b/inlong-manager/manager-dao/src/test/resources/sql/apache_inlong_manager.sql
index 7bdcb27..75e1654 100644
--- a/inlong-manager/manager-dao/src/test/resources/sql/apache_inlong_manager.sql
+++ b/inlong-manager/manager-dao/src/test/resources/sql/apache_inlong_manager.sql
@@ -72,11 +72,11 @@ DROP TABLE IF EXISTS `business`;
 CREATE TABLE `business`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `name`                varchar(128)          DEFAULT '' COMMENT 'Business name, English, numbers and underscore',
     `cn_name`             varchar(256)          DEFAULT NULL COMMENT 'Chinese display name',
     `description`         varchar(256)          DEFAULT '' COMMENT 'Business Introduction',
-    `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
+    `middleware_type`     varchar(20)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
     `queue_module`        VARCHAR(20)  NULL     DEFAULT 'parallel' COMMENT 'Queue model of Pulsar, parallel: multiple partitions, high throughput, out-of-order messages; serial: single partition, low throughput, and orderly messages',
     `topic_partition_num` INT(4)       NULL     DEFAULT '3' COMMENT 'The number of partitions of Pulsar Topic, 1-20',
     `mq_resource_obj`     varchar(128) NOT NULL COMMENT 'MQ resource object, for Tube, its Topic, for Pulsar, its Namespace',
@@ -105,7 +105,7 @@ DROP TABLE IF EXISTS `business_pulsar`;
 CREATE TABLE `business_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `ensemble`            int(3)                DEFAULT '3' COMMENT 'The writable nodes number of ledger',
     `write_quorum`        int(3)                DEFAULT '3' COMMENT 'The copies number of ledger',
     `ack_quorum`          int(3)                DEFAULT '2' COMMENT 'The number of requested acks',
@@ -128,7 +128,7 @@ DROP TABLE IF EXISTS `business_ext`;
 CREATE TABLE `business_ext`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id',
     `key_name`        varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`       varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`      tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -227,12 +227,12 @@ DROP TABLE IF EXISTS `consumption`;
 CREATE TABLE `consumption`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `consumer_group_name` varchar(255)          DEFAULT NULL COMMENT 'consumer group name',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256)          DEFAULT NULL COMMENT 'consumer group name',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
     `in_charges`          varchar(512) NOT NULL COMMENT 'Person in charge of consumption',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id',
     `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
-    `topic`               varchar(255) NOT NULL COMMENT 'Consumption topic',
+    `topic`               varchar(256) NOT NULL COMMENT 'Consumption topic',
     `filter_enabled`      int(2)                DEFAULT '0' COMMENT 'Whether to filter, default 0, not filter consume',
     `inlong_stream_id`    varchar(1024)         DEFAULT NULL COMMENT 'Data stream ID for consumption, if filter_enable is 1, it cannot empty',
     `status`              int(4)       NOT NULL COMMENT 'Status: draft, pending approval, approval rejected, approval passed',
@@ -252,13 +252,13 @@ CREATE TABLE `consumption_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT,
     `consumption_id`      int(11)      DEFAULT NULL COMMENT 'ID of the consumption information to which it belongs, guaranteed to be uniquely associated with consumption information',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
-    `consumer_group_name` varchar(255) DEFAULT NULL COMMENT 'Consumer group name',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group ID',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256) DEFAULT NULL COMMENT 'Consumer group name',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group ID',
     `is_rlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure the retry letter topic, 0: no configuration, 1: configuration',
-    `retry_letter_topic`  varchar(255) DEFAULT NULL COMMENT 'The name of the retry queue topic',
+    `retry_letter_topic`  varchar(256) DEFAULT NULL COMMENT 'The name of the retry queue topic',
     `is_dlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure dead letter topic, 0: no configuration, 1: means configuration',
-    `dead_letter_topic`   varchar(255) DEFAULT NULL COMMENT 'dead letter topic name',
+    `dead_letter_topic`   varchar(256) DEFAULT NULL COMMENT 'dead letter topic name',
     `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete',
     PRIMARY KEY (`id`)
 ) COMMENT ='Pulsar consumption table';
@@ -332,8 +332,8 @@ DROP TABLE IF EXISTS `data_stream`;
 CREATE TABLE `data_stream`
 (
     `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_stream_id`       varchar(128) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
-    `inlong_group_id`        varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
     `name`                   varchar(64)       DEFAULT NULL COMMENT 'The name of the data stream page display, can be Chinese',
     `description`            varchar(256)      DEFAULT '' COMMENT 'Introduction to data stream',
     `mq_resource_obj`        varchar(128)      DEFAULT NULL COMMENT 'MQ resource object, in the data stream, Tube is data_stream_id, Pulsar is Topic',
@@ -368,8 +368,8 @@ DROP TABLE IF EXISTS `data_stream_ext`;
 CREATE TABLE `data_stream_ext`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `key_name`         varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`        varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -384,20 +384,16 @@ DROP TABLE IF EXISTS `data_stream_field`;
 CREATE TABLE `data_stream_field`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id`    varchar(256) NOT NULL COMMENT 'Owning data stream id',
-    `is_predefined_field` tinyint(1)   DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
-    `field_name`          varchar(20)  NOT NULL COMMENT 'field name',
-    `field_value`         varchar(128) DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
-    `pre_expression`      varchar(256) DEFAULT NULL COMMENT 'Pre-defined field value expression',
-    `field_type`          varchar(20)  NOT NULL COMMENT 'field type',
-    `field_comment`       varchar(50)  DEFAULT NULL COMMENT 'Field description',
-    `rank_num`            smallint(6)  DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `is_exist`            tinyint(1)   DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
-    `bon_field_path`      varchar(256) DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`      varchar(64)  DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`       varchar(20)  DEFAULT NULL COMMENT 'Encryption level',
+    `is_predefined_field` tinyint(1)    DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
+    `field_name`          varchar(50)  NOT NULL COMMENT 'field name',
+    `field_value`         varchar(128)  DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
+    `pre_expression`      varchar(256)  DEFAULT NULL COMMENT 'Pre-defined field value expression',
+    `field_type`          varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`       varchar(2000) DEFAULT NULL COMMENT 'field description',
+    `rank_num`            smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
+    `is_deleted`          tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     PRIMARY KEY (`id`)
 );
 
@@ -409,13 +405,13 @@ CREATE TABLE `operation_log`
 (
     `id`                  int(11)   NOT NULL AUTO_INCREMENT,
     `authentication_type` varchar(64)        DEFAULT NULL COMMENT 'Authentication type',
-    `operation_type`      varchar(255)       DEFAULT NULL COMMENT 'operation type',
+    `operation_type`      varchar(256)       DEFAULT NULL COMMENT 'operation type',
     `http_method`         varchar(64)        DEFAULT NULL COMMENT 'Request method',
-    `invoke_method`       varchar(255)       DEFAULT NULL COMMENT 'invoke method',
-    `operator`            varchar(255)       DEFAULT NULL COMMENT 'operator',
-    `proxy`               varchar(255)       DEFAULT NULL COMMENT 'proxy',
-    `request_url`         varchar(255)       DEFAULT NULL COMMENT 'Request URL',
-    `remote_address`      varchar(255)       DEFAULT NULL COMMENT 'Request IP',
+    `invoke_method`       varchar(256)       DEFAULT NULL COMMENT 'invoke method',
+    `operator`            varchar(256)       DEFAULT NULL COMMENT 'operator',
+    `proxy`               varchar(256)       DEFAULT NULL COMMENT 'proxy',
+    `request_url`         varchar(256)       DEFAULT NULL COMMENT 'Request URL',
+    `remote_address`      varchar(256)       DEFAULT NULL COMMENT 'Request IP',
     `cost_time`           bigint(20)         DEFAULT NULL COMMENT 'time-consuming',
     `body`                text COMMENT 'Request body',
     `param`               text COMMENT 'parameter',
@@ -433,11 +429,11 @@ CREATE TABLE `role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
     `role_code`   varchar(100) NOT NULL COMMENT 'Role code',
-    `role_name`   varchar(255) NOT NULL COMMENT 'Role Chinese name',
+    `role_name`   varchar(256) NOT NULL COMMENT 'Role Chinese name',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_role_code` (`role_code`),
@@ -451,7 +447,7 @@ DROP TABLE IF EXISTS `source_db_basic`;
 CREATE TABLE `source_db_basic`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `sync_type`        tinyint(1)            DEFAULT '0' COMMENT 'Data synchronization type, 0: FULL, full amount, 1: INCREMENTAL, incremental',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -470,8 +466,8 @@ DROP TABLE IF EXISTS `source_db_detail`;
 CREATE TABLE `source_db_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)  NOT NULL COMMENT 'Collection type, with Agent, DataProxy client, LoadProxy',
     `db_name`          varchar(128)          DEFAULT NULL COMMENT 'database name',
     `transfer_ip`      varchar(64)           DEFAULT NULL COMMENT 'Transfer IP',
@@ -498,8 +494,8 @@ DROP TABLE IF EXISTS `source_file_basic`;
 CREATE TABLE `source_file_basic`
 (
     `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'ID',
-    `inlong_group_id`   varchar(128) NOT NULL COMMENT 'Business group id',
-    `inlong_stream_id`  varchar(128) NOT NULL COMMENT 'Data stream id',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'Business group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'Data stream id',
     `is_hybrid_source`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to mix data sources',
     `is_table_mapping`  tinyint(1)            DEFAULT '0' COMMENT 'Is there a table name mapping',
     `date_offset`       int(4)                DEFAULT '0' COMMENT 'Time offset\n',
@@ -523,8 +519,8 @@ DROP TABLE IF EXISTS `source_file_detail`;
 CREATE TABLE `source_file_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)           DEFAULT 'Agent' COMMENT 'Collection type, there are Agent, DataProxy client, LoadProxy, the file can only be Agent temporarily',
     `server_name`      varchar(64)           DEFAULT NULL COMMENT 'The name of the data source service. If it is empty, add configuration through the following fields',
     `ip`               varchar(128) NOT NULL COMMENT 'Data source IP address',
@@ -562,62 +558,47 @@ CREATE TABLE `storage_ext`
 );
 
 -- ----------------------------
--- Table structure for storage_hive
+-- Table structure for data_storage
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive`;
-CREATE TABLE `storage_hive`
+DROP TABLE IF EXISTS `data_storage`;
+CREATE TABLE `data_storage`
 (
-    `id`                          int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`             varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id`            varchar(128) NOT NULL COMMENT 'Owning data stream id',
-    `jdbc_url`                    varchar(255)          DEFAULT NULL COMMENT 'Hive JDBC connection URL, such as "jdbc:hive2://127.0.0.1:10000"',
-    `username`                    varchar(128)          DEFAULT NULL COMMENT 'Username',
-    `password`                    varchar(255)          DEFAULT NULL COMMENT 'User password',
-    `db_name`                     varchar(128)          DEFAULT NULL COMMENT 'Target database name',
-    `table_name`                  varchar(128)          DEFAULT NULL COMMENT 'Target data table name',
-    `hdfs_default_fs`             varchar(255)          DEFAULT NULL COMMENT 'HDFS defaultFS, such as "hdfs://127.0.0.1:9000"',
-    `warehouse_dir`               varchar(250)          DEFAULT '/user/hive/warehouse' COMMENT 'Hive table storage path on HDFS, such as "/user/hive/warehouse"',
-    `partition_interval`          int(5)                DEFAULT NULL COMMENT 'Partition interval, support: 1(D / H), 10 I, 30 I',
-    `partition_unit`              varchar(10)           DEFAULT 'D' COMMENT 'Partition type, support: D-day, H-hour, I-minute',
-    `primary_partition`           varchar(255)          DEFAULT 'dt' COMMENT 'primary partition field',
-    `secondary_partition`         varchar(256)          DEFAULT NULL COMMENT 'secondary partition field',
-    `partition_creation_strategy` varchar(50)           DEFAULT 'COMPLETED' COMMENT 'Partition creation strategy, support: ARRIVED, COMPLETED',
-    `file_format`                 varchar(15)           DEFAULT 'TextFile' COMMENT 'The stored table format, TextFile, RCFile, SequenceFile, Avro',
-    `data_encoding`               varchar(20)           DEFAULT 'UTF-8' COMMENT 'data encoding type',
-    `data_separator`              varchar(10)           DEFAULT NULL COMMENT 'data field separator',
-    `storage_period`              int(5)                DEFAULT '10' COMMENT 'Data storage period, unit: day',
-    `opt_log`                     varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
-    `status`                      int(4)                DEFAULT '0' COMMENT 'status',
-    `previous_status`             int(4)                DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`                  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `creator`                     varchar(64)           DEFAULT NULL COMMENT 'creator name',
-    `modifier`                    varchar(64)           DEFAULT NULL COMMENT 'modifier name',
-    `create_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
-    `modify_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'modify time',
-    `temp_view`                   text                  DEFAULT NULL COMMENT 'Temporary view, used to save un-submitted and unapproved intermediate data after modification',
+    `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Owning data stream id',
+    `storage_type`           varchar(15)           DEFAULT 'HIVE' COMMENT 'Storage type, including: HIVE, ES, etc',
+    `storage_period`         int(11)               DEFAULT '10' COMMENT 'Data storage period, unit: day',
+    `enable_create_resource` tinyint(1)            DEFAULT '1' COMMENT 'Whether to enable creating the storage resource, 0: disable, 1: enable. Default is 1',
+    `ext_params`             text COMMENT 'Other fields, saved as a JSON string',
+    `operate_log`            varchar(5000)         DEFAULT NULL COMMENT 'Background operate log',
+    `status`                 int(11)               DEFAULT '0' COMMENT 'Status',
+    `previous_status`        int(11)               DEFAULT '0' COMMENT 'Previous status',
+    `is_deleted`             tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
+    `creator`                varchar(64)  NOT NULL COMMENT 'Creator name',
+    `modifier`               varchar(64)           DEFAULT NULL COMMENT 'Modifier name',
+    `create_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
+    `modify_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
--- Table structure for storage_hive_field
+-- Table structure for data_storage_field
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive_field`;
-CREATE TABLE `storage_hive_field`
+DROP TABLE IF EXISTS `data_storage_field`;
+CREATE TABLE `data_storage_field`
 (
-    `id`                int(11)     NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `storage_id`        int(11)     NOT NULL COMMENT 'Hive data storage id',
-    `source_field_name` varchar(20) NOT NULL COMMENT 'source field name',
-    `source_field_type` varchar(20) NOT NULL COMMENT 'source field type',
-    `field_name`        varchar(20) NOT NULL COMMENT 'field name',
-    `field_type`        varchar(20) NOT NULL COMMENT 'field type',
-    `field_comment`     varchar(2000) DEFAULT '' COMMENT 'Field description',
-    `is_required`       tinyint(1)    DEFAULT NULL COMMENT 'Is it required, 0: not necessary, 1: required',
-    `bon_field_path`    varchar(256)  DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`    varchar(64)   DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`     varchar(20)   DEFAULT NULL COMMENT 'Encryption level',
-    `is_exist`          tinyint(1)    DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
+    `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'inlong group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'inlong stream id',
+    `storage_id`        int(11)      NOT NULL COMMENT 'data storage id',
+    `storage_type`      varchar(15)  NOT NULL COMMENT 'storage type',
+    `source_field_name` varchar(50)   DEFAULT NULL COMMENT 'source field name',
+    `source_field_type` varchar(50)   DEFAULT NULL COMMENT 'source field type',
+    `field_name`        varchar(50)  NOT NULL COMMENT 'field name',
+    `field_type`        varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`     varchar(2000) DEFAULT NULL COMMENT 'field description',
     `rank_num`          smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`)
 );
 
@@ -630,12 +611,12 @@ CREATE TABLE `task`
     `id`          bigint(20)   NOT NULL,
     `taskflow_id` bigint(20)   NOT NULL COMMENT 'Owning task flow id',
     `task_def_id` bigint(20)    DEFAULT NULL COMMENT 'task definition id',
-    `task_name`   varchar(255) NOT NULL COMMENT 'task name',
-    `status`      varchar(255)  DEFAULT NULL COMMENT 'task status',
-    `post_param`  varchar(255)  DEFAULT NULL COMMENT 'Task parameters',
+    `task_name`   varchar(256) NOT NULL COMMENT 'task name',
+    `status`      varchar(256)  DEFAULT NULL COMMENT 'task status',
+    `post_param`  varchar(256)  DEFAULT NULL COMMENT 'Task parameters',
     `resultmsg`   varchar(1000) DEFAULT NULL COMMENT 'Execution result log',
     `create_time` datetime     NOT NULL COMMENT 'Create time',
-    `create_by`   varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`   varchar(256) NOT NULL COMMENT 'creator',
     `update_time` datetime      DEFAULT NULL COMMENT 'last modified time',
     `update_by`   varchar(0)    DEFAULT NULL COMMENT 'last modified person',
     PRIMARY KEY (`id`)
@@ -650,10 +631,10 @@ CREATE TABLE `task_def`
     `id`              bigint(20)   NOT NULL,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Task flow definition id',
     `parent_id`       bigint(20)   DEFAULT NULL COMMENT 'parent task id',
-    `implclass`       varchar(255) DEFAULT NULL COMMENT 'task processing flow class',
-    `task_name`       varchar(255) DEFAULT NULL COMMENT 'task name',
+    `implclass`       varchar(256) DEFAULT NULL COMMENT 'task processing flow class',
+    `task_name`       varchar(256) DEFAULT NULL COMMENT 'task name',
     `create_time`     datetime     NOT NULL COMMENT 'Create time',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
     `update_by`       datetime     DEFAULT NULL COMMENT 'last modified person',
     `delivery_id`     bigint(20)   DEFAULT NULL COMMENT 'Task push method',
@@ -668,12 +649,12 @@ CREATE TABLE `taskflow`
 (
     `id`              bigint(20)   NOT NULL AUTO_INCREMENT,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Taskflow definition id',
-    `status`          varchar(255) DEFAULT NULL COMMENT 'status',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `status`          varchar(256) DEFAULT NULL COMMENT 'status',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `create_time`     datetime     DEFAULT NULL COMMENT 'Create time',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
-    `update_by`       varchar(255) DEFAULT NULL COMMENT 'last modified person',
-    `event`           varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `update_by`       varchar(256) DEFAULT NULL COMMENT 'last modified person',
+    `event`           varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -684,12 +665,12 @@ DROP TABLE IF EXISTS `taskflow_def`;
 CREATE TABLE `taskflow_def`
 (
     `id`            bigint(20)   NOT NULL AUTO_INCREMENT,
-    `name`          varchar(255) NOT NULL COMMENT 'Workflow definition name',
-    `descrip`       varchar(255) DEFAULT NULL COMMENT 'Workflow function description',
+    `name`          varchar(256) NOT NULL COMMENT 'Workflow definition name',
+    `descrip`       varchar(256) DEFAULT NULL COMMENT 'Workflow function description',
     `create_time`   datetime     NOT NULL COMMENT 'Create time',
-    `create_by`     varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`     varchar(256) NOT NULL COMMENT 'creator',
     `isValid`       int(11)      DEFAULT NULL COMMENT 'logical deletion',
-    `trigger_event` varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `trigger_event` varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -700,14 +681,14 @@ DROP TABLE IF EXISTS `user`;
 CREATE TABLE `user`
 (
     `id`           int(11)      NOT NULL AUTO_INCREMENT,
-    `name`         varchar(255) NOT NULL COMMENT 'account name',
+    `name`         varchar(256) NOT NULL COMMENT 'account name',
     `password`     varchar(64)  NOT NULL COMMENT 'password md5',
     `account_type` int(11)      NOT NULL DEFAULT '1' COMMENT 'account type, 0-manager 1-normal',
     `due_date`     datetime              DEFAULT NULL COMMENT 'due date for account',
     `create_time`  datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `update_time`  datetime              DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
-    `create_by`    varchar(255) NOT NULL COMMENT 'create by sb.',
-    `update_by`    varchar(255)          DEFAULT NULL COMMENT 'update by sb.',
+    `create_by`    varchar(256) NOT NULL COMMENT 'create by sb.',
+    `update_by`    varchar(256)          DEFAULT NULL COMMENT 'update by sb.',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_user_name` (`name`)
 );
@@ -724,12 +705,12 @@ DROP TABLE IF EXISTS `user_role`;
 CREATE TABLE `user_role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
-    `user_name`   varchar(255) NOT NULL COMMENT 'username rtx',
-    `role_code`   varchar(255) NOT NULL COMMENT 'role',
+    `user_name`   varchar(256) NOT NULL COMMENT 'username rtx',
+    `role_code`   varchar(256) NOT NULL COMMENT 'role',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`)
 );
@@ -741,11 +722,11 @@ DROP TABLE IF EXISTS `wf_approver`;
 CREATE TABLE `wf_approver`
 (
     `id`                int(11)       NOT NULL AUTO_INCREMENT,
-    `process_name`      varchar(255)  NOT NULL COMMENT 'process definition name',
-    `task_name`         varchar(255)  NOT NULL COMMENT 'Approval task name',
+    `process_name`      varchar(256)  NOT NULL COMMENT 'process definition name',
+    `task_name`         varchar(256)  NOT NULL COMMENT 'Approval task name',
     `filter_key`        varchar(64)   NOT NULL COMMENT 'filter condition KEY',
-    `filter_value`      varchar(255)           DEFAULT NULL COMMENT 'Filter matching value',
-    `filter_value_desc` varchar(255)           DEFAULT NULL COMMENT 'Filter value description',
+    `filter_value`      varchar(256)           DEFAULT NULL COMMENT 'Filter matching value',
+    `filter_value_desc` varchar(256)           DEFAULT NULL COMMENT 'Filter value description',
     `approvers`         varchar(1024) NOT NULL COMMENT 'Approvers, separated by commas',
     `creator`           varchar(64)   NOT NULL COMMENT 'creator',
     `modifier`          varchar(64)   NOT NULL COMMENT 'modifier',
@@ -772,12 +753,12 @@ CREATE TABLE `wf_event_log`
 (
     `id`                   int(11)      NOT NULL AUTO_INCREMENT,
     `process_inst_id`      int(11)      NOT NULL,
-    `process_name`         varchar(255)  DEFAULT NULL COMMENT 'Process name',
-    `process_display_name` varchar(255) NOT NULL COMMENT 'Process name',
-    `inlong_group_id`      varchar(128)  DEFAULT NULL COMMENT 'Business group id',
+    `process_name`         varchar(256)  DEFAULT NULL COMMENT 'Process name',
+    `process_display_name` varchar(256) NOT NULL COMMENT 'Process name',
+    `inlong_group_id`      varchar(256)  DEFAULT NULL COMMENT 'Business group id',
     `task_inst_id`         int(11)       DEFAULT NULL COMMENT 'Task ID',
-    `element_name`         varchar(255) NOT NULL COMMENT 'The name of the component that triggered the event',
-    `element_display_name` varchar(255) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
+    `element_name`         varchar(256) NOT NULL COMMENT 'The name of the component that triggered the event',
+    `element_display_name` varchar(256) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
     `event_type`           varchar(64)  NOT NULL COMMENT 'Event type: process event/task event',
     `event`                varchar(64)  NOT NULL COMMENT 'Event name',
     `listener`             varchar(1024) DEFAULT NULL COMMENT 'Event listener name',
@@ -798,12 +779,12 @@ DROP TABLE IF EXISTS `wf_process_instance`;
 CREATE TABLE `wf_process_instance`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT,
-    `name`            varchar(255) NOT NULL COMMENT 'process name',
-    `display_name`    varchar(255) NOT NULL COMMENT 'Process display name',
-    `type`            varchar(255)          DEFAULT NULL COMMENT 'Process classification',
-    `title`           varchar(255)          DEFAULT NULL COMMENT 'Process title',
-    `inlong_group_id` varchar(128)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
-    `applicant`       varchar(255) NOT NULL COMMENT 'applicant',
+    `name`            varchar(256) NOT NULL COMMENT 'process name',
+    `display_name`    varchar(256) NOT NULL COMMENT 'Process display name',
+    `type`            varchar(256)          DEFAULT NULL COMMENT 'Process classification',
+    `title`           varchar(256)          DEFAULT NULL COMMENT 'Process title',
+    `inlong_group_id` varchar(256)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
+    `applicant`       varchar(256) NOT NULL COMMENT 'applicant',
     `state`           varchar(64)  NOT NULL COMMENT 'state',
     `form_data`       mediumtext COMMENT 'form information',
     `start_time`      datetime     NOT NULL COMMENT 'start time',
@@ -822,14 +803,14 @@ CREATE TABLE `wf_task_instance`
     `id`                   int(11)       NOT NULL AUTO_INCREMENT,
     `type`                 varchar(64)   NOT NULL COMMENT 'Task type: UserTask user task/ServiceTask system task',
     `process_inst_id`      int(11)       NOT NULL COMMENT 'process ID',
-    `process_name`         varchar(255)  NOT NULL COMMENT 'process name',
-    `process_display_name` varchar(255)  NOT NULL COMMENT 'process name',
-    `name`                 varchar(255)  NOT NULL COMMENT 'task name',
-    `display_name`         varchar(255)  NOT NULL COMMENT 'Task display name',
+    `process_name`         varchar(256)  NOT NULL COMMENT 'process name',
+    `process_display_name` varchar(256)  NOT NULL COMMENT 'process name',
+    `name`                 varchar(256)  NOT NULL COMMENT 'task name',
+    `display_name`         varchar(256)  NOT NULL COMMENT 'Task display name',
     `applicant`            varchar(64)   DEFAULT NULL COMMENT 'applicant',
     `approvers`            varchar(1024) NOT NULL COMMENT 'approvers',
     `state`                varchar(64)   NOT NULL COMMENT 'state',
-    `operator`             varchar(255)  DEFAULT NULL COMMENT 'actual operator',
+    `operator`             varchar(256)  DEFAULT NULL COMMENT 'actual operator',
     `remark`               varchar(1024) DEFAULT NULL COMMENT 'Remark information',
     `form_data`            mediumtext COMMENT 'form information submitted by the current task',
     `start_time`           datetime      NOT NULL COMMENT 'start time',
@@ -869,7 +850,7 @@ CREATE TABLE `cluster_set_inlongid`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
     `set_name`        varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_cluster_set_inlongid` (`set_name`, `inlong_group_id`)
 );
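
The schema change above replaces the column-per-setting `storage_hive` table with a generic `data_storage` table whose type-specific settings live in the `ext_params` JSON string. A minimal sketch of that round trip, assuming a hypothetical HiveParams holder class (not a class from this commit):

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ExtParamsDemo {

        // Hypothetical holder for Hive settings that used to be dedicated columns
        public static class HiveParams {
            public String jdbcUrl;
            public String dbName;
            public String tableName;
        }

        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();

            HiveParams params = new HiveParams();
            params.jdbcUrl = "jdbc:hive2://127.0.0.1:10000";
            params.dbName = "test_db";
            params.tableName = "test_table";

            // Serialize to the JSON string stored in data_storage.ext_params
            String extParams = mapper.writeValueAsString(params);

            // Deserialize again when the storage config is read back
            HiveParams restored = mapper.readValue(extParams, HiveParams.class);
            System.out.println(restored.tableName);
        }
    }

With this layout, adding a new storage type needs no ALTER TABLE: only the JSON payload and the code that reads it change.
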
diff --git a/inlong-manager/manager-service/pom.xml b/inlong-manager/manager-service/pom.xml
index 6fd6f54..6f15b82 100644
--- a/inlong-manager/manager-service/pom.xml
+++ b/inlong-manager/manager-service/pom.xml
@@ -58,6 +58,14 @@
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-logging</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-core</artifactId>
+        </dependency>
 
         <dependency>
             <groupId>org.apache.hadoop</groupId>
@@ -133,6 +141,10 @@
             <artifactId>jackson-dataformat-yaml</artifactId>
         </dependency>
         <dependency>
+            <groupId>org.reflections</groupId>
+            <artifactId>reflections</artifactId>
+        </dependency>
+        <dependency>
             <groupId>org.mockito</groupId>
             <artifactId>mockito-core</artifactId>
             <scope>test</scope>
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/CommandLineRunnerImpl.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/CommandLineRunnerImpl.java
new file mode 100644
index 0000000..b4910e3
--- /dev/null
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/CommandLineRunnerImpl.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.manager.service;
+
+import com.fasterxml.jackson.annotation.JsonTypeInfo;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.jsontype.NamedType;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.inlong.manager.common.util.JsonTypeDefine;
+import org.reflections.Reflections;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.CommandLineRunner;
+import org.springframework.stereotype.Component;
+
+import java.lang.reflect.Modifier;
+import java.util.Set;
+
+/**
+ * Command line runner that registers all JSON subtypes on startup for polymorphic (de)serialization
+ */
+@Component
+public class CommandLineRunnerImpl implements CommandLineRunner {
+
+    private static final String PROJECT_PACKAGE = "org.apache.inlong.manager.common.pojo";
+
+    @Autowired
+    private ObjectMapper objectMapper;
+
+    @Override
+    public void run(String[] args) {
+        this.initJsonTypeDefine();
+    }
+
+    /**
+     * Initialize all classes marked with the JsonTypeInfo annotation
+     */
+    private void initJsonTypeDefine() {
+        Reflections reflections = new Reflections(PROJECT_PACKAGE);
+        Set<Class<?>> typeSet = reflections.getTypesAnnotatedWith(JsonTypeInfo.class);
+
+        // Get all subtypes of each class marked with the JsonTypeInfo annotation
+        for (Class<?> type : typeSet) {
+            Set<?> clazzSet = reflections.getSubTypesOf(type);
+            if (CollectionUtils.isEmpty(clazzSet)) {
+                continue;
+            }
+            // Register all subclasses
+            for (Object obj : clazzSet) {
+                Class<?> clazz = (Class<?>) obj;
+                // Skip interfaces and abstract classes
+                if (clazz.isInterface() || Modifier.isAbstract(clazz.getModifiers())) {
+                    continue;
+                }
+                // Get the JsonTypeDefine annotation
+                JsonTypeDefine extendClassDefine = clazz.getAnnotation(JsonTypeDefine.class);
+                if (extendClassDefine == null) {
+                    continue;
+                }
+                // Register the subtype, using a NamedType to bind it to its JSON type name
+                objectMapper.registerSubtypes(new NamedType(clazz, extendClassDefine.value()));
+            }
+        }
+    }
+
+}
+
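
A sketch of what the runner above enables, using hypothetical class and property names (they are illustrative, not taken from this commit): once a subtype is registered under a name, Jackson can pick the concrete class from a type property in the JSON.

    import com.fasterxml.jackson.annotation.JsonTypeInfo;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.jsontype.NamedType;

    public class SubtypeDemo {

        // Base type: Jackson stores/reads the concrete type in a "storageType" property
        @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "storageType")
        public abstract static class StorageRequestBase {
        }

        // A subtype that would carry a JsonTypeDefine("HIVE") annotation in the manager
        public static class HiveRequest extends StorageRequestBase {
            public String tableName;
        }

        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            // The same call the runner issues for every annotated subtype it finds
            mapper.registerSubtypes(new NamedType(HiveRequest.class, "HIVE"));

            String json = "{\"storageType\": \"HIVE\", \"tableName\": \"demo\"}";
            StorageRequestBase request = mapper.readValue(json, StorageRequestBase.class);
            System.out.println(request.getClass().getSimpleName()); // HiveRequest
        }
    }

Because the registration is driven by classpath scanning, a new storage type only has to add an annotated subclass; no central registry needs editing.
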
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/DataStreamService.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/DataStreamService.java
index 8070d0f..4ddcbd1 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/DataStreamService.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/DataStreamService.java
@@ -18,16 +18,17 @@
 package org.apache.inlong.manager.service.core;
 
 import com.github.pagehelper.PageInfo;
-import java.util.List;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamApproveInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamListVO;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamPageRequest;
-import org.apache.inlong.manager.common.pojo.datastream.DataStreamSummaryInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamTopicVO;
-import org.apache.inlong.manager.common.pojo.datastream.FullStreamRequest;
 import org.apache.inlong.manager.common.pojo.datastream.FullPageUpdateInfo;
+import org.apache.inlong.manager.common.pojo.datastream.FullStreamRequest;
 import org.apache.inlong.manager.common.pojo.datastream.FullStreamResponse;
+import org.apache.inlong.manager.common.pojo.datastream.StreamBriefResponse;
+
+import java.util.List;
 
 /**
  * data stream service layer interface
@@ -105,7 +106,7 @@ public interface DataStreamService {
      * @param groupId Business group id
      * @return Summary list of data stream
      */
-    List<DataStreamSummaryInfo> getSummaryList(String groupId);
+    List<StreamBriefResponse> getBriefList(String groupId);
 
     /**
      * Save all information related to the data stream, its data source, and data storage
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/BusinessProcessOperation.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/BusinessProcessOperation.java
index 8cbf256..a315651 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/BusinessProcessOperation.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/BusinessProcessOperation.java
@@ -18,14 +18,12 @@
 package org.apache.inlong.manager.service.core.impl;
 
 import com.google.common.collect.Sets;
-import java.util.List;
-import java.util.Set;
 import org.apache.inlong.manager.common.enums.BizConstant;
 import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
 import org.apache.inlong.manager.common.enums.EntityStatus;
 import org.apache.inlong.manager.common.exceptions.BusinessException;
 import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
-import org.apache.inlong.manager.common.pojo.datastream.DataStreamSummaryInfo;
+import org.apache.inlong.manager.common.pojo.datastream.StreamBriefResponse;
 import org.apache.inlong.manager.common.util.Preconditions;
 import org.apache.inlong.manager.service.core.BusinessService;
 import org.apache.inlong.manager.service.core.DataStreamService;
@@ -40,6 +38,9 @@ import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 
+import java.util.List;
+import java.util.Set;
+
 /**
  * Operation related to business access process
  */
@@ -144,7 +145,7 @@ public class BusinessProcessOperation {
         form.setBusinessInfo(businessInfo);
 
         // Query all data streams under the groupId and the storage information of each data stream
-        List<DataStreamSummaryInfo> infoList = streamService.getSummaryList(businessInfo.getInlongGroupId());
+        List<StreamBriefResponse> infoList = streamService.getBriefList(businessInfo.getInlongGroupId());
         form.setStreamInfoList(infoList);
 
         return form;
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/DataStreamServiceImpl.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/DataStreamServiceImpl.java
index 9713e48..04b59e3 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/DataStreamServiceImpl.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/DataStreamServiceImpl.java
@@ -20,12 +20,6 @@ package org.apache.inlong.manager.service.core.impl;
 import com.github.pagehelper.Page;
 import com.github.pagehelper.PageHelper;
 import com.github.pagehelper.PageInfo;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.stream.Collectors;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.inlong.manager.common.enums.BizConstant;
@@ -36,20 +30,20 @@ import org.apache.inlong.manager.common.pojo.datasource.SourceDbBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceDbDetailInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileBasicInfo;
 import org.apache.inlong.manager.common.pojo.datasource.SourceFileDetailInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamApproveInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamExtInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamFieldInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamListVO;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamPageRequest;
-import org.apache.inlong.manager.common.pojo.datastream.DataStreamSummaryInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamTopicVO;
 import org.apache.inlong.manager.common.pojo.datastream.FullPageUpdateInfo;
 import org.apache.inlong.manager.common.pojo.datastream.FullStreamRequest;
 import org.apache.inlong.manager.common.pojo.datastream.FullStreamResponse;
+import org.apache.inlong.manager.common.pojo.datastream.StreamBriefResponse;
 import org.apache.inlong.manager.common.util.CommonBeanUtils;
 import org.apache.inlong.manager.common.util.Preconditions;
 import org.apache.inlong.manager.dao.entity.BusinessEntity;
@@ -63,13 +57,20 @@ import org.apache.inlong.manager.dao.mapper.DataStreamFieldEntityMapper;
 import org.apache.inlong.manager.service.core.DataStreamService;
 import org.apache.inlong.manager.service.core.SourceDbService;
 import org.apache.inlong.manager.service.core.SourceFileService;
-import org.apache.inlong.manager.service.core.StorageService;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 import org.springframework.transaction.annotation.Transactional;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Date;
+import java.util.List;
+import java.util.Locale;
+import java.util.stream.Collectors;
+
 /**
  * Data stream service layer implementation
  */
@@ -190,7 +191,7 @@ public class DataStreamServiceImpl implements DataStreamService {
         if (StringUtils.isNotEmpty(storageType)) {
             List<String> streamIdList = dataStreamList.stream().map(DataStreamListVO::getInlongStreamId)
                     .distinct().collect(Collectors.toList());
-            List<String> resultList = storageService.filterStreamIdByStorageType(groupId, storageType, streamIdList);
+            List<String> resultList = storageService.getExistsStreamIdList(groupId, storageType, streamIdList);
             dataStreamList.removeIf(entity -> resultList.contains(entity.getInlongStreamId()));
         }
 
@@ -270,8 +271,8 @@ public class DataStreamServiceImpl implements DataStreamService {
         }
 
         // If there is undeleted data storage information, the deletion fails
-        boolean dataStorageExist = hasDataStorage(groupId, streamId);
-        if (dataStorageExist) {
+        int storageCount = storageService.getCount(groupId, streamId);
+        if (storageCount > 0) {
             LOGGER.error("data stream has undeleted data storages, delete failed");
             throw new BusinessException(BizErrorCodeEnum.DATA_STREAM_DELETE_HAS_STORAGE);
         }
@@ -321,7 +322,7 @@ public class DataStreamServiceImpl implements DataStreamService {
             sourceFileService.logicDeleteAllByIdentifier(groupId, streamId, operator);
             sourceDbService.logicDeleteAllByIdentifier(groupId, streamId, operator);
             // Logical deletion of associated data storage information
-            storageService.logicDeleteAllByIdentifier(groupId, streamId, operator);
+            storageService.logicDeleteAll(groupId, streamId, operator);
         }
 
         LOGGER.info("success to delete all data stream, ext property and fields by groupId={}", groupId);
@@ -329,14 +330,6 @@ public class DataStreamServiceImpl implements DataStreamService {
     }
 
     /**
-     * According to groupId and streamId, query the number of associated undeleted data storage
-     */
-    private boolean hasDataStorage(String groupId, String streamId) {
-        Integer count = storageService.getCountByIdentifier(groupId, streamId);
-        return count > 0;
-    }
-
-    /**
      * According to groupId and streamId, query whether there are undeleted data sources
      */
     private boolean hasDataSource(String groupId, String streamId, String dataSourceType) {
@@ -354,23 +347,23 @@ public class DataStreamServiceImpl implements DataStreamService {
     }
 
     @Override
-    public List<DataStreamSummaryInfo> getSummaryList(String groupId) {
-        LOGGER.debug("begin to get data stream summary list by groupId={}", groupId);
+    public List<StreamBriefResponse> getBriefList(String groupId) {
+        LOGGER.debug("begin to get data stream brief list by groupId={}", groupId);
         Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
 
         List<DataStreamEntity> entityList = streamMapper.selectByGroupId(groupId);
-        List<DataStreamSummaryInfo> summaryInfoList = CommonBeanUtils
-                .copyListProperties(entityList, DataStreamSummaryInfo::new);
+        List<StreamBriefResponse> briefInfoList = CommonBeanUtils
+                .copyListProperties(entityList, StreamBriefResponse::new);
 
         // Query data storage based on groupId and streamId
-        for (DataStreamSummaryInfo summaryInfo : summaryInfoList) {
-            String streamId = summaryInfo.getInlongStreamId();
-            List<StorageSummaryInfo> storageList = storageService.listSummaryByIdentifier(groupId, streamId);
-            summaryInfo.setStorageList(storageList);
+        for (StreamBriefResponse briefInfo : briefInfoList) {
+            String streamId = briefInfo.getInlongStreamId();
+            List<StorageBriefResponse> storageList = storageService.listBrief(groupId, streamId);
+            briefInfo.setStorageList(storageList);
         }
 
-        LOGGER.info("success to get data stream summary list for groupId={}", groupId);
-        return summaryInfoList;
+        LOGGER.info("success to get data stream brief list for groupId={}", groupId);
+        return briefInfoList;
     }
 
     @Transactional(rollbackFor = Throwable.class)
@@ -411,7 +404,7 @@ public class DataStreamServiceImpl implements DataStreamService {
 
         // 3. Save data storage information
         if (CollectionUtils.isNotEmpty(fullStreamRequest.getStorageInfo())) {
-            for (BaseStorageRequest storageInfo : fullStreamRequest.getStorageInfo()) {
+            for (StorageRequest storageInfo : fullStreamRequest.getStorageInfo()) {
                 storageService.save(storageInfo, operator);
             }
         }
@@ -453,7 +446,7 @@ public class DataStreamServiceImpl implements DataStreamService {
             sourceDbService.deleteAllByIdentifier(groupId, streamId);
 
             // 3. Delete data storage information
-            storageService.deleteAllByIdentifier(groupId, streamId);
+            storageService.deleteAll(groupId, streamId, operator);
 
             // 4. Save the data stream of this batch
             this.saveAll(pageInfo, operator);
@@ -522,7 +515,7 @@ public class DataStreamServiceImpl implements DataStreamService {
             }
 
             // 4. Query various data storage and its extended information, field information
-            List<BaseStorageResponse> storageInfoList = storageService.listByIdentifier(groupId, streamId);
+            List<StorageResponse> storageInfoList = storageService.listStorage(groupId, streamId);
             pageInfo.setStorageInfo(storageInfoList);
 
             // 5. Add a single result to the paginated list
@@ -774,8 +767,8 @@ public class DataStreamServiceImpl implements DataStreamService {
             // Whether there is an undeleted data source
             boolean dataSourceExist = hasDataSource(groupId, streamId, streamInfo.getDataSourceType());
             // Whether there is undeleted data storage
-            boolean dataStorageExist = hasDataStorage(groupId, streamId);
-            if (dataSourceExist || dataStorageExist) {
+            int storageCount = storageService.getCount(groupId, streamId);
+            if (dataSourceExist || storageCount > 0) {
                 checkUpdatedFields(streamEntity, streamInfo);
             }
         }
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageBaseOperation.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageBaseOperation.java
deleted file mode 100644
index 56b7c6b..0000000
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageBaseOperation.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.manager.service.core.impl;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import java.util.Date;
-import java.util.List;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
-import java.util.concurrent.TimeUnit;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
-import org.apache.inlong.manager.common.enums.EntityStatus;
-import org.apache.inlong.manager.common.exceptions.BusinessException;
-import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageExtInfo;
-import org.apache.inlong.manager.common.util.CommonBeanUtils;
-import org.apache.inlong.manager.dao.entity.BusinessEntity;
-import org.apache.inlong.manager.dao.entity.StorageExtEntity;
-import org.apache.inlong.manager.dao.mapper.BusinessEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageExtEntityMapper;
-import org.apache.inlong.manager.service.workflow.ProcessName;
-import org.apache.inlong.manager.service.workflow.WorkflowService;
-import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
-import org.apache.inlong.manager.service.workflow.business.NewBusinessWorkflowForm;
-import org.apache.inlong.manager.service.workflow.stream.CreateStreamWorkflowDefinition;
-import org.apache.inlong.manager.common.util.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.transaction.annotation.Transactional;
-
-/**
- * Base operation class for HIVE/THIVE data storage
- */
-public class StorageBaseOperation {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(StorageBaseOperation.class);
-
-    public final ExecutorService executorService = new ThreadPoolExecutor(
-            10,
-            20,
-            0L,
-            TimeUnit.MILLISECONDS,
-            new ArrayBlockingQueue<>(100),
-            new ThreadFactoryBuilder().setNameFormat("data-stream-workflow-%s").build(),
-            new CallerRunsPolicy());
-
-    @Autowired
-    private BusinessEntityMapper businessMapper;
-    @Autowired
-    private StorageExtEntityMapper storageExtMapper;
-    @Autowired
-    private WorkflowService workflowService;
-    @Autowired
-    private BusinessProcessOperation businessProcessOperation;
-
-    /**
-     * Initiate business approval process
-     *
-     * @param operator Operator
-     * @param businessEntity Business entity
-     */
-    public void startBusinessProcess(String operator, BusinessEntity businessEntity) {
-        BusinessInfo businessInfo = CommonBeanUtils.copyProperties(businessEntity, BusinessInfo::new);
-
-        NewBusinessWorkflowForm form = businessProcessOperation.genNewBusinessWorkflowForm(businessInfo);
-
-        workflowService.start(ProcessName.NEW_BUSINESS_WORKFLOW, operator, form);
-    }
-
-    /**
-     * Check whether the business status is temporary
-     *
-     * @param groupId Business group id
-     * @return Business entity, for caller reuse
-     */
-    public BusinessEntity checkBizIsTempStatus(String groupId) {
-        BusinessEntity businessEntity = businessMapper.selectByIdentifier(groupId);
-        Preconditions.checkNotNull(businessEntity, "groupId is invalid");
-        // Add/modify/delete is not allowed under certain business statuses
-        if (EntityStatus.BIZ_TEMP_STATUS.contains(businessEntity.getStatus())) {
-            LOGGER.error("business status was not allowed to add/update/delete data storage");
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_OPT_NOT_ALLOWED);
-        }
-
-        return businessEntity;
-    }
-
-    /**
-     * Update extended information
-     * <p/>First physically delete the existing extended information, and then add this batch of extended information
-     *
-     * @param storageType Storage type
-     * @param storageId Storage ID
-     * @param extList Extended information list
-     */
-    @Transactional(rollbackFor = Throwable.class)
-    public void updateExtOpt(String storageType, Integer storageId, List<StorageExtInfo> extList) {
-        LOGGER.info("begin to update data storage ext={}", extList);
-        try {
-            storageExtMapper.deleteByStorageTypeAndId(storageType, storageId);
-            saveExtOpt(storageType, storageId, extList);
-        } catch (Exception e) {
-            LOGGER.error("failed to update data storage ext: ", e);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_SAVE_FAILED);
-        }
-    }
-
-    /**
-     * Save extended information
-     *
-     * @param storageType Data storage type
-     * @param storageId Data storage ID
-     * @param extList Extended information list
-     */
-    public void saveExtOpt(String storageType, int storageId, List<StorageExtInfo> extList) {
-        if (CollectionUtils.isEmpty(extList)) {
-            return;
-        }
-        LOGGER.info("begin to save storage ext={}", extList);
-        Date date = new Date();
-        for (StorageExtInfo extInfo : extList) {
-            StorageExtEntity extEntity = CommonBeanUtils.copyProperties(extInfo, StorageExtEntity::new);
-            extEntity.setStorageId(storageId);
-            extEntity.setStorageType(storageType);
-            extEntity.setModifyTime(date);
-            storageExtMapper.insert(extEntity);
-        }
-        LOGGER.info("success to save storage ext");
-    }
-
-    /**
-     * Asynchronously initiate a single data stream related workflow
-     *
-     * @see CreateStreamWorkflowDefinition
-     */
-    class WorkflowStartRunnable implements Runnable {
-
-        private final String operator;
-        private final BusinessEntity businessEntity;
-        private final String streamId;
-
-        public WorkflowStartRunnable(String operator, BusinessEntity businessEntity, String streamId) {
-            this.operator = operator;
-            this.businessEntity = businessEntity;
-            this.streamId = streamId;
-        }
-
-        @Override
-        public void run() {
-            String groupId = businessEntity.getInlongGroupId();
-            LOGGER.info("begin start data stream workflow, groupId={}, streamId={}", groupId, streamId);
-
-            BusinessInfo businessInfo = CommonBeanUtils.copyProperties(businessEntity, BusinessInfo::new);
-            BusinessResourceWorkflowForm form = genBizResourceWorkflowForm(businessInfo, streamId);
-
-            workflowService.start(ProcessName.CREATE_DATASTREAM_RESOURCE, operator, form);
-            LOGGER.info("success start data stream workflow, groupId={}, streamId={}", groupId, streamId);
-        }
-
-        /**
-         * Generate [Create Business Resource] form
-         */
-        private BusinessResourceWorkflowForm genBizResourceWorkflowForm(BusinessInfo businessInfo, String streamId) {
-            BusinessResourceWorkflowForm form = new BusinessResourceWorkflowForm();
-            form.setBusinessInfo(businessInfo);
-            form.setInlongStreamId(streamId);
-            return form;
-        }
-    }
-}
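The executor built by this deleted base class is still a useful reference for
how workflow starts were throttled: a bounded pool whose CallerRunsPolicy
pushes overflow work back onto the submitting thread instead of rejecting it.
The same construction, reduced to a runnable sketch with the pool sizes taken
from the code above:

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
    import java.util.concurrent.TimeUnit;

    public class WorkflowExecutorDemo {

        public static void main(String[] args) {
            // 10 core threads, at most 20, a queue of 100 pending tasks; when
            // pool and queue are both full, CallerRunsPolicy runs the task on
            // the submitting thread, which naturally slows the producer down
            ExecutorService executorService = new ThreadPoolExecutor(
                    10, 20, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(100),
                    new ThreadFactoryBuilder().setNameFormat("data-stream-workflow-%s").build(),
                    new CallerRunsPolicy());

            executorService.execute(() ->
                    System.out.println("running on " + Thread.currentThread().getName()));
            executorService.shutdown();
        }
    }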
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageHiveOperation.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageHiveOperation.java
deleted file mode 100644
index e5cc1bd..0000000
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageHiveOperation.java
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.manager.service.core.impl;
-
-import com.github.pagehelper.Page;
-import com.github.pagehelper.PageHelper;
-import com.github.pagehelper.PageInfo;
-import java.nio.charset.StandardCharsets;
-import java.util.Date;
-import java.util.List;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.inlong.manager.common.enums.BizConstant;
-import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
-import org.apache.inlong.manager.common.enums.EntityStatus;
-import org.apache.inlong.manager.common.exceptions.BusinessException;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageExtInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveFieldInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveListResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
-import org.apache.inlong.manager.common.util.CommonBeanUtils;
-import org.apache.inlong.manager.common.util.Preconditions;
-import org.apache.inlong.manager.dao.entity.DataStreamEntity;
-import org.apache.inlong.manager.dao.entity.StorageExtEntity;
-import org.apache.inlong.manager.dao.entity.StorageHiveEntity;
-import org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity;
-import org.apache.inlong.manager.dao.mapper.DataStreamEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageExtEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageHiveFieldEntityMapper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.BeanUtils;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Service;
-
-/**
- * Operation class for HIVE data storage
- */
-@Service
-public class StorageHiveOperation extends StorageBaseOperation {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(StorageHiveOperation.class);
-
-    @Autowired
-    private StorageHiveEntityMapper hiveStorageMapper;
-    @Autowired
-    private StorageExtEntityMapper storageExtMapper;
-    @Autowired
-    private StorageHiveFieldEntityMapper hiveFieldMapper;
-    @Autowired
-    private DataStreamEntityMapper dataStreamMapper;
-
-    /**
-     * Save HIVE storage information
-     *
-     * @param storageInfo Storage information
-     * @return Id after saving
-     */
-    public int saveHiveStorage(BaseStorageRequest storageInfo, String operator) {
-        String groupId = storageInfo.getInlongGroupId();
-        // Make sure that there is no HIVE storage information under the current groupId and streamId
-        // (the two are mutually exclusive, only one can exist)
-        List<StorageHiveEntity> storageExist = hiveStorageMapper
-                .selectByIdentifier(groupId, storageInfo.getInlongStreamId());
-        Preconditions.checkEmpty(storageExist, "HIVE storage already exist under the groupId and streamId");
-
-        StorageHiveRequest hiveInfo = (StorageHiveRequest) storageInfo;
-        StorageHiveEntity entity = CommonBeanUtils.copyProperties(hiveInfo, StorageHiveEntity::new);
-
-        // Set the encoding type and field splitter
-        DataStreamEntity streamEntity = dataStreamMapper.selectByIdentifier(groupId, entity.getInlongStreamId());
-        if (streamEntity == null) {
-            throw new BusinessException(BizErrorCodeEnum.DATA_STREAM_NOT_FOUND);
-        }
-        String dataEncoding = streamEntity.getDataEncoding() == null
-                ? StandardCharsets.UTF_8.displayName() : streamEntity.getDataEncoding();
-        entity.setDataEncoding(dataEncoding);
-        if (entity.getDataSeparator() == null) {
-            entity.setDataSeparator(streamEntity.getDataSeparator());
-        }
-
-        entity.setStatus(EntityStatus.DATA_STORAGE_NEW.getCode());
-        entity.setCreator(operator);
-        entity.setModifier(operator);
-        Date now = new Date();
-        entity.setCreateTime(now);
-        entity.setModifyTime(now);
-        hiveStorageMapper.insertSelective(entity);
-
-        int id = entity.getId();
-        // Save field information
-        this.saveHiveFieldOpt(id, hiveInfo.getHiveFieldList());
-        // Save extended information
-        String storageType = BizConstant.STORAGE_HIVE;
-        this.saveExtOpt(storageType, id, hiveInfo.getExtList());
-
-        return id;
-    }
-
-    /**
-     * According to groupId and streamId, query the HIVE storage information to which it belongs
-     */
-    public void setHiveStorageResponse(String groupId, String streamId, List<BaseStorageResponse> requestList) {
-        List<StorageHiveEntity> hiveEntities = hiveStorageMapper.selectByIdentifier(groupId, streamId);
-
-        if (CollectionUtils.isEmpty(hiveEntities)) {
-            return;
-        }
-
-        // Get extended information and field information, and encapsulate it in the result list
-        for (StorageHiveEntity hiveEntity : hiveEntities) {
-            Integer storageId = hiveEntity.getId();
-
-            String storageType;
-            List<StorageExtEntity> extEntities;
-
-            storageType = BizConstant.STORAGE_HIVE;
-            extEntities = storageExtMapper.selectByStorageTypeAndId(BizConstant.STORAGE_HIVE, storageId);
-
-            List<StorageHiveFieldEntity> fieldEntityList = hiveFieldMapper.selectByStorageId(storageId);
-            List<StorageHiveFieldInfo> fieldInfoList = CommonBeanUtils
-                    .copyListProperties(fieldEntityList, StorageHiveFieldInfo::new);
-
-            StorageHiveResponse hiveInfo = CommonBeanUtils.copyProperties(hiveEntity, StorageHiveResponse::new);
-            hiveInfo.setStorageType(storageType);
-            hiveInfo.setExtList(CommonBeanUtils.copyListProperties(extEntities, StorageExtInfo::new));
-            hiveInfo.setHiveFieldList(fieldInfoList);
-            requestList.add(hiveInfo);
-        }
-    }
-
-    /**
-     * Logically delete HIVE storage information based on business group id and data stream id
-     *
-     * @param groupId Business group id
-     * @param streamId Data stream id
-     * @param operator Operator
-     * @return Whether succeed
-     */
-    public boolean logicDeleteHiveByIdentifier(String groupId, String streamId, String operator) {
-        List<StorageHiveEntity> hiveEntityList = hiveStorageMapper.selectByIdentifier(groupId, streamId);
-        if (CollectionUtils.isNotEmpty(hiveEntityList)) {
-            hiveEntityList.forEach(entity -> {
-                entity.setIsDeleted(EntityStatus.IS_DELETED.getCode());
-                entity.setPreviousStatus(entity.getStatus());
-                entity.setStatus(EntityStatus.DELETED.getCode());
-                entity.setModifier(operator);
-                hiveStorageMapper.updateByPrimaryKey(entity);
-
-                // Logical deletion of extended information, field information
-                storageExtMapper.logicDeleteAll(entity.getId());
-                hiveFieldMapper.logicDeleteAll(entity.getId());
-            });
-        }
-
-        return true;
-    }
-
-    /**
-     * Logically delete Hive storage information based on the primary key
-     * <p/>If the business status is [Configuration successful], asynchronously initiate the
-     * single data stream resource creation workflow
-     *
-     * @param id Storage ID
-     * @param operator Operator
-     * @return Whether succeed
-     */
-    public boolean logicDeleteHiveStorage(Integer id, String operator) {
-        StorageHiveEntity entity = hiveStorageMapper.selectByPrimaryKey(id);
-        if (entity == null) {
-            LOGGER.error("hive storage not found by id={}, delete failed", id);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_INFO_NOT_FOUND);
-        }
-
-        super.checkBizIsTempStatus(entity.getInlongGroupId());
-
-        entity.setIsDeleted(EntityStatus.IS_DELETED.getCode());
-        entity.setPreviousStatus(entity.getStatus());
-        entity.setStatus(EntityStatus.DELETED.getCode());
-        entity.setModifier(operator);
-        int resultCount = hiveStorageMapper.updateByPrimaryKey(entity);
-
-        // Logical deletion of extended information, field information
-        storageExtMapper.logicDeleteAll(id);
-        hiveFieldMapper.logicDeleteAll(id);
-
-        return resultCount >= 0;
-    }
-
-    /**
-     * Physically delete HIVE storage information by business group id and data stream id
-     *
-     * @param groupId Business group id
-     * @param streamId Data stream id
-     * @return Whether succeed
-     */
-    public boolean deleteHiveByIdentifier(String groupId, String streamId) {
-        List<StorageHiveEntity> storageHiveEntities = hiveStorageMapper.selectByIdentifier(groupId, streamId);
-        if (CollectionUtils.isNotEmpty(storageHiveEntities)) {
-            storageHiveEntities.forEach(entity -> {
-                hiveStorageMapper.deleteByPrimaryKey(entity.getId());
-                hiveFieldMapper.deleteAllByStorageId(entity.getId());
-            });
-        }
-        return true;
-    }
-
-    /**
-     * Query HIVE storage information based on ID
-     *
-     * @param id Storage ID
-     * @return Storage information
-     */
-    public BaseStorageResponse getHiveStorage(Integer id) {
-        StorageHiveEntity entity = hiveStorageMapper.selectByPrimaryKey(id);
-        if (entity == null) {
-            LOGGER.error("hive storage not found by id={}", id);
-            return null;
-        }
-
-        StorageHiveResponse response = CommonBeanUtils.copyProperties(entity, StorageHiveResponse::new);
-        String storageType = BizConstant.STORAGE_HIVE;
-        List<StorageExtEntity> extEntityList = storageExtMapper.selectByStorageTypeAndId(storageType, id);
-        List<StorageExtInfo> extInfoList = CommonBeanUtils.copyListProperties(extEntityList, StorageExtInfo::new);
-        response.setExtList(extInfoList);
-
-        List<StorageHiveFieldEntity> entities = hiveFieldMapper.selectByStorageId(id);
-        List<StorageHiveFieldInfo> infos = CommonBeanUtils.copyListProperties(entities, StorageHiveFieldInfo::new);
-        response.setHiveFieldList(infos);
-
-        return response;
-    }
-
-    /**
-     * Query the storage list of HIVE according to conditions
-     *
-     * @param request Query conditions
-     * @return Store the paged results of the list
-     */
-    public PageInfo<StorageHiveListResponse> getHiveStorageList(StoragePageRequest request) {
-        LOGGER.info("begin to list hive storage page by {}", request);
-
-        PageHelper.startPage(request.getPageNum(), request.getPageSize());
-        Page<StorageHiveEntity> entityPage = (Page<StorageHiveEntity>) hiveStorageMapper.selectByCondition(request);
-        List<StorageHiveListResponse> detailList = CommonBeanUtils.copyListProperties(entityPage,
-                StorageHiveListResponse::new);
-
-        // Encapsulate the paging query results into the PageInfo object to obtain related paging information
-        PageInfo<StorageHiveListResponse> page = new PageInfo<>(detailList);
-        page.setTotal(entityPage.getTotal());
-
-        LOGGER.info("success to list hive storage");
-        return page;
-    }
-
-    /**
-     * Update Hive storage information
-     *
-     * @param bizStatus Business status, used to determine whether the field information can be modified
-     * @param storageInfo Storage information
-     * @param operator Operator
-     * @return Updated id
-     */
-    public Integer updateHiveStorage(Integer bizStatus, BaseStorageRequest storageInfo, String operator) {
-        StorageHiveRequest hiveInfo = (StorageHiveRequest) storageInfo;
-        // If the id exists, update it; otherwise, add a new one
-        Integer id = hiveInfo.getId();
-        if (id != null) {
-            StorageHiveEntity entity = hiveStorageMapper.selectByPrimaryKey(hiveInfo.getId());
-            if (entity == null) {
-                LOGGER.error("hive storage not found by id={}, update failed", id);
-                throw new BusinessException(BizErrorCodeEnum.STORAGE_INFO_NOT_FOUND);
-            }
-            BeanUtils.copyProperties(hiveInfo, entity);
-            entity.setStatus(EntityStatus.BIZ_CONFIG_ING.getCode());
-            entity.setModifier(operator);
-            hiveStorageMapper.updateByPrimaryKeySelective(entity);
-
-            super.updateExtOpt(hiveInfo.getStorageType(), id, hiveInfo.getExtList());
-            this.updateHiveFieldOpt(bizStatus, id, hiveInfo.getHiveFieldList());
-        } else {
-            id = this.saveHiveStorage(hiveInfo, operator);
-        }
-
-        return id;
-    }
-
-    /**
-     * Update Hive field
-     * <p/>First physically delete the existing field information, and then add this batch of field information
-     */
-    private void updateHiveFieldOpt(Integer bizStatus, Integer storageId, List<StorageHiveFieldInfo> fieldInfoList) {
-        if (CollectionUtils.isEmpty(fieldInfoList)) {
-            return;
-        }
-        LOGGER.info("begin to update hive field={}", fieldInfoList);
-
-        // When the business status is [Configuration successful], modification and deletion are not allowed,
-        // only adding is allowed, and the order of existing fields cannot be changed
-        if (EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode().equals(bizStatus)) {
-            List<StorageHiveFieldEntity> existsFieldList = hiveFieldMapper.selectByStorageId(storageId);
-            if (existsFieldList.size() > fieldInfoList.size()) {
-                LOGGER.error("current status was not allowed to update hive field");
-                throw new BusinessException(BizErrorCodeEnum.STORAGE_HIVE_FIELD_UPDATE_NOT_ALLOWED);
-            }
-            for (int i = 0; i < existsFieldList.size(); i++) {
-                if (!existsFieldList.get(i).getFieldName().equals(fieldInfoList.get(i).getFieldName())) {
-                    LOGGER.error("current status was not allowed to update hive field");
-                    throw new BusinessException(BizErrorCodeEnum.STORAGE_HIVE_FIELD_UPDATE_NOT_ALLOWED);
-                }
-            }
-        }
-
-        try {
-            hiveFieldMapper.deleteAllByStorageId(storageId);
-            saveHiveFieldOpt(storageId, fieldInfoList);
-            LOGGER.info("success to update hive field");
-        } catch (Exception e) {
-            LOGGER.error("failed to update hive field: ", e);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_HIVE_FIELD_SAVE_FAILED);
-        }
-    }
-
-    /**
-     * Save HIVE field
-     *
-     * @param storageId Primary key for storing information
-     * @param hiveFieldList Table field
-     */
-    private void saveHiveFieldOpt(int storageId, List<StorageHiveFieldInfo> hiveFieldList) {
-        if (CollectionUtils.isEmpty(hiveFieldList)) {
-            return;
-        }
-        LOGGER.info("begin to save hive field={}", hiveFieldList);
-        for (StorageHiveFieldInfo fieldInfo : hiveFieldList) {
-            StorageHiveFieldEntity fieldEntity = CommonBeanUtils.copyProperties(fieldInfo, StorageHiveFieldEntity::new);
-            if (StringUtils.isEmpty(fieldEntity.getFieldComment())) {
-                fieldEntity.setFieldComment(fieldEntity.getFieldName());
-            }
-            fieldEntity.setStorageId(storageId);
-            hiveFieldMapper.insert(fieldEntity);
-        }
-        LOGGER.info("success to save hive field");
-    }
-
-}
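One detail of this deleted class worth restating is the append-only guard in
updateHiveFieldOpt: once a business reaches [Configuration successful],
existing fields must keep their names and their order, and only new fields
may be appended. Distilled into a standalone check (a sketch over plain name
lists, not the real entity types):

    import java.util.Arrays;
    import java.util.List;

    public class AppendOnlyFieldCheck {

        // Throws if 'updated' deletes, renames, or reorders an existing field;
        // appending new names at the end is the only change allowed
        static void checkAppendOnly(List<String> existing, List<String> updated) {
            if (existing.size() > updated.size()) {
                throw new IllegalStateException("field deletion is not allowed");
            }
            for (int i = 0; i < existing.size(); i++) {
                if (!existing.get(i).equals(updated.get(i))) {
                    throw new IllegalStateException("field rename/reorder is not allowed");
                }
            }
        }

        public static void main(String[] args) {
            // Allowed: append a new field at the end
            checkAppendOnly(Arrays.asList("id", "name"), Arrays.asList("id", "name", "age"));
            // Rejected: existing fields were reordered
            checkAppendOnly(Arrays.asList("id", "name"), Arrays.asList("name", "id", "age"));
        }
    }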
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageServiceImpl.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageServiceImpl.java
deleted file mode 100644
index fca8ce2..0000000
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/impl/StorageServiceImpl.java
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.inlong.manager.service.core.impl;
-
-import com.github.pagehelper.PageInfo;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Locale;
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.inlong.manager.common.enums.BizConstant;
-import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
-import org.apache.inlong.manager.common.enums.EntityStatus;
-import org.apache.inlong.manager.common.exceptions.BusinessException;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageListResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
-import org.apache.inlong.manager.common.util.Preconditions;
-import org.apache.inlong.manager.dao.entity.BusinessEntity;
-import org.apache.inlong.manager.dao.entity.StorageHiveEntity;
-import org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper;
-import org.apache.inlong.manager.service.core.StorageService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.stereotype.Service;
-import org.springframework.transaction.annotation.Transactional;
-
-/**
- * Implementation of service layer interface for data storage
- */
-@Service
-public class StorageServiceImpl extends StorageBaseOperation implements StorageService {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(StorageServiceImpl.class);
-
-    @Autowired
-    private StorageHiveOperation hiveOperation;
-
-    @Autowired
-    private StorageHiveEntityMapper hiveStorageMapper;
-
-    @Transactional(rollbackFor = Throwable.class)
-    @Override
-    public Integer save(BaseStorageRequest storageInfo, String operator) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("begin to save storage info={}", storageInfo);
-        }
-
-        Preconditions.checkNotNull(storageInfo, "storage info is empty");
-        String groupId = storageInfo.getInlongGroupId();
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        String streamId = storageInfo.getInlongStreamId();
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        // Check if it can be added
-        BusinessEntity businessEntity = super.checkBizIsTempStatus(groupId);
-
-        // According to the storage type, save storage information
-        String storageType = storageInfo.getStorageType();
-        Preconditions.checkNotNull(storageType, "storageType is empty");
-
-        int id;
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            id = hiveOperation.saveHiveStorage(storageInfo, operator);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        // If the business status is [Configuration Successful], then asynchronously initiate
-        // the [Single data stream Resource Creation] workflow
-        if (EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode().equals(businessEntity.getStatus())) {
-            super.executorService.execute(new WorkflowStartRunnable(operator, businessEntity, streamId));
-        }
-
-        LOGGER.info("success to save storage info");
-        return id;
-    }
-
-    @Override
-    public BaseStorageResponse getById(String storageType, Integer id) {
-        LOGGER.debug("begin to get storage by storageType={}, id={}", storageType, id);
-        Preconditions.checkNotNull(id, "storage id is null");
-        Preconditions.checkNotNull(storageType, "storageType is empty");
-
-        BaseStorageResponse storageInfo;
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            storageInfo = hiveOperation.getHiveStorage(id);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        LOGGER.info("success to get storage info");
-        return storageInfo;
-    }
-
-    @Override
-    public Integer getCountByIdentifier(String groupId, String streamId) {
-        LOGGER.debug("begin to get storage count by groupId={}, streamId={}", groupId, streamId);
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        Integer count = hiveStorageMapper.selectCountByIdentifier(groupId, streamId);
-
-        LOGGER.info("the storage count={} by groupId={}, streamId={}", count, groupId, streamId);
-        return count;
-    }
-
-    @Override
-    public List<BaseStorageResponse> listByIdentifier(String groupId, String streamId) {
-        LOGGER.debug("begin to list storage by groupId={}, streamId={}", groupId, streamId);
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-
-        // Query HDFS, HIVE, ES storage information and encapsulate it in the result set
-        List<BaseStorageResponse> responseList = new ArrayList<>();
-        hiveOperation.setHiveStorageResponse(groupId, streamId, responseList);
-
-        LOGGER.info("success to list storage info");
-        return responseList;
-    }
-
-    @Override
-    public List<StorageSummaryInfo> listSummaryByIdentifier(String groupId, String streamId) {
-        LOGGER.debug("begin to list storage summary by groupId={}, streamId={}", groupId, streamId);
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        // Query HDFS, HIVE, ES storage information and encapsulate it in the result set
-        List<StorageSummaryInfo> totalList = new ArrayList<>();
-        List<StorageSummaryInfo> hiveSummaryList = hiveStorageMapper.selectSummary(groupId, streamId);
-
-        totalList.addAll(hiveSummaryList);
-
-        LOGGER.info("success to list storage summary");
-        return totalList;
-    }
-
-    @Override
-    public PageInfo<? extends BaseStorageListResponse> listByCondition(StoragePageRequest request) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("begin to list storage page by {}", request);
-        }
-        Preconditions.checkNotNull(request.getInlongGroupId(), BizConstant.GROUP_ID_IS_EMPTY);
-
-        String storageType = request.getStorageType();
-        Preconditions.checkNotNull(storageType, "storageType is empty");
-
-        PageInfo<? extends BaseStorageListResponse> page;
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            page = hiveOperation.getHiveStorageList(request);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        LOGGER.info("success to list storage page");
-        return page;
-    }
-
-    @Transactional(rollbackFor = Throwable.class)
-    @Override
-    public boolean update(BaseStorageRequest storageRequest, String operator) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("begin to update storage info={}", storageRequest);
-        }
-
-        Preconditions.checkNotNull(storageRequest, "storage info is empty");
-        String groupId = storageRequest.getInlongGroupId();
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        String streamId = storageRequest.getInlongStreamId();
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        // Check if it can be modified
-        BusinessEntity businessEntity = super.checkBizIsTempStatus(groupId);
-
-        String storageType = storageRequest.getStorageType();
-        Preconditions.checkNotNull(storageType, "storageType is empty");
-
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            hiveOperation.updateHiveStorage(businessEntity.getStatus(), storageRequest, operator);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        // The business status is [Configuration successful], then asynchronously initiate
-        // the [Single data stream resource creation] workflow
-        if (EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode().equals(businessEntity.getStatus())) {
-            super.executorService.execute(new WorkflowStartRunnable(operator, businessEntity, streamId));
-        }
-        LOGGER.info("success to update storage info");
-        return true;
-    }
-
-    @Transactional(rollbackFor = Throwable.class)
-    @Override
-    public boolean delete(String storageType, Integer id, String operator) {
-        LOGGER.debug("begin to delete storage by storageType={}, id={}", storageType, id);
-        Preconditions.checkNotNull(id, "storage id is null");
-        Preconditions.checkNotNull(storageType, "storageType is empty");
-
-        boolean result;
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            result = hiveOperation.logicDeleteHiveStorage(id, operator);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        LOGGER.info("success to delete storage info");
-        return result;
-    }
-
-    @Override
-    public void updateHiveStatusById(int id, int status, String log) {
-        StorageHiveEntity entity = new StorageHiveEntity();
-        entity.setId(id);
-        entity.setStatus(status);
-        entity.setOptLog(log);
-        hiveStorageMapper.updateStorageStatusById(entity);
-    }
-
-    @Transactional(rollbackFor = Throwable.class)
-    @Override
-    public boolean deleteAllByIdentifier(String groupId, String streamId) {
-        LOGGER.debug("begin to delete all storage info by groupId={}, streamId={}", groupId, streamId);
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        // Check if it can be deleted
-        this.checkBizIsTempStatus(groupId);
-
-        hiveOperation.deleteHiveByIdentifier(groupId, streamId);
-
-        LOGGER.info("success to delete all storage info");
-        return true;
-    }
-
-    @Transactional(rollbackFor = Throwable.class)
-    @Override
-    public boolean logicDeleteAllByIdentifier(String groupId, String streamId, String operator) {
-        LOGGER.debug("begin to logic delete all storage info by groupId={}, streamId={}", groupId, streamId);
-        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
-        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
-
-        // Check if it can be deleted
-        this.checkBizIsTempStatus(groupId);
-
-        hiveOperation.logicDeleteHiveByIdentifier(groupId, streamId, operator);
-
-        LOGGER.info("success to logic delete all storage info");
-        return true;
-    }
-
-    @Override
-    public List<String> filterStreamIdByStorageType(String groupId, String storageType, List<String> streamIdList) {
-        LOGGER.debug("begin to filter stream by groupId={}, type={}, streamId={}", groupId, storageType, streamIdList);
-
-        List<String> resultList = new ArrayList<>();
-        if (StringUtils.isEmpty(storageType) || CollectionUtils.isEmpty(streamIdList)) {
-            return resultList;
-        }
-
-        if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-            resultList = hiveStorageMapper.selectDataStreamExists(groupId, streamIdList);
-        } else {
-            LOGGER.error("the storageType={} not support", storageType);
-            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-        }
-
-        LOGGER.info("success to filter stream");
-        return resultList;
-    }
-
-    @Override
-    public List<String> getStorageTypeList(String groupId, String streamId) {
-        LOGGER.debug("begin to get storage type list by groupId={}, streamId={}", groupId, streamId);
-
-        List<String> resultList = new ArrayList<>();
-        if (StringUtils.isEmpty(streamId)) {
-            return resultList;
-        }
-
-        if (hiveStorageMapper.selectCountByIdentifier(groupId, streamId) > 0) {
-            resultList.add(BizConstant.STORAGE_HIVE);
-        }
-
-        LOGGER.info("success to get storage type list");
-        return resultList;
-    }
-
-    @Override
-    public boolean updateAfterApprove(List<StorageApproveInfo> storageApproveList, String operator) {
-        if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug("begin to update storage after approve={}", storageApproveList);
-        }
-        if (CollectionUtils.isEmpty(storageApproveList)) {
-            return true;
-        }
-
-        for (StorageApproveInfo info : storageApproveList) {
-            // According to the storage type, save storage information
-            String storageType = info.getStorageType();
-            Preconditions.checkNotNull(storageType, "storageType is empty");
-
-            if (BizConstant.STORAGE_HIVE.equals(storageType.toUpperCase(Locale.ROOT))) {
-                StorageHiveEntity hiveEntity = new StorageHiveEntity();
-                hiveEntity.setId(info.getId());
-                hiveEntity.setModifier(operator);
-                hiveEntity.setStatus(EntityStatus.DATA_STORAGE_CONFIG_ING.getCode());
-                hiveStorageMapper.updateByPrimaryKeySelective(hiveEntity);
-            } else {
-                LOGGER.error("the storageType={} not support", storageType);
-                throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORTED);
-            }
-        }
-
-        LOGGER.info("success to update storage after approve");
-        return true;
-    }
-
-}
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperation.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperation.java
new file mode 100644
index 0000000..d81a17c
--- /dev/null
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperation.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.manager.service.storage;
+
+import com.github.pagehelper.Page;
+import com.github.pagehelper.PageInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+import org.apache.inlong.manager.dao.entity.StorageEntity;
+
+import java.util.function.Supplier;
+
+/**
+ * Interface of the storage operation
+ */
+public interface StorageOperation {
+
+    /**
+     * Determines whether the current instance matches the specified type.
+     */
+    Boolean accept(String storageType);
+
+    /**
+     * Save the storage info.
+     *
+     * @param request The request of the storage.
+     * @param operator The operator name.
+     * @return Storage id after saving.
+     */
+    default Integer saveOpt(StorageRequest request, String operator) {
+        return null;
+    }
+
+    /**
+     * Save storage fields via the storage request.
+     *
+     * @param request Storage request.
+     */
+    default void saveFieldOpt(StorageRequest request) {
+    }
+
+    /**
+     * Get storage info by storage type and storage id.
+     *
+     * @param storageType Storage type.
+     * @param id Storage id.
+     * @return Storage info.
+     */
+    StorageResponse getById(String storageType, Integer id);
+
+    /**
+     * Get the target from the given entity.
+     *
+     * @param entity Get field value from the entity.
+     * @param target Encapsulate value to the target.
+     * @param <T> Type of the target.
+     * @return Target after encapsulating.
+     */
+    <T> T getFromEntity(StorageEntity entity, Supplier<T> target);
+
+    /**
+     * Get storage list response from the given storage entity page.
+     *
+     * @param entityPage The given entity page.
+     * @return Storage list response.
+     */
+    default PageInfo<? extends StorageListResponse> getPageInfo(Page<StorageEntity> entityPage) {
+        return new PageInfo<>();
+    }
+
+    /**
+     * Update the storage info.
+     *
+     * @param request Request of update.
+     * @param operator Operator's name.
+     */
+    void updateOpt(StorageRequest request, String operator);
+
+    /**
+     * Update the storage fields.
+     * <p/>If <code>onlyAdd</code> is <code>true</code>, only adding fields is allowed; modification and
+     * deletion are not allowed, and the order of existing fields cannot be changed.
+     *
+     * @param onlyAdd Whether to add fields only.
+     * @param request The update request.
+     */
+    void updateFieldOpt(Boolean onlyAdd, StorageRequest request);
+
+}
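A concrete plug-in only has to answer accept() for its own type and implement
the storage-specific methods; the default methods above cover the rest. A
minimal sketch of such an implementation for HIVE follows; it is not the
actual HiveStorageOperation, and the placeholder bodies elide the real
persistence logic:

    package org.apache.inlong.manager.service.storage;

    import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
    import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
    import org.apache.inlong.manager.common.util.CommonBeanUtils;
    import org.apache.inlong.manager.dao.entity.StorageEntity;
    import org.springframework.stereotype.Service;

    import java.util.function.Supplier;

    @Service
    public class HiveStorageOperationSketch implements StorageOperation {

        @Override
        public Boolean accept(String storageType) {
            // The real class would compare against a shared type constant
            return "HIVE".equalsIgnoreCase(storageType);
        }

        @Override
        public StorageResponse getById(String storageType, Integer id) {
            // Placeholder: load the entity and its fields, then convert
            return null;
        }

        @Override
        public <T> T getFromEntity(StorageEntity entity, Supplier<T> target) {
            // Copy the common storage columns into the requested response type
            return CommonBeanUtils.copyProperties(entity, target);
        }

        @Override
        public void updateOpt(StorageRequest request, String operator) {
            // Placeholder for the storage-specific update logic
        }

        @Override
        public void updateFieldOpt(Boolean onlyAdd, StorageRequest request) {
            // Placeholder for the storage-specific field update logic
        }
    }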
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperationFactory.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperationFactory.java
new file mode 100644
index 0000000..4643065
--- /dev/null
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageOperationFactory.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.manager.service.storage;
+
+import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
+import org.apache.inlong.manager.common.exceptions.BusinessException;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Factory for {@link StorageOperation}.
+ */
+@Service
+public class StorageOperationFactory {
+
+    @Autowired
+    private List<StorageOperation> storageOperationList;
+
+    /**
+     * Get a storage operation instance for the given storageType, or fail with STORAGE_TYPE_NOT_SUPPORT.
+     */
+    public StorageOperation getInstance(String storageType) {
+        Optional<StorageOperation> instance = storageOperationList.stream()
+                .filter(inst -> inst.accept(storageType))
+                .findFirst();
+        if (!instance.isPresent()) {
+            throw new BusinessException(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORT,
+                    String.format(BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORT.getMessage(), storageType));
+        }
+        return instance.get();
+    }
+
+}
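
Service code never references a concrete operation class; it asks the factory for one by type string. A minimal caller-side sketch (the enclosing bean and the fetch method are assumed for illustration; this mirrors what StorageServiceImpl.get does further below):

    @Autowired
    private StorageOperationFactory operationFactory;

    public StorageResponse fetch(Integer id, String storageType) {
        // Dispatch on the type string; a BusinessException with
        // STORAGE_TYPE_NOT_SUPPORT is raised if no plug-in accepts it
        StorageOperation operation = operationFactory.getInstance(storageType);
        return operation.getById(storageType, id);
    }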
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageService.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageService.java
similarity index 60%
rename from inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageService.java
rename to inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageService.java
index 6b13df9..9938f59 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/core/StorageService.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageService.java
@@ -15,16 +15,17 @@
  * limitations under the License.
  */
 
-package org.apache.inlong.manager.service.core;
+package org.apache.inlong.manager.service.storage;
 
 import com.github.pagehelper.PageInfo;
-import java.util.List;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageListResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
 import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+
+import java.util.List;
 
 /**
  * Service layer interface for data storage
@@ -32,22 +33,22 @@ import org.apache.inlong.manager.common.pojo.datastorage.StorageSummaryInfo;
 public interface StorageService {
 
     /**
-     * Save storage information
+     * Save the storage information
      *
-     * @param storageInfo Store information
-     * @param operator Edit person's name
-     * @return Primary key after saving
+     * @param request Storage request.
+     * @param operator Operator's name.
+     * @return Storage id after saving.
      */
-    Integer save(BaseStorageRequest storageInfo, String operator);
+    Integer save(StorageRequest request, String operator);
 
     /**
-     * Query storage information based on id
+     * Query storage information based on id and type.
      *
-     * @param id Data primary key
-     * @param storageType Storage type
-     * @return Store information
+     * @param id Storage id.
+     * @param storageType Storage type.
+     * @return Storage info.
      */
-    BaseStorageResponse getById(String storageType, Integer id);
+    StorageResponse get(Integer id, String storageType);
 
     /**
      * Query storage information based on business group id and data stream id
@@ -57,7 +58,7 @@ public interface StorageService {
      * @return Store information list
      * @apiNote Only the HIVE storage type is supported for now
      */
-    List<BaseStorageResponse> listByIdentifier(String groupId, String streamId);
+    List<StorageResponse> listStorage(String groupId, String streamId);
 
     /**
      * Query stored summary information based on business group id and data stream id, including storage cluster
@@ -67,7 +68,7 @@ public interface StorageService {
      * @return Store information list
      * @apiNote Only the HIVE storage type is supported for now
      */
-    List<StorageSummaryInfo> listSummaryByIdentifier(String groupId, String streamId);
+    List<StorageBriefResponse> listBrief(String groupId, String streamId);
 
     /**
      * Query the number of undeleted stored information based on business and data stream id
@@ -76,7 +77,7 @@ public interface StorageService {
      * @param streamId Data stream id
      * @return Number of stored information
      */
-    Integer getCountByIdentifier(String groupId, String streamId);
+    Integer getCount(String groupId, String streamId);
 
     /**
      * Paging query storage information based on conditions
@@ -84,7 +85,7 @@ public interface StorageService {
      * @param request Paging request
      * @return Store information list
      */
-    PageInfo<? extends BaseStorageListResponse> listByCondition(StoragePageRequest request);
+    PageInfo<? extends StorageListResponse> listByCondition(StoragePageRequest request);
 
     /**
      * Modify data storage information
@@ -93,17 +94,17 @@ public interface StorageService {
      * @param operator Edit person's name
      * @return whether succeed
      */
-    boolean update(BaseStorageRequest storageRequest, String operator);
+    boolean update(StorageRequest storageRequest, String operator);
 
     /**
-     * Delete data storage information based on id
+     * Delete the data storage by the given id and storage type.
      *
-     * @param storageType Storage type
-     * @param id The primary key of the data store
-     * @param operator Edit person's name
-     * @return whether succeed
+     * @param id The primary key of the data storage.
+     * @param storageType Storage type.
+     * @param operator The operator's name.
+     * @return Whether the deletion succeeded.
      */
-    boolean delete(String storageType, Integer id, String operator);
+    boolean delete(Integer id, String storageType, String operator);
 
     /**
      * Modify storage data status
@@ -112,26 +113,27 @@ public interface StorageService {
      * @param status Goal state
      * @param log Modify the description
      */
-    void updateHiveStatusById(int id, int status, String log);
+    void updateStatus(int id, int status, String log);
 
     /**
-     * Physically delete data storage information under specified conditions
+     * Logically delete data storage with the given conditions.
      *
-     * @param groupId Business group id
-     * @param streamId Data stream id
-     * @return whether succeed
+     * @param groupId InLong group id to which the data storage belongs.
+     * @param streamId InLong stream id to which the data storage belongs.
+     * @param operator The operator's name.
+     * @return Whether the deletion succeeded.
      */
-    boolean deleteAllByIdentifier(String groupId, String streamId);
+    boolean logicDeleteAll(String groupId, String streamId, String operator);
 
     /**
-     * Tombstone data storage information
+     * Physically delete data storage with the given conditions.
      *
-     * @param groupId The business group id to which the data source belongs
-     * @param streamId The data stream id to which the data source belongs
-     * @param operator Operator name
-     * @return whether succeed
+     * @param groupId InLong group id.
+     * @param streamId InLong stream id.
+     * @param operator The operator's name.
+     * @return Whether the deletion succeeded.
      */
-    boolean logicDeleteAllByIdentifier(String groupId, String streamId, String operator);
+    boolean deleteAll(String groupId, String streamId, String operator);
 
     /**
      * According to the existing data stream ID list, filter out the data stream ID list containing the specified
@@ -142,7 +144,7 @@ public interface StorageService {
      * @param streamIdList Data stream ID list
      * @return List of filtered data stream IDs
      */
-    List<String> filterStreamIdByStorageType(String groupId, String storageType, List<String> streamIdList);
+    List<String> getExistsStreamIdList(String groupId, String storageType, List<String> streamIdList);
 
     /**
      * According to the data stream id, query the list of storage types owned by it
@@ -160,6 +162,6 @@ public interface StorageService {
      * @param operator Edit person's name
      * @return whether succeed
      */
-    boolean updateAfterApprove(List<StorageApproveInfo> storageApproveList, String operator);
+    boolean updateAfterApprove(List<StorageApproveDTO> storageApproveList, String operator);
 
 }
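
Besides the package move, note that get and delete now take the id first and the storage type second, the reverse of the old getById/delete parameter order. A migration sketch, with an illustrative id and operator:

    // Before this commit:
    //   BaseStorageResponse response = storageService.getById("HIVE", 5);
    //   storageService.delete("HIVE", 5, "admin");

    // After this commit:
    StorageResponse response = storageService.get(5, "HIVE");
    boolean succeed = storageService.delete(5, "HIVE", "admin");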
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageServiceImpl.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageServiceImpl.java
new file mode 100644
index 0000000..3765186
--- /dev/null
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/StorageServiceImpl.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.manager.service.storage;
+
+import com.github.pagehelper.Page;
+import com.github.pagehelper.PageHelper;
+import com.github.pagehelper.PageInfo;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.inlong.manager.common.enums.BizConstant;
+import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
+import org.apache.inlong.manager.common.enums.EntityStatus;
+import org.apache.inlong.manager.common.exceptions.BusinessException;
+import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageApproveDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageBriefResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+import org.apache.inlong.manager.common.util.CommonBeanUtils;
+import org.apache.inlong.manager.common.util.Preconditions;
+import org.apache.inlong.manager.dao.entity.BusinessEntity;
+import org.apache.inlong.manager.dao.entity.StorageEntity;
+import org.apache.inlong.manager.dao.mapper.BusinessEntityMapper;
+import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
+import org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper;
+import org.apache.inlong.manager.service.core.impl.BusinessProcessOperation;
+import org.apache.inlong.manager.service.workflow.ProcessName;
+import org.apache.inlong.manager.service.workflow.WorkflowService;
+import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
+import org.apache.inlong.manager.service.workflow.business.NewBusinessWorkflowForm;
+import org.apache.inlong.manager.service.workflow.stream.CreateStreamWorkflowDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Implementation of storage service interface
+ */
+@Service
+public class StorageServiceImpl implements StorageService {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(StorageServiceImpl.class);
+    public final ExecutorService executorService = new ThreadPoolExecutor(
+            10,
+            20,
+            0L,
+            TimeUnit.MILLISECONDS,
+            new ArrayBlockingQueue<>(100),
+            new ThreadFactoryBuilder().setNameFormat("data-stream-workflow-%s").build(),
+            new CallerRunsPolicy());
+
+    @Autowired
+    private StorageOperationFactory operationFactory;
+    @Autowired
+    private BusinessEntityMapper businessMapper;
+    @Autowired
+    private StorageEntityMapper storageMapper;
+    @Autowired
+    private StorageFieldEntityMapper storageFieldMapper;
+    @Autowired
+    private WorkflowService workflowService;
+    @Autowired
+    private BusinessProcessOperation businessProcessOperation;
+
+    @Override
+    @Transactional(rollbackFor = Throwable.class)
+    public Integer save(StorageRequest request, String operator) {
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("begin to save storage info=" + request);
+        }
+        this.checkParams(request);
+
+        // Check if it can be added
+        String groupId = request.getInlongGroupId();
+        BusinessEntity businessEntity = checkBizIsTempStatus(groupId, operator);
+
+        // Make sure that there is no storage info with the current groupId and streamId
+        String streamId = request.getInlongStreamId();
+        String storageType = request.getStorageType();
+        List<StorageEntity> storageExist = storageMapper.selectByIdAndType(groupId, streamId, storageType);
+        Preconditions.checkEmpty(storageExist, BizErrorCodeEnum.STORAGE_ALREADY_EXISTS.getMessage());
+
+        // According to the storage type, save storage information
+        StorageOperation operation = operationFactory.getInstance(storageType);
+        int id = operation.saveOpt(request, operator);
+
+        // If the business status is [Configuration Successful], then asynchronously initiate
+        // the [Single data stream Resource Creation] workflow
+        if (EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode().equals(businessEntity.getStatus())) {
+            executorService.execute(new WorkflowStartRunnable(operator, businessEntity, streamId));
+        }
+
+        LOGGER.info("success to save storage info");
+        return id;
+    }
+
+    @Override
+    public StorageResponse get(Integer id, String storageType) {
+        LOGGER.debug("begin to get storage by id={}, storageType={}", id, storageType);
+        StorageOperation operation = operationFactory.getInstance(storageType);
+        StorageResponse storageResponse = operation.getById(storageType, id);
+        LOGGER.info("success to get storage info");
+        return storageResponse;
+    }
+
+    @Override
+    public Integer getCount(String groupId, String streamId) {
+        Integer count = storageMapper.selectCount(groupId, streamId);
+        LOGGER.debug("storage count={} with groupId={}, streamId={}", count, groupId, streamId);
+        return count;
+    }
+
+    @Override
+    public List<StorageResponse> listStorage(String groupId, String streamId) {
+        LOGGER.debug("begin to list storage by groupId={}, streamId={}", groupId, streamId);
+        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
+
+        List<StorageEntity> entityList = storageMapper.selectByIdentifier(groupId, streamId);
+        if (CollectionUtils.isEmpty(entityList)) {
+            return Collections.emptyList();
+        }
+        List<StorageResponse> responseList = new ArrayList<>();
+        entityList.forEach(entity -> responseList.add(this.get(entity.getId(), entity.getStorageType())));
+
+        LOGGER.info("success to list storage");
+        return responseList;
+    }
+
+    @Override
+    public List<StorageBriefResponse> listBrief(String groupId, String streamId) {
+        LOGGER.debug("begin to list storage summary by groupId=" + groupId + ", streamId=" + streamId);
+        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
+        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
+
+        // Query all storage information and encapsulate it in the result set
+        List<StorageBriefResponse> summaryList = storageMapper.selectSummary(groupId, streamId);
+
+        LOGGER.info("success to list storage summary");
+        return summaryList;
+    }
+
+    @Override
+    public PageInfo<? extends StorageListResponse> listByCondition(StoragePageRequest request) {
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("begin to list storage page by " + request);
+        }
+        Preconditions.checkNotNull(request.getInlongGroupId(), BizConstant.GROUP_ID_IS_EMPTY);
+        String storageType = request.getStorageType();
+        Preconditions.checkNotNull(storageType, BizConstant.STORAGE_TYPE_IS_EMPTY);
+
+        PageHelper.startPage(request.getPageNum(), request.getPageSize());
+        Page<StorageEntity> entityPage = (Page<StorageEntity>) storageMapper.selectByCondition(request);
+
+        // Encapsulate the paging query results into the PageInfo object to obtain related paging information
+        StorageOperation operation = operationFactory.getInstance(storageType);
+        PageInfo<? extends StorageListResponse> pageInfo = operation.getPageInfo(entityPage);
+        pageInfo.setTotal(entityPage.getTotal());
+
+        LOGGER.info("success to list storage page");
+        return pageInfo;
+    }
+
+    @Override
+    @Transactional(rollbackFor = Throwable.class)
+    public boolean update(StorageRequest request, String operator) {
+        LOGGER.info("begin to update storage info={}", request);
+        this.checkParams(request);
+        Preconditions.checkNotNull(request.getId(), BizConstant.ID_IS_EMPTY);
+
+        // Check if it can be modified
+        String groupId = request.getInlongGroupId();
+        BusinessEntity businessEntity = checkBizIsTempStatus(groupId, operator);
+
+        String streamId = request.getInlongStreamId();
+        String storageType = request.getStorageType();
+
+        StorageOperation operation = operationFactory.getInstance(storageType);
+        operation.updateOpt(request, operator);
+
+        // If the business status is [Configuration successful], asynchronously initiate
+        // the [Single data stream resource creation] workflow
+        if (EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode().equals(businessEntity.getStatus())) {
+            executorService.execute(new WorkflowStartRunnable(operator, businessEntity, streamId));
+        }
+        LOGGER.info("success to update storage info");
+        return true;
+    }
+
+    @Transactional(rollbackFor = Throwable.class)
+    @Override
+    public boolean delete(Integer id, String storageType, String operator) {
+        LOGGER.info("begin to delete storage by id={}, storageType={}", id, storageType);
+        Preconditions.checkNotNull(id, BizConstant.ID_IS_EMPTY);
+        // Preconditions.checkNotNull(storageType, BizConstant.STORAGE_TYPE_IS_EMPTY);
+
+        StorageEntity entity = storageMapper.selectByPrimaryKey(id);
+        Preconditions.checkNotNull(entity, BizErrorCodeEnum.STORAGE_INFO_NOT_FOUND.getMessage());
+        checkBizIsTempStatus(entity.getInlongGroupId(), operator);
+
+        entity.setPreviousStatus(entity.getStatus());
+        entity.setStatus(EntityStatus.DELETED.getCode());
+        entity.setIsDeleted(id);
+        entity.setModifier(operator);
+        entity.setModifyTime(new Date());
+        storageMapper.updateByPrimaryKeySelective(entity);
+
+        storageFieldMapper.logicDeleteAll(id);
+
+        LOGGER.info("success to delete storage info");
+        return true;
+    }
+
+    @Override
+    public void updateStatus(int id, int status, String log) {
+        StorageEntity entity = new StorageEntity();
+        entity.setId(id);
+        entity.setStatus(status);
+        entity.setOperateLog(log);
+        storageMapper.updateStorageStatus(entity);
+    }
+
+    @Override
+    @Transactional(rollbackFor = Throwable.class)
+    public boolean logicDeleteAll(String groupId, String streamId, String operator) {
+        LOGGER.info("begin to logic delete all storage info by groupId={}, streamId={}", groupId, streamId);
+        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
+        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
+
+        // Check if it can be deleted
+        this.checkBizIsTempStatus(groupId, operator);
+
+        Date now = new Date();
+        List<StorageEntity> entityList = storageMapper.selectByIdentifier(groupId, streamId);
+        if (CollectionUtils.isNotEmpty(entityList)) {
+            entityList.forEach(entity -> {
+                Integer id = entity.getId();
+                entity.setPreviousStatus(entity.getStatus());
+                entity.setStatus(EntityStatus.DELETED.getCode());
+                entity.setIsDeleted(id);
+                entity.setModifier(operator);
+                entity.setModifyTime(now);
+
+                storageMapper.deleteByPrimaryKey(id);
+                storageFieldMapper.logicDeleteAll(id);
+            });
+        }
+
+        LOGGER.info("success to logic delete all storage by groupId={}, streamId={}", groupId, streamId);
+        return true;
+    }
+
+    @Override
+    @Transactional(rollbackFor = Throwable.class)
+    public boolean deleteAll(String groupId, String streamId, String operator) {
+        LOGGER.info("begin to delete all storage by groupId={}, streamId={}", groupId, streamId);
+        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
+        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
+
+        // Check if it can be deleted
+        this.checkBizIsTempStatus(groupId, operator);
+
+        List<StorageEntity> entityList = storageMapper.selectByIdentifier(groupId, streamId);
+        if (CollectionUtils.isNotEmpty(entityList)) {
+            entityList.forEach(entity -> {
+                storageMapper.deleteByPrimaryKey(entity.getId());
+                storageFieldMapper.deleteAll(entity.getId());
+            });
+        }
+
+        LOGGER.info("success to delete all storage by groupId={}, streamId={}", groupId, streamId);
+        return true;
+    }
+
+    @Override
+    public List<String> getExistsStreamIdList(String groupId, String storageType, List<String> streamIdList) {
+        LOGGER.debug("begin to filter stream by groupId={}, type={}, streamId={}", groupId, storageType, streamIdList);
+        if (StringUtils.isEmpty(storageType) || CollectionUtils.isEmpty(streamIdList)) {
+            return Collections.emptyList();
+        }
+
+        List<String> resultList = storageMapper.selectExistsStreamId(groupId, storageType, streamIdList);
+        LOGGER.debug("success to filter stream id list, result streamId={}", resultList);
+        return resultList;
+    }
+
+    @Override
+    public List<String> getStorageTypeList(String groupId, String streamId) {
+        LOGGER.debug("begin to get storage type list by groupId={}, streamId={}", groupId, streamId);
+        if (StringUtils.isEmpty(streamId)) {
+            return Collections.emptyList();
+        }
+
+        List<String> resultList = storageMapper.selectStorageType(groupId, streamId);
+        LOGGER.debug("success to get storage type list, result storageType={}", resultList);
+        return resultList;
+    }
+
+    @Override
+    public boolean updateAfterApprove(List<StorageApproveDTO> approveList, String operator) {
+        if (LOGGER.isDebugEnabled()) {
+            LOGGER.debug("begin to update storage after approve={}", approveList);
+        }
+        if (CollectionUtils.isEmpty(approveList)) {
+            return true;
+        }
+
+        Date now = new Date();
+        for (StorageApproveDTO dto : approveList) {
+            // According to the storage type, save storage information
+            String storageType = dto.getStorageType();
+            Preconditions.checkNotNull(storageType, BizConstant.STORAGE_TYPE_IS_EMPTY);
+
+            StorageEntity entity = new StorageEntity();
+            entity.setId(dto.getId());
+
+            int status = (dto.getStatus() == null) ? EntityStatus.DATA_STORAGE_CONFIG_ING.getCode() : dto.getStatus();
+            entity.setPreviousStatus(entity.getStatus());
+            entity.setStatus(status);
+            entity.setModifier(operator);
+            entity.setModifyTime(now);
+            storageMapper.updateByPrimaryKeySelective(entity);
+        }
+
+        LOGGER.info("success to update storage after approve");
+        return true;
+    }
+
+    /**
+     * Initiate business approval process
+     *
+     * @param operator Operator
+     * @param businessEntity Business entity
+     */
+    public void startBusinessProcess(String operator, BusinessEntity businessEntity) {
+        BusinessInfo businessInfo = CommonBeanUtils.copyProperties(businessEntity, BusinessInfo::new);
+        NewBusinessWorkflowForm form = businessProcessOperation.genNewBusinessWorkflowForm(businessInfo);
+        workflowService.start(ProcessName.NEW_BUSINESS_WORKFLOW, operator, form);
+    }
+
+    /**
+     * Check whether the business status is temporary.
+     *
+     * @param groupId Business group id
+     * @param operator Operator's name, must be one of the business managers
+     * @return Business entity, for caller reuse
+     */
+    public BusinessEntity checkBizIsTempStatus(String groupId, String operator) {
+        BusinessEntity businessEntity = businessMapper.selectByIdentifier(groupId);
+        Preconditions.checkNotNull(businessEntity, "groupId is invalid");
+
+        List<String> managers = Arrays.asList(businessEntity.getInCharges().split(","));
+        Preconditions.checkTrue(managers.contains(operator),
+                String.format(BizErrorCodeEnum.USER_IS_NOT_MANAGER.getMessage(), operator, managers));
+
+        // Add/modify/delete is not allowed under certain group status
+        if (EntityStatus.BIZ_TEMP_STATUS.contains(businessEntity.getStatus())) {
+            LOGGER.error("business status was not allowed to add/update/delete data storage");
+            throw new BusinessException(BizErrorCodeEnum.STORAGE_OPT_NOT_ALLOWED);
+        }
+
+        return businessEntity;
+    }
+
+    private void checkParams(StorageRequest request) {
+        Preconditions.checkNotNull(request, BizConstant.REQUEST_IS_EMPTY);
+        String groupId = request.getInlongGroupId();
+        Preconditions.checkNotNull(groupId, BizConstant.GROUP_ID_IS_EMPTY);
+        String streamId = request.getInlongStreamId();
+        Preconditions.checkNotNull(streamId, BizConstant.STREAM_ID_IS_EMPTY);
+        String storageType = request.getStorageType();
+        Preconditions.checkNotNull(storageType, BizConstant.STORAGE_TYPE_IS_EMPTY);
+    }
+
+    /**
+     * Asynchronously initiate a single data stream related workflow
+     *
+     * @see CreateStreamWorkflowDefinition
+     */
+    class WorkflowStartRunnable implements Runnable {
+
+        private final String operator;
+        private final BusinessEntity businessEntity;
+        private final String streamId;
+
+        public WorkflowStartRunnable(String operator, BusinessEntity businessEntity, String streamId) {
+            this.operator = operator;
+            this.businessEntity = businessEntity;
+            this.streamId = streamId;
+        }
+
+        @Override
+        public void run() {
+            String groupId = businessEntity.getInlongGroupId();
+            LOGGER.info("begin start data stream workflow, groupId={}, streamId={}", groupId, streamId);
+
+            BusinessInfo businessInfo = CommonBeanUtils.copyProperties(businessEntity, BusinessInfo::new);
+            BusinessResourceWorkflowForm form = genBizResourceWorkflowForm(businessInfo, streamId);
+
+            workflowService.start(ProcessName.CREATE_DATASTREAM_RESOURCE, operator, form);
+            LOGGER.info("success start data stream workflow, groupId={}, streamId={}", groupId, streamId);
+        }
+
+        /**
+         * Generate [Create Business Resource] form
+         */
+        private BusinessResourceWorkflowForm genBizResourceWorkflowForm(BusinessInfo businessInfo, String streamId) {
+            BusinessResourceWorkflowForm form = new BusinessResourceWorkflowForm();
+            form.setBusinessInfo(businessInfo);
+            form.setInlongStreamId(streamId);
+            return form;
+        }
+    }
+
+}
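
The workflow pool declared at the top of this class is bounded (10 core threads, 20 maximum, a queue of 100) and falls back to CallerRunsPolicy, so when the pool saturates, a workflow start runs synchronously on the calling request thread instead of being rejected. A scaled-down, self-contained sketch of that fallback (demo code, not part of the commit):

    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class CallerRunsDemo {
        public static void main(String[] args) {
            // Same shape as the pool in StorageServiceImpl, shrunk to
            // 1 thread and a queue of 1 so the fallback is easy to trigger
            ExecutorService pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(1),
                    new ThreadFactoryBuilder().setNameFormat("demo-%s").build(),
                    new ThreadPoolExecutor.CallerRunsPolicy());

            for (int i = 0; i < 3; i++) {
                final int n = i;
                pool.execute(() -> {
                    try {
                        Thread.sleep(100); // keep the worker busy so the queue fills
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    // The third task finds worker and queue both busy, so it
                    // prints the main thread's name: it ran on the caller
                    System.out.println("task " + n + " on " + Thread.currentThread().getName());
                });
            }
            pool.shutdown();
        }
    }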
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/hive/HiveStorageOperation.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/hive/HiveStorageOperation.java
new file mode 100644
index 0000000..665271c
--- /dev/null
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/storage/hive/HiveStorageOperation.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.inlong.manager.service.storage.hive;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.github.pagehelper.Page;
+import com.github.pagehelper.PageInfo;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.inlong.manager.common.enums.BizConstant;
+import org.apache.inlong.manager.common.enums.BizErrorCodeEnum;
+import org.apache.inlong.manager.common.enums.EntityStatus;
+import org.apache.inlong.manager.common.exceptions.BusinessException;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageFieldRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageFieldResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageListResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageResponse;
+import org.apache.inlong.manager.common.util.CommonBeanUtils;
+import org.apache.inlong.manager.common.util.Preconditions;
+import org.apache.inlong.manager.dao.entity.StorageEntity;
+import org.apache.inlong.manager.dao.entity.StorageFieldEntity;
+import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
+import org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper;
+import org.apache.inlong.manager.service.storage.StorageOperation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import javax.validation.constraints.NotNull;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.function.Supplier;
+
+/**
+ * Hive storage operation
+ */
+@Service
+public class HiveStorageOperation implements StorageOperation {
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(HiveStorageOperation.class);
+
+    @Autowired
+    private ObjectMapper objectMapper;
+    @Autowired
+    private StorageEntityMapper storageMapper;
+    @Autowired
+    private StorageFieldEntityMapper storageFieldMapper;
+
+    @Override
+    public Boolean accept(String storageType) {
+        return BizConstant.STORAGE_HIVE.equals(storageType);
+    }
+
+    @Override
+    public Integer saveOpt(StorageRequest request, String operator) {
+        String storageType = request.getStorageType();
+        Preconditions.checkTrue(BizConstant.STORAGE_HIVE.equals(storageType),
+                BizErrorCodeEnum.STORAGE_TYPE_NOT_SUPPORT.getMessage() + ": " + storageType);
+
+        HiveStorageRequest hiveRequest = (HiveStorageRequest) request;
+        StorageEntity entity = CommonBeanUtils.copyProperties(hiveRequest, StorageEntity::new);
+        entity.setStatus(EntityStatus.DATA_STORAGE_NEW.getCode());
+        entity.setIsDeleted(EntityStatus.UN_DELETED.getCode());
+        entity.setCreator(operator);
+        entity.setModifier(operator);
+        Date now = new Date();
+        entity.setCreateTime(now);
+        entity.setModifyTime(now);
+
+        // get the ext params
+        HiveStorageDTO dto = HiveStorageDTO.getFromRequest(hiveRequest);
+        try {
+            entity.setExtParams(objectMapper.writeValueAsString(dto));
+        } catch (Exception e) {
+            throw new BusinessException(BizErrorCodeEnum.STORAGE_SAVE_FAILED);
+        }
+        storageMapper.insert(entity);
+
+        Integer storageId = entity.getId();
+        request.setId(storageId);
+        this.saveFieldOpt(request);
+
+        return storageId;
+    }
+
+    @Override
+    public void saveFieldOpt(StorageRequest request) {
+        List<StorageFieldRequest> fieldList = request.getFieldList();
+        LOGGER.info("begin to save field={}", fieldList);
+        if (CollectionUtils.isEmpty(fieldList)) {
+            return;
+        }
+
+        int size = fieldList.size();
+        List<StorageFieldEntity> entityList = new ArrayList<>(size);
+        String groupId = request.getInlongGroupId();
+        String streamId = request.getInlongStreamId();
+        String storageType = request.getStorageType();
+        Integer storageId = request.getId();
+        for (StorageFieldRequest fieldInfo : fieldList) {
+            StorageFieldEntity fieldEntity = CommonBeanUtils.copyProperties(fieldInfo, StorageFieldEntity::new);
+            if (StringUtils.isEmpty(fieldEntity.getFieldComment())) {
+                fieldEntity.setFieldComment(fieldEntity.getFieldName());
+            }
+            fieldEntity.setInlongGroupId(groupId);
+            fieldEntity.setInlongStreamId(streamId);
+            fieldEntity.setStorageType(storageType);
+            fieldEntity.setStorageId(storageId);
+            fieldEntity.setIsDeleted(EntityStatus.UN_DELETED.getCode());
+            entityList.add(fieldEntity);
+        }
+
+        storageFieldMapper.insertAll(entityList);
+        LOGGER.info("success to save hive field");
+    }
+
+    @Override
+    public StorageResponse getById(@NotNull String storageType, @NotNull Integer id) {
+        StorageEntity entity = storageMapper.selectByPrimaryKey(id);
+        Preconditions.checkNotNull(entity, BizErrorCodeEnum.STORAGE_INFO_NOT_FOUND.getMessage());
+        String existType = entity.getStorageType();
+        Preconditions.checkTrue(BizConstant.STORAGE_HIVE.equals(existType),
+                String.format(BizConstant.STORAGE_TYPE_NOT_SAME, BizConstant.STORAGE_HIVE, existType));
+
+        StorageResponse response = this.getFromEntity(entity, HiveStorageResponse::new);
+        List<StorageFieldEntity> entities = storageFieldMapper.selectByStorageId(id);
+        List<StorageFieldResponse> infos = CommonBeanUtils.copyListProperties(entities,
+                StorageFieldResponse::new);
+        response.setFieldList(infos);
+
+        return response;
+    }
+
+    @Override
+    public <T> T getFromEntity(StorageEntity entity, Supplier<T> target) {
+        T result = target.get();
+        if (entity == null) {
+            return result;
+        }
+
+        String existType = entity.getStorageType();
+        Preconditions.checkTrue(BizConstant.STORAGE_HIVE.equals(existType),
+                String.format(BizConstant.STORAGE_TYPE_NOT_SAME, BizConstant.STORAGE_HIVE, existType));
+
+        HiveStorageDTO dto = HiveStorageDTO.getFromJson(entity.getExtParams());
+        CommonBeanUtils.copyProperties(entity, result, true);
+        CommonBeanUtils.copyProperties(dto, result, true);
+
+        return result;
+    }
+
+    @Override
+    public PageInfo<? extends StorageListResponse> getPageInfo(Page<StorageEntity> entityPage) {
+        if (CollectionUtils.isEmpty(entityPage)) {
+            return new PageInfo<>();
+        }
+        return entityPage.toPageInfo(entity -> this.getFromEntity(entity, HiveStorageListResponse::new));
+    }
+
+    @Override
+    public void updateOpt(StorageRequest request, String operator) {
+        String storageType = request.getStorageType();
+        Preconditions.checkTrue(BizConstant.STORAGE_HIVE.equals(storageType),
+                String.format(BizConstant.STORAGE_TYPE_NOT_SAME, BizConstant.STORAGE_HIVE, storageType));
+
+        StorageEntity entity = storageMapper.selectByPrimaryKey(request.getId());
+        Preconditions.checkNotNull(entity, BizErrorCodeEnum.STORAGE_INFO_NOT_FOUND.getMessage());
+        HiveStorageRequest hiveRequest = (HiveStorageRequest) request;
+        CommonBeanUtils.copyProperties(hiveRequest, entity, true);
+        try {
+            HiveStorageDTO dto = HiveStorageDTO.getFromRequest(hiveRequest);
+            entity.setExtParams(objectMapper.writeValueAsString(dto));
+        } catch (Exception e) {
+            throw new BusinessException(BizErrorCodeEnum.STORAGE_INFO_INCORRECT.getMessage());
+        }
+
+        entity.setPreviousStatus(entity.getStatus());
+        entity.setStatus(EntityStatus.BIZ_CONFIG_ING.getCode());
+        entity.setModifier(operator);
+        entity.setModifyTime(new Date());
+        storageMapper.updateByPrimaryKeySelective(entity);
+
+        boolean onlyAdd = EntityStatus.DATA_STORAGE_CONFIG_SUCCESSFUL.getCode().equals(entity.getPreviousStatus());
+        this.updateFieldOpt(onlyAdd, hiveRequest);
+
+        LOGGER.info("success to update storage of type={}", storageType);
+    }
+
+    @Override
+    public void updateFieldOpt(Boolean onlyAdd, StorageRequest request) {
+        Integer storageId = request.getId();
+        List<StorageFieldRequest> fieldRequestList = request.getFieldList();
+        if (CollectionUtils.isEmpty(fieldRequestList)) {
+            return;
+        }
+
+        if (onlyAdd) {
+            List<StorageFieldEntity> existsFieldList = storageFieldMapper.selectByStorageId(storageId);
+            if (existsFieldList.size() > fieldRequestList.size()) {
+                throw new BusinessException(BizErrorCodeEnum.STORAGE_FIELD_UPDATE_NOT_ALLOWED);
+            }
+            for (int i = 0; i < existsFieldList.size(); i++) {
+                if (!existsFieldList.get(i).getFieldName().equals(fieldRequestList.get(i).getFieldName())) {
+                    throw new BusinessException(BizErrorCodeEnum.STORAGE_FIELD_UPDATE_NOT_ALLOWED);
+                }
+            }
+        }
+
+        // First physically delete the existing fields
+        storageFieldMapper.deleteAll(storageId);
+        // Then batch save the storage fields
+        this.saveFieldOpt(request);
+
+        LOGGER.info("success to update field");
+    }
+
+}
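
The Hive-specific settings never get columns of their own; saveOpt serializes a HiveStorageDTO into the entity's extParams string, and getFromEntity deserializes it back before copying the fields onto the response. A self-contained round-trip sketch with Jackson (the HiveDto class and its two fields are illustrative stand-ins for HiveStorageDTO):

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ExtParamsRoundTrip {

        // Illustrative stand-in for HiveStorageDTO
        public static class HiveDto {
            public String jdbcUrl;
            public String tableName;
        }

        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();

            HiveDto dto = new HiveDto();
            dto.jdbcUrl = "jdbc:hive2://127.0.0.1:10000";
            dto.tableName = "demo_table";

            // saveOpt(): DTO -> JSON string stored in StorageEntity.extParams
            String extParams = mapper.writeValueAsString(dto);

            // getFromEntity(): JSON string -> DTO, whose fields are then
            // copied onto the response via CommonBeanUtils
            HiveDto restored = mapper.readValue(extParams, HiveDto.class);
            System.out.println(restored.jdbcUrl + " / " + restored.tableName);
        }
    }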
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableEventSelector.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableEventSelector.java
index 35d4cc2..7d9223b 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableEventSelector.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableEventSelector.java
@@ -17,8 +17,6 @@
 
 package org.apache.inlong.manager.service.thirdpart.hive;
 
-import java.util.List;
-import java.util.stream.Collectors;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -28,11 +26,14 @@ import org.apache.inlong.manager.common.model.WorkflowContext;
 import org.apache.inlong.manager.common.model.definition.ProcessForm;
 import org.apache.inlong.manager.dao.entity.DataStreamEntity;
 import org.apache.inlong.manager.dao.mapper.DataStreamEntityMapper;
-import org.apache.inlong.manager.service.core.StorageService;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Component;
 
+import java.util.List;
+import java.util.stream.Collectors;
+
 @Component
 @Slf4j
 public class CreateHiveTableEventSelector implements EventSelector {
@@ -53,7 +54,7 @@ public class CreateHiveTableEventSelector implements EventSelector {
             return false;
         }
         String groupId = form.getInlongGroupId();
-        List<String> dsForHive = storageService.filterStreamIdByStorageType(groupId, BizConstant.STORAGE_HIVE,
+        List<String> dsForHive = storageService.getExistsStreamIdList(groupId, BizConstant.STORAGE_HIVE,
                 streamMapper.selectByGroupId(groupId)
                         .stream()
                         .map(DataStreamEntity::getInlongStreamId)
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableForStreamListener.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableForStreamListener.java
index 420afa2..759eccc 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableForStreamListener.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableForStreamListener.java
@@ -17,18 +17,19 @@
 
 package org.apache.inlong.manager.service.thirdpart.hive;
 
-import java.util.List;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.inlong.manager.common.event.ListenerResult;
 import org.apache.inlong.manager.common.event.task.StorageOperateListener;
 import org.apache.inlong.manager.common.event.task.TaskEvent;
 import org.apache.inlong.manager.common.model.WorkflowContext;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO;
-import org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO;
+import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 
+import java.util.List;
+
 /**
  * Event listener of create hive table for one data stream
  */
@@ -37,7 +38,7 @@ import org.springframework.stereotype.Service;
 public class CreateHiveTableForStreamListener implements StorageOperateListener {
 
     @Autowired
-    private StorageHiveEntityMapper hiveEntityMapper;
+    private StorageEntityMapper storageMapper;
     @Autowired
     private HiveTableOperator hiveTableOperator;
 
@@ -53,7 +54,7 @@ public class CreateHiveTableForStreamListener implements StorageOperateListener
         String streamId = form.getInlongStreamId();
         log.info("begin create hive table for groupId={}, streamId={}", groupId, streamId);
 
-        List<StorageHiveDTO> configList = hiveEntityMapper.selectAllHiveConfig(groupId, streamId);
+        List<StorageForSortDTO> configList = storageMapper.selectAllConfig(groupId, streamId);
         hiveTableOperator.createHiveResource(groupId, configList);
 
         String result = "success to create hive table for group [" + groupId + "], stream [" + streamId + "]";
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableListener.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableListener.java
index 98c41c1..da84172 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableListener.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/CreateHiveTableListener.java
@@ -17,18 +17,19 @@
 
 package org.apache.inlong.manager.service.thirdpart.hive;
 
-import java.util.List;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.inlong.manager.common.event.ListenerResult;
 import org.apache.inlong.manager.common.event.task.StorageOperateListener;
 import org.apache.inlong.manager.common.event.task.TaskEvent;
 import org.apache.inlong.manager.common.model.WorkflowContext;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO;
-import org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO;
+import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 
+import java.util.List;
+
 /**
  * Event listener of create hive table for all data stream
  */
@@ -37,7 +38,7 @@ import org.springframework.stereotype.Service;
 public class CreateHiveTableListener implements StorageOperateListener {
 
     @Autowired
-    private StorageHiveEntityMapper hiveEntityMapper;
+    private StorageEntityMapper storageMapper;
     @Autowired
     private HiveTableOperator hiveTableOperator;
 
@@ -52,7 +53,7 @@ public class CreateHiveTableListener implements StorageOperateListener {
         String groupId = form.getInlongGroupId();
         log.info("begin to create hive table for groupId={}", groupId);
 
-        List<StorageHiveDTO> configList = hiveEntityMapper.selectAllHiveConfig(groupId, null);
+        List<StorageForSortDTO> configList = storageMapper.selectAllConfig(groupId, null);
         hiveTableOperator.createHiveResource(groupId, configList);
 
         String result = "success to create hive table for group [" + groupId + "]";
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/HiveTableOperator.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/HiveTableOperator.java
index d7a6f16..e184d11 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/HiveTableOperator.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/hive/HiveTableOperator.java
@@ -17,57 +17,58 @@
 
 package org.apache.inlong.manager.service.thirdpart.hive;
 
-import static java.util.stream.Collectors.toList;
-
-import java.util.ArrayList;
-import java.util.List;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.inlong.manager.common.enums.BizConstant;
 import org.apache.inlong.manager.common.enums.EntityStatus;
 import org.apache.inlong.manager.common.exceptions.WorkflowException;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageDTO;
 import org.apache.inlong.manager.common.pojo.query.ColumnInfoBean;
 import org.apache.inlong.manager.common.pojo.query.DatabaseQueryBean;
 import org.apache.inlong.manager.common.pojo.query.hive.HiveColumnQueryBean;
 import org.apache.inlong.manager.common.pojo.query.hive.HiveTableQueryBean;
-import org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity;
-import org.apache.inlong.manager.dao.mapper.StorageHiveFieldEntityMapper;
+import org.apache.inlong.manager.dao.entity.StorageFieldEntity;
+import org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper;
 import org.apache.inlong.manager.service.core.DataSourceService;
-import org.apache.inlong.manager.service.core.StorageService;
-import org.apache.inlong.manager.service.core.impl.StorageHiveOperation;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Component;
 
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.util.stream.Collectors.toList;
+
 /**
  * Create hive table operation
  */
 @Component
 public class HiveTableOperator {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(StorageHiveOperation.class);
+    private static final Logger LOGGER = LoggerFactory.getLogger(HiveTableOperator.class);
 
     @Autowired
     private StorageService storageService;
     @Autowired
-    private StorageHiveFieldEntityMapper hiveFieldMapper;
+    private StorageFieldEntityMapper hiveFieldMapper;
     @Autowired
     private DataSourceService<DatabaseQueryBean, HiveTableQueryBean> dataSourceService;
 
     /**
      * Create hive table according to the groupId and hive config
      */
-    public void createHiveResource(String groupId, List<StorageHiveDTO> configList) {
+    public void createHiveResource(String groupId, List<StorageForSortDTO> configList) {
         if (CollectionUtils.isEmpty(configList)) {
             LOGGER.warn("no hive config, skip to create");
             return;
         }
-        for (StorageHiveDTO config : configList) {
+        for (StorageForSortDTO config : configList) {
             if (EntityStatus.DATA_STORAGE_CONFIG_SUCCESSFUL.getCode().equals(config.getStatus())) {
                 LOGGER.warn("hive [" + config.getId() + "] already success, skip to create");
                 continue;
-            } else if (BizConstant.DISABLE_CREATE_TABLE.equals(config.getEnableCreateTable())) {
+            } else if (BizConstant.DISABLE_CREATE_RESOURCE.equals(config.getEnableCreateResource())) {
                 LOGGER.warn("create table was disable, skip to create table for hive [" + config.getId() + "]");
                 continue;
             }
@@ -75,12 +76,14 @@ public class HiveTableOperator {
         }
     }
 
-    private void createTable(String groupId, StorageHiveDTO config) {
+    private void createTable(String groupId, StorageForSortDTO config) {
         if (LOGGER.isDebugEnabled()) {
             LOGGER.debug("begin create hive table for business={}, config={}", groupId, config);
         }
 
-        HiveTableQueryBean tableBean = getTableQueryBean(config);
+        // Get all info from config
+        HiveStorageDTO hiveInfo = HiveStorageDTO.getFromJson(config.getExtParams());
+        HiveTableQueryBean tableBean = getTableQueryBean(config, hiveInfo);
         try {
             // create database if not exists
             dataSourceService.createDb(tableBean);
@@ -99,11 +102,11 @@ public class HiveTableOperator {
                     dataSourceService.createColumn(tableBean);
                 }
             }
-            storageService.updateHiveStatusById(config.getId(),
+            storageService.updateStatus(config.getId(),
                     EntityStatus.DATA_STORAGE_CONFIG_SUCCESSFUL.getCode(), "create hive table success");
         } catch (Throwable e) {
             LOGGER.error("create hive table error, ", e);
-            storageService.updateHiveStatusById(config.getId(),
+            storageService.updateStatus(config.getId(),
                     EntityStatus.DATA_STORAGE_CONFIG_FAILED.getCode(), e.getMessage());
             throw new WorkflowException("create hive table failed, reason: " + e.getMessage());
         }
@@ -111,15 +114,15 @@ public class HiveTableOperator {
         LOGGER.info("success create hive table for data group [" + groupId + "]");
     }
 
-    protected HiveTableQueryBean getTableQueryBean(StorageHiveDTO config) {
+    protected HiveTableQueryBean getTableQueryBean(StorageForSortDTO config, HiveStorageDTO hiveInfo) {
         String groupId = config.getInlongGroupId();
         String streamId = config.getInlongStreamId();
         LOGGER.info("begin to get table query bean for groupId={}, streamId={}", groupId, streamId);
 
-        List<StorageHiveFieldEntity> fieldEntities = hiveFieldMapper.selectHiveFields(groupId, streamId);
+        List<StorageFieldEntity> fieldEntities = hiveFieldMapper.selectFields(groupId, streamId);
 
         List<HiveColumnQueryBean> columnQueryBeans = new ArrayList<>();
-        for (StorageHiveFieldEntity field : fieldEntities) {
+        for (StorageFieldEntity field : fieldEntities) {
             HiveColumnQueryBean columnBean = new HiveColumnQueryBean();
             columnBean.setColumnName(field.getFieldName());
             columnBean.setColumnType(field.getFieldType());
@@ -128,7 +131,7 @@ public class HiveTableOperator {
         }
 
         // set partition field and type
-        String partitionField = config.getPrimaryPartition();
+        String partitionField = hiveInfo.getPrimaryPartition();
         if (partitionField != null) {
             HiveColumnQueryBean partColumn = new HiveColumnQueryBean();
             partColumn.setPartition(true);
@@ -141,15 +144,15 @@ public class HiveTableOperator {
         HiveTableQueryBean queryBean = new HiveTableQueryBean();
         queryBean.setColumns(columnQueryBeans);
         // set terminated symbol
-        if (config.getTargetSeparator() != null) {
-            char ch = (char) Integer.parseInt(config.getTargetSeparator());
+        if (hiveInfo.getDataSeparator() != null) {
+            char ch = (char) Integer.parseInt(hiveInfo.getDataSeparator());
             queryBean.setFieldTerSymbol(String.valueOf(ch));
         }
-        queryBean.setUsername(config.getUsername());
-        queryBean.setPassword(config.getPassword());
-        queryBean.setTableName(config.getTableName());
-        queryBean.setDbName(config.getDbName());
-        queryBean.setJdbcUrl(config.getJdbcUrl());
+        queryBean.setUsername(hiveInfo.getUsername());
+        queryBean.setPassword(hiveInfo.getPassword());
+        queryBean.setTableName(hiveInfo.getTableName());
+        queryBean.setDbName(hiveInfo.getDbName());
+        queryBean.setJdbcUrl(hiveInfo.getJdbcUrl());
 
         if (LOGGER.isDebugEnabled()) {
             LOGGER.debug("success to get table query bean={}", queryBean);
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/mq/CreatePulsarGroupForStreamTaskListener.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/mq/CreatePulsarGroupForStreamTaskListener.java
index c92e0a7..7ac5677 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/mq/CreatePulsarGroupForStreamTaskListener.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/mq/CreatePulsarGroupForStreamTaskListener.java
@@ -17,7 +17,6 @@
 
 package org.apache.inlong.manager.service.thirdpart.mq;
 
-import java.util.List;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.inlong.manager.common.beans.ClusterBean;
 import org.apache.inlong.manager.common.event.ListenerResult;
@@ -32,13 +31,15 @@ import org.apache.inlong.manager.dao.entity.DataStreamEntity;
 import org.apache.inlong.manager.dao.mapper.DataStreamEntityMapper;
 import org.apache.inlong.manager.service.core.BusinessService;
 import org.apache.inlong.manager.service.core.ConsumptionService;
-import org.apache.inlong.manager.service.core.StorageService;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.apache.inlong.manager.service.thirdpart.mq.util.PulsarUtils;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.apache.pulsar.client.admin.PulsarAdmin;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Component;
 
+import java.util.List;
+
 /**
  * Create a subscription group for a single data stream
  */
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/sort/PushHiveConfigTaskListener.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/sort/PushHiveConfigTaskListener.java
index d563ed4..6be7ba1 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/sort/PushHiveConfigTaskListener.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/thirdpart/sort/PushHiveConfigTaskListener.java
@@ -17,11 +17,6 @@
 
 package org.apache.inlong.manager.service.thirdpart.sort;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.lang3.StringUtils;
@@ -35,15 +30,16 @@ import org.apache.inlong.manager.common.exceptions.WorkflowListenerException;
 import org.apache.inlong.manager.common.model.WorkflowContext;
 import org.apache.inlong.manager.common.pojo.business.BusinessExtInfo;
 import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageForSortDTO;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageDTO;
 import org.apache.inlong.manager.common.settings.BusinessSettings;
 import org.apache.inlong.manager.common.util.JsonUtils;
 import org.apache.inlong.manager.common.util.Preconditions;
 import org.apache.inlong.manager.dao.entity.BusinessEntity;
-import org.apache.inlong.manager.dao.entity.StorageHiveFieldEntity;
+import org.apache.inlong.manager.dao.entity.StorageFieldEntity;
 import org.apache.inlong.manager.dao.mapper.BusinessEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageHiveEntityMapper;
-import org.apache.inlong.manager.dao.mapper.StorageHiveFieldEntityMapper;
+import org.apache.inlong.manager.dao.mapper.StorageEntityMapper;
+import org.apache.inlong.manager.dao.mapper.StorageFieldEntityMapper;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.apache.inlong.sort.ZkTools;
 import org.apache.inlong.sort.formats.common.FormatInfo;
@@ -62,6 +58,12 @@ import org.apache.inlong.sort.protocol.source.TubeSourceInfo;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Component;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
 @Slf4j
 @Component
 public class PushHiveConfigTaskListener implements SortOperateListener {
@@ -87,9 +89,9 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
     @Autowired
     private BusinessEntityMapper businessMapper;
     @Autowired
-    private StorageHiveEntityMapper storageHiveMapper;
+    private StorageEntityMapper storageMapper;
     @Autowired
-    private StorageHiveFieldEntityMapper hiveFieldMapper;
+    private StorageFieldEntityMapper storageFieldMapper;
 
     @Override
     public TaskEvent event() {
@@ -114,15 +116,15 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
 
         // if streamId is not null, push only the config belonging to that groupId and streamId
         String streamId = form.getInlongStreamId();
-        List<StorageHiveDTO> hiveInfoList = storageHiveMapper.selectAllHiveConfig(groupId, streamId);
-        for (StorageHiveDTO hiveInfo : hiveInfoList) {
-            Integer storageId = hiveInfo.getId();
+        List<StorageForSortDTO> sortInfoList = storageMapper.selectAllConfig(groupId, streamId);
+        for (StorageForSortDTO sortInfo : sortInfoList) {
+            Integer storageId = sortInfo.getId();
 
             if (log.isDebugEnabled()) {
-                log.debug("hive storage info: {}", hiveInfo);
+                log.debug("hive storage info: {}", sortInfo);
             }
 
-            DataFlowInfo dataFlowInfo = getDataFlowInfo(businessInfo, hiveInfo);
+            DataFlowInfo dataFlowInfo = getDataFlowInfo(businessInfo, sortInfo);
             // add extra properties for flow info
             dataFlowInfo.getProperties().put(DATA_FLOW_GROUP_ID_KEY, groupId);
             if (log.isDebugEnabled()) {
@@ -145,29 +147,30 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
         return ListenerResult.success();
     }
 
-    private DataFlowInfo getDataFlowInfo(BusinessInfo businessInfo, StorageHiveDTO hiveInfo) {
-        String groupId = hiveInfo.getInlongGroupId();
-        String streamId = hiveInfo.getInlongStreamId();
-        List<StorageHiveFieldEntity> fieldList = hiveFieldMapper.selectHiveFields(groupId, streamId);
+    private DataFlowInfo getDataFlowInfo(BusinessInfo businessInfo, StorageForSortDTO sortInfo) {
+        String groupId = sortInfo.getInlongGroupId();
+        String streamId = sortInfo.getInlongStreamId();
+        List<StorageFieldEntity> fieldList = storageFieldMapper.selectFields(groupId, streamId);
 
         if (fieldList == null || fieldList.size() == 0) {
             throw new WorkflowListenerException("no hive fields for groupId=" + groupId + ", streamId=" + streamId);
         }
 
-        SourceInfo sourceInfo = getSourceInfo(businessInfo, hiveInfo, fieldList);
+        HiveStorageDTO hiveInfo = HiveStorageDTO.getFromJson(sortInfo.getExtParams());
+        SourceInfo sourceInfo = getSourceInfo(businessInfo, sortInfo, hiveInfo, fieldList);
         SinkInfo sinkInfo = getSinkInfo(hiveInfo, fieldList);
 
         // push information
-        return new DataFlowInfo(hiveInfo.getId(), sourceInfo, sinkInfo);
+        return new DataFlowInfo(sortInfo.getId(), sourceInfo, sinkInfo);
     }
 
-    private HiveSinkInfo getSinkInfo(StorageHiveDTO hiveInfo, List<StorageHiveFieldEntity> fieldList) {
+    private HiveSinkInfo getSinkInfo(HiveStorageDTO hiveInfo, List<StorageFieldEntity> fieldList) {
         if (hiveInfo.getJdbcUrl() == null) {
             throw new WorkflowListenerException("hive server url cannot be empty");
         }
 
         // Use the field separator configured for Hive; the default file format is TextFile
-        Character separator = (char) Integer.parseInt(hiveInfo.getTargetSeparator());
+        Character separator = (char) Integer.parseInt(hiveInfo.getDataSeparator());
         HiveFileFormat fileFormat;
         String format = hiveInfo.getFileFormat();
 
@@ -225,31 +228,31 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
     /**
      * Get source info
      */
-    private SourceInfo getSourceInfo(BusinessInfo businessInfo, StorageHiveDTO storageInfo,
-            List<StorageHiveFieldEntity> fieldList) {
+    private SourceInfo getSourceInfo(BusinessInfo businessInfo, StorageForSortDTO sortInfo,
+            HiveStorageDTO hiveInfo, List<StorageFieldEntity> fieldList) {
         DeserializationInfo deserializationInfo = null;
-        boolean isDbType = BizConstant.DATA_SOURCE_DB.equals(storageInfo.getDataSourceType());
+        boolean isDbType = BizConstant.DATA_SOURCE_DB.equals(sortInfo.getDataSourceType());
         if (!isDbType) {
             // For FILE and auto-push sources, the data format is TEXT or KEY-VALUE; temporarily use InLongMsgCsv
-            String dataType = storageInfo.getDataType();
+            String dataType = sortInfo.getDataType();
             if (BizConstant.DATA_TYPE_TEXT.equalsIgnoreCase(dataType)
                     || BizConstant.DATA_TYPE_KEY_VALUE.equalsIgnoreCase(dataType)) {
                 // Use the field separator from the data stream
-                char separator = (char) Integer.parseInt(storageInfo.getSourceSeparator());
+                char separator = (char) Integer.parseInt(sortInfo.getSourceSeparator());
                 // TODO support escape
                 /*Character escape = null;
                 if (info.getDataEscapeChar() != null) {
                     escape = info.getDataEscapeChar().charAt(0);
                 }*/
                 // Whether to delete the head delimiter; the default is false for now
-                deserializationInfo = new InLongMsgCsvDeserializationInfo(storageInfo.getInlongStreamId(), separator);
+                deserializationInfo = new InLongMsgCsvDeserializationInfo(sortInfo.getInlongStreamId(), separator);
             }
         }
 
         // The number and order of the source fields must be the same as the target fields
         SourceInfo sourceInfo = null;
         // Get the source field, if there is no partition field in source, add the partition field to the end
-        List<FieldInfo> sourceFields = getSourceFields(fieldList, storageInfo.getPrimaryPartition());
+        List<FieldInfo> sourceFields = getSourceFields(fieldList, hiveInfo.getPrimaryPartition());
 
         String middleWare = businessInfo.getMiddlewareType();
         if (BizConstant.MIDDLEWARE_TUBE.equalsIgnoreCase(middleWare)) {
@@ -261,7 +264,7 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
             sourceInfo = new TubeSourceInfo(topic, masterAddress, consumerGroup,
                     deserializationInfo, sourceFields.toArray(new FieldInfo[0]));
         } else if (BizConstant.MIDDLEWARE_PULSAR.equalsIgnoreCase(middleWare)) {
-            sourceInfo = createPulsarSourceInfo(businessInfo, storageInfo, deserializationInfo, sourceFields);
+            sourceInfo = createPulsarSourceInfo(businessInfo, sortInfo, deserializationInfo, sourceFields);
         }
 
         return sourceInfo;
@@ -270,10 +273,10 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
     /**
      * Get sink fields
      */
-    private List<FieldInfo> getSinkFields(List<StorageHiveFieldEntity> fieldList, String partitionField) {
+    private List<FieldInfo> getSinkFields(List<StorageFieldEntity> fieldList, String partitionField) {
         boolean duplicate = false;
         List<FieldInfo> fieldInfoList = new ArrayList<>();
-        for (StorageHiveFieldEntity field : fieldList) {
+        for (StorageFieldEntity field : fieldList) {
             String fieldName = field.getFieldName();
             if (fieldName.equals(partitionField)) {
                 duplicate = true;
@@ -296,9 +299,9 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
      * Get source field list
      * TODO  support BuiltInField
      */
-    private List<FieldInfo> getSourceFields(List<StorageHiveFieldEntity> fieldList, String partitionField) {
+    private List<FieldInfo> getSourceFields(List<StorageFieldEntity> fieldList, String partitionField) {
         List<FieldInfo> fieldInfoList = new ArrayList<>();
-        for (StorageHiveFieldEntity field : fieldList) {
+        for (StorageFieldEntity field : fieldList) {
             FormatInfo formatInfo = SortFieldFormatUtils.convertFieldFormat(field.getSourceFieldType().toLowerCase());
             String fieldName = field.getSourceFieldName();
 
@@ -309,13 +312,11 @@ public class PushHiveConfigTaskListener implements SortOperateListener {
         return fieldInfoList;
     }
 
-    private PulsarSourceInfo createPulsarSourceInfo(BusinessInfo businessInfo,
-            StorageHiveDTO storageInfo,
-            DeserializationInfo deserializationInfo,
-            List<FieldInfo> sourceFields) {
+    private PulsarSourceInfo createPulsarSourceInfo(BusinessInfo businessInfo, StorageForSortDTO sortInfo,
+            DeserializationInfo deserializationInfo, List<FieldInfo> sourceFields) {
         final String tenant = clusterBean.getDefaultTenant();
         final String namespace = businessInfo.getMqResourceObj();
-        final String pulsarTopic = storageInfo.getMqResourceObj();
+        final String pulsarTopic = sortInfo.getMqResourceObj();
         // Full name of Topic in Pulsar
         final String fullTopicName = "persistent://" + tenant + "/" + namespace + "/" + pulsarTopic;
         final String consumerGroup = clusterBean.getAppName() + "_" + pulsarTopic + "_consumer_group";
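
The naming above composes the fully qualified Pulsar topic from tenant, namespace, and topic segments. A hedged, standalone sketch of the same scheme, with hypothetical values standing in for clusterBean, businessInfo, and sortInfo:

    public class PulsarNameSketch {
        public static void main(String[] args) {
            String tenant = "public";          // assumed: clusterBean.getDefaultTenant()
            String namespace = "test_group";   // assumed: businessInfo.getMqResourceObj()
            String topic = "test_stream";      // assumed: sortInfo.getMqResourceObj()
            String fullTopicName = "persistent://" + tenant + "/" + namespace + "/" + topic;
            String consumerGroup = "inlong_app" + "_" + topic + "_consumer_group";
            System.out.println(fullTopicName); // persistent://public/test_group/test_stream
            System.out.println(consumerGroup); // inlong_app_test_stream_consumer_group
        }
    }
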
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/business/NewBusinessWorkflowForm.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/business/NewBusinessWorkflowForm.java
index c6cb6ce..9796f56 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/business/NewBusinessWorkflowForm.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/business/NewBusinessWorkflowForm.java
@@ -19,15 +19,16 @@ package org.apache.inlong.manager.service.workflow.business;
 
 import com.google.common.collect.Maps;
 import io.swagger.annotations.ApiModelProperty;
-import java.util.List;
-import java.util.Map;
 import lombok.Data;
 import lombok.EqualsAndHashCode;
-import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
-import org.apache.inlong.manager.common.pojo.datastream.DataStreamSummaryInfo;
-import org.apache.inlong.manager.service.workflow.BaseWorkflowFormType;
 import org.apache.inlong.manager.common.exceptions.FormValidateException;
+import org.apache.inlong.manager.common.pojo.business.BusinessInfo;
+import org.apache.inlong.manager.common.pojo.datastream.StreamBriefResponse;
 import org.apache.inlong.manager.common.util.Preconditions;
+import org.apache.inlong.manager.service.workflow.BaseWorkflowFormType;
+
+import java.util.List;
+import java.util.Map;
 
 /**
  * New business workflow form information
@@ -42,7 +43,7 @@ public class NewBusinessWorkflowForm extends BaseWorkflowFormType {
     private BusinessInfo businessInfo;
 
     @ApiModelProperty(value = "All data stream information under the business, including the storage information")
-    private List<DataStreamSummaryInfo> streamInfoList;
+    private List<StreamBriefResponse> streamInfoList;
 
     @Override
     public void validate() throws FormValidateException {
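
The diff cuts off before the validate() body, but the imports above (Preconditions, FormValidateException) suggest the usual shape of such a hook. A hedged sketch only, assuming the project's Preconditions.checkNotNull(obj, errMsg) helper; the real checks are outside this hunk:

    // Hypothetical validate() body, not part of this commit.
    Preconditions.checkNotNull(businessInfo, "business info cannot be null");
    Preconditions.checkNotNull(streamInfoList, "stream info list cannot be null");
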
diff --git a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/stream/CreateStreamWorkflowDefinition.java b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/stream/CreateStreamWorkflowDefinition.java
index b9da8a9..ad9f744 100644
--- a/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/stream/CreateStreamWorkflowDefinition.java
+++ b/inlong-manager/manager-service/src/main/java/org/apache/inlong/manager/service/workflow/stream/CreateStreamWorkflowDefinition.java
@@ -17,12 +17,14 @@
 
 package org.apache.inlong.manager.service.workflow.stream;
 
-import java.util.Collections;
-import java.util.List;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.inlong.manager.common.enums.BizConstant;
-import org.apache.inlong.manager.service.core.StorageService;
+import org.apache.inlong.manager.common.model.definition.EndEvent;
+import org.apache.inlong.manager.common.model.definition.Process;
+import org.apache.inlong.manager.common.model.definition.ServiceTask;
+import org.apache.inlong.manager.common.model.definition.StartEvent;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.apache.inlong.manager.service.thirdpart.hive.CreateHiveTableForStreamListener;
 import org.apache.inlong.manager.service.thirdpart.mq.CreatePulsarGroupForStreamTaskListener;
 import org.apache.inlong.manager.service.thirdpart.mq.CreatePulsarTopicForStreamTaskListener;
@@ -31,13 +33,12 @@ import org.apache.inlong.manager.service.workflow.ProcessName;
 import org.apache.inlong.manager.service.workflow.WorkflowDefinition;
 import org.apache.inlong.manager.service.workflow.business.BusinessResourceWorkflowForm;
 import org.apache.inlong.manager.service.workflow.business.listener.InitBusinessInfoListener;
-import org.apache.inlong.manager.common.model.definition.EndEvent;
-import org.apache.inlong.manager.common.model.definition.Process;
-import org.apache.inlong.manager.common.model.definition.ServiceTask;
-import org.apache.inlong.manager.common.model.definition.StartEvent;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Component;
 
+import java.util.Collections;
+import java.util.List;
+
 /**
  * Data stream access resource creation
  */
@@ -122,7 +123,7 @@ public class CreateStreamWorkflowDefinition implements WorkflowDefinition {
             BusinessResourceWorkflowForm form = (BusinessResourceWorkflowForm) c.getProcessForm();
             String groupId = form.getInlongGroupId();
             String streamId = form.getInlongStreamId();
-            List<String> dsForHive = storageService.filterStreamIdByStorageType(groupId, BizConstant.STORAGE_HIVE,
+            List<String> dsForHive = storageService.getExistsStreamIdList(groupId, BizConstant.STORAGE_HIVE,
                     Collections.singletonList(streamId));
             if (CollectionUtils.isEmpty(dsForHive)) {
                 log.warn("business [{}] adn data stream [{}] does not have storage, skip create hive table", groupId,
diff --git a/inlong-manager/manager-service/src/test/java/org/apache/inlong/manager/service/BaseConfig.java b/inlong-manager/manager-service/src/test/java/org/apache/inlong/manager/service/BaseConfig.java
index 653bdb5..753d2a9 100644
--- a/inlong-manager/manager-service/src/test/java/org/apache/inlong/manager/service/BaseConfig.java
+++ b/inlong-manager/manager-service/src/test/java/org/apache/inlong/manager/service/BaseConfig.java
@@ -17,15 +17,22 @@
 
 package org.apache.inlong.manager.service;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.web.client.RestTemplate;
 
 @Configuration
 public class BaseConfig {
+
     @Bean
     public RestTemplate restTemplate() {
         return new RestTemplate();
     }
 
+    @Bean
+    public ObjectMapper objectMapper() {
+        return new ObjectMapper();
+    }
+
 }
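
The ObjectMapper bean registered here gives the test context the same Jackson entry point the manager uses to serialize storage-specific fields into the new data_storage.ext_params JSON column and back (see HiveStorageDTO.getFromJson above). A hedged sketch of that round-trip, with hypothetical field values:

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.HashMap;
    import java.util.Map;

    public class ExtParamsSketch {
        public static void main(String[] args) throws Exception {
            ObjectMapper objectMapper = new ObjectMapper();
            Map<String, String> hiveParams = new HashMap<>();
            hiveParams.put("jdbcUrl", "jdbc:hive2://127.0.0.1:10000"); // sample values only
            hiveParams.put("dbName", "test_db");
            hiveParams.put("tableName", "test_table");
            String extParams = objectMapper.writeValueAsString(hiveParams);
            Map<?, ?> parsed = objectMapper.readValue(extParams, HashMap.class);
            System.out.println(extParams + " -> " + parsed);
        }
    }
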
diff --git a/inlong-manager/manager-service/src/test/resources/application-test.properties b/inlong-manager/manager-service/src/test/resources/application-test.properties
index 7603e24..5dd5b84 100644
--- a/inlong-manager/manager-service/src/test/resources/application-test.properties
+++ b/inlong-manager/manager-service/src/test/resources/application-test.properties
@@ -16,14 +16,18 @@
 # specific language governing permissions and limitations
 # under the License.
 #
+
 # Log level
 logging.level.root=INFO
 logging.level.org.apache.inlong.manager=debug
+
 spring.datasource.druid.url=jdbc:h2:mem:test;MODE=MYSQL;DB_CLOSE_DELAY=-1;IGNORECASE=TRUE;
 spring.datasource.druid.username=root
 spring.datasource.druid.password=""
+
 spring.datasource.druid.driver-class-name=org.h2.Driver
 spring.datasource.schema=classpath:sql/apache_inlong_manager.sql
+
 spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
 spring.datasource.druid.validationQuery=SELECT 'x'
 # Initialization size, minimum, maximum
@@ -44,26 +48,32 @@ spring.datasource.druid.testOnReturn=false
 spring.datasource.druid.filters=stat,wall
 # Enable the mergeSql function and slow SQL recording through the connectProperties property
 spring.datasource.druid.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
+
 # Manager address of Tube cluster, used to create Topic
 cluster.tube.manager=http://127.0.0.1:8081
 # Master address, used to manage Tube broker
 cluster.tube.master=127.0.0.1:8000,127.0.0.1:8010
 # Tube cluster ID
 cluster.tube.clusterId=1
+
 # Push configuration to the path on ZooKeeper
 cluster.zk.url=127.0.0.1:2181
 cluster.zk.root=inlong_hive
+
 # Application name in Sort
 sort.appName=inlong_app
+
 # Pulsar admin URL
 pulsar.adminUrl=http://127.0.0.1:8080,127.0.0.2:8080,127.0.0.3:8080
 # Pulsar broker address
 pulsar.serviceUrl=pulsar://127.0.0.1:6650,127.0.0.1:6650,127.0.0.1:6650
 # Default tenant of Pulsar
 pulsar.defaultTenant=public
+
 # Audit configuration
 # Audit query source that decides which data source to query, currently only supports [MYSQL|ELASTICSEARCH]
 audit.query.source=MYSQL
+
 # Elasticsearch config
 # Elasticsearch hosts, split by comma if more than one, such as 'host1,host2'
 es.index.search.hostname=127.0.0.1
diff --git a/inlong-manager/manager-service/src/test/resources/sql/apache_inlong_manager.sql b/inlong-manager/manager-service/src/test/resources/sql/apache_inlong_manager.sql
index 8ba9380..75e1654 100644
--- a/inlong-manager/manager-service/src/test/resources/sql/apache_inlong_manager.sql
+++ b/inlong-manager/manager-service/src/test/resources/sql/apache_inlong_manager.sql
@@ -72,11 +72,11 @@ DROP TABLE IF EXISTS `business`;
 CREATE TABLE `business`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `name`                varchar(128)          DEFAULT '' COMMENT 'Business name, English, numbers and underscore',
     `cn_name`             varchar(256)          DEFAULT NULL COMMENT 'Chinese display name',
     `description`         varchar(256)          DEFAULT '' COMMENT 'Business Introduction',
-    `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
+    `middleware_type`     varchar(20)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
     `queue_module`        VARCHAR(20)  NULL     DEFAULT 'parallel' COMMENT 'Queue model of Pulsar, parallel: multiple partitions, high throughput, out-of-order messages; serial: single partition, low throughput, and orderly messages',
     `topic_partition_num` INT(4)       NULL     DEFAULT '3' COMMENT 'The number of partitions of Pulsar Topic, 1-20',
     `mq_resource_obj`     varchar(128) NOT NULL COMMENT 'MQ resource object, for Tube, its Topic, for Pulsar, its Namespace',
@@ -105,7 +105,7 @@ DROP TABLE IF EXISTS `business_pulsar`;
 CREATE TABLE `business_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `ensemble`            int(3)                DEFAULT '3' COMMENT 'The writable nodes number of ledger',
     `write_quorum`        int(3)                DEFAULT '3' COMMENT 'The copies number of ledger',
     `ack_quorum`          int(3)                DEFAULT '2' COMMENT 'The number of requested acks',
@@ -128,13 +128,12 @@ DROP TABLE IF EXISTS `business_ext`;
 CREATE TABLE `business_ext`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id',
     `key_name`        varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`       varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`      tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time`     timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_group_id` (`inlong_group_id`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -228,12 +227,12 @@ DROP TABLE IF EXISTS `consumption`;
 CREATE TABLE `consumption`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `consumer_group_name` varchar(255) DEFAULT NULL COMMENT 'consumer group name',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256)          DEFAULT NULL COMMENT 'consumer group name',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
     `in_charges`          varchar(512) NOT NULL COMMENT 'Person in charge of consumption',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id',
     `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
-    `topic`               varchar(255) NOT NULL COMMENT 'Consumption topic',
+    `topic`               varchar(256) NOT NULL COMMENT 'Consumption topic',
     `filter_enabled`      int(2)                DEFAULT '0' COMMENT 'Whether to filter consumption, default 0: do not filter',
     `inlong_stream_id`    varchar(1024)         DEFAULT NULL COMMENT 'Data stream ID for consumption; if filter_enabled is 1, it cannot be empty',
     `status`              int(4)       NOT NULL COMMENT 'Status: draft, pending approval, approval rejected, approval passed',
@@ -253,13 +252,13 @@ CREATE TABLE `consumption_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT,
     `consumption_id`      int(11)      DEFAULT NULL COMMENT 'ID of the consumption information to which it belongs, guaranteed to be uniquely associated with consumption information',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
-    `consumer_group_name` varchar(255) DEFAULT NULL COMMENT 'Consumer group name',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group ID',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256) DEFAULT NULL COMMENT 'Consumer group name',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group ID',
     `is_rlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure the retry letter topic, 0: no configuration, 1: configuration',
-    `retry_letter_topic`  varchar(255) DEFAULT NULL COMMENT 'The name of the retry queue topic',
+    `retry_letter_topic`  varchar(256) DEFAULT NULL COMMENT 'The name of the retry queue topic',
     `is_dlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure dead letter topic, 0: no configuration, 1: means configuration',
-    `dead_letter_topic`   varchar(255) DEFAULT NULL COMMENT 'dead letter topic name',
+    `dead_letter_topic`   varchar(256) DEFAULT NULL COMMENT 'dead letter topic name',
     `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete',
     PRIMARY KEY (`id`)
 ) COMMENT ='Pulsar consumption table';
@@ -323,8 +322,7 @@ CREATE TABLE `data_source_cmd_config`
     `modify_time`         timestamp   NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Last update time ',
     `create_time`         timestamp   NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `result_info`         varchar(64)          DEFAULT NULL,
-    PRIMARY KEY (`id`),
-    KEY `index_1` (`task_id`, `bSend`, `specified_data_time`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -334,9 +332,9 @@ DROP TABLE IF EXISTS `data_stream`;
 CREATE TABLE `data_stream`
 (
     `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_stream_id`       varchar(128) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
-    `inlong_group_id`        varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `name`                   varchar(64)  DEFAULT NULL COMMENT 'The name of the data stream page display, can be Chinese',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `name`                   varchar(64)       DEFAULT NULL COMMENT 'The name displayed on the data stream page, can be Chinese',
     `description`            varchar(256)      DEFAULT '' COMMENT 'Introduction to data stream',
     `mq_resource_obj`        varchar(128)      DEFAULT NULL COMMENT 'MQ resource object; in the data stream, it is data_stream_id for Tube and Topic for Pulsar',
     `data_source_type`       varchar(32)       DEFAULT 'FILE' COMMENT 'Data source type, including: FILE, DB, Auto-Push (DATA_PROXY_SDK, HTTP)',
@@ -358,7 +356,7 @@ CREATE TABLE `data_stream`
     `modifier`               varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`            timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `modify_time`            timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    `temp_view`              varchar(512)      DEFAULT NULL COMMENT 'Temporary view, used to save intermediate data that has not been submitted or approved after modification',
+    `temp_view`              text              DEFAULT NULL COMMENT 'Temporary view, used to save intermediate data that has not been submitted or approved after modification',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_data_stream` (`inlong_stream_id`, `inlong_group_id`, `is_deleted`, `modify_time`)
 );
@@ -370,14 +368,13 @@ DROP TABLE IF EXISTS `data_stream_ext`;
 CREATE TABLE `data_stream_ext`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `key_name`         varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`        varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time`      timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_stream_id` (`inlong_stream_id`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -387,20 +384,16 @@ DROP TABLE IF EXISTS `data_stream_field`;
 CREATE TABLE `data_stream_field`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id`    varchar(256) NOT NULL COMMENT 'Owning data stream id',
-    `is_predefined_field` tinyint(1)   DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
-    `field_name`          varchar(20)  NOT NULL COMMENT 'field name',
-    `field_value`         varchar(128) DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
-    `pre_expression`      varchar(256) DEFAULT NULL COMMENT 'Pre-defined field value expression',
-    `field_type`          varchar(20)  NOT NULL COMMENT 'field type',
-    `field_comment`       varchar(50)  DEFAULT NULL COMMENT 'Field description',
-    `rank_num`            smallint(6)  DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `is_exist`            tinyint(1)   DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
-    `bon_field_path`      varchar(256) DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`      varchar(64)  DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`       varchar(20)  DEFAULT NULL COMMENT 'Encryption level',
+    `is_predefined_field` tinyint(1)    DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
+    `field_name`          varchar(50)  NOT NULL COMMENT 'field name',
+    `field_value`         varchar(128)  DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
+    `pre_expression`      varchar(256)  DEFAULT NULL COMMENT 'Pre-defined field value expression',
+    `field_type`          varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`       varchar(2000) DEFAULT NULL COMMENT 'field description',
+    `rank_num`            smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
+    `is_deleted`          tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     PRIMARY KEY (`id`)
 );
 
@@ -412,13 +405,13 @@ CREATE TABLE `operation_log`
 (
     `id`                  int(11)   NOT NULL AUTO_INCREMENT,
     `authentication_type` varchar(64)        DEFAULT NULL COMMENT 'Authentication type',
-    `operation_type`      varchar(255)       DEFAULT NULL COMMENT 'operation type',
+    `operation_type`      varchar(256)       DEFAULT NULL COMMENT 'operation type',
     `http_method`         varchar(64)        DEFAULT NULL COMMENT 'Request method',
-    `invoke_method`       varchar(255)       DEFAULT NULL COMMENT 'invoke method',
-    `operator`            varchar(255)       DEFAULT NULL COMMENT 'operator',
-    `proxy`               varchar(255)       DEFAULT NULL COMMENT 'proxy',
-    `request_url`         varchar(255)       DEFAULT NULL COMMENT 'Request URL',
-    `remote_address`      varchar(255)       DEFAULT NULL COMMENT 'Request IP',
+    `invoke_method`       varchar(256)       DEFAULT NULL COMMENT 'invoke method',
+    `operator`            varchar(256)       DEFAULT NULL COMMENT 'operator',
+    `proxy`               varchar(256)       DEFAULT NULL COMMENT 'proxy',
+    `request_url`         varchar(256)       DEFAULT NULL COMMENT 'Request URL',
+    `remote_address`      varchar(256)       DEFAULT NULL COMMENT 'Request IP',
     `cost_time`           bigint(20)         DEFAULT NULL COMMENT 'Time consumed',
     `body`                text COMMENT 'Request body',
     `param`               text COMMENT 'parameter',
@@ -436,15 +429,15 @@ CREATE TABLE `role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
     `role_code`   varchar(100) NOT NULL COMMENT 'Role code',
-    `role_name`   varchar(255) NOT NULL COMMENT 'Role Chinese name',
+    `role_name`   varchar(256) NOT NULL COMMENT 'Role Chinese name',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`),
-    UNIQUE KEY `role_role_code_uindex` (`role_code`),
-    UNIQUE KEY `role_role_name_uindex` (`role_name`)
+    UNIQUE KEY `unique_role_code` (`role_code`),
+    UNIQUE KEY `unique_role_name` (`role_name`)
 );
 
 -- ----------------------------
@@ -454,7 +447,7 @@ DROP TABLE IF EXISTS `source_db_basic`;
 CREATE TABLE `source_db_basic`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `sync_type`        tinyint(1)            DEFAULT '0' COMMENT 'Data synchronization type, 0: FULL, full amount, 1: INCREMENTAL, incremental',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -473,8 +466,8 @@ DROP TABLE IF EXISTS `source_db_detail`;
 CREATE TABLE `source_db_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)  NOT NULL COMMENT 'Collection type: Agent, DataProxy client, or LoadProxy',
     `db_name`          varchar(128)          DEFAULT NULL COMMENT 'database name',
     `transfer_ip`      varchar(64)           DEFAULT NULL COMMENT 'Transfer IP',
@@ -501,8 +494,8 @@ DROP TABLE IF EXISTS `source_file_basic`;
 CREATE TABLE `source_file_basic`
 (
     `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'ID',
-    `inlong_group_id`   varchar(128) NOT NULL COMMENT 'Business group id',
-    `inlong_stream_id`  varchar(128) NOT NULL COMMENT 'Data stream id',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'Business group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'Data stream id',
     `is_hybrid_source`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to mix data sources',
     `is_table_mapping`  tinyint(1)            DEFAULT '0' COMMENT 'Is there a table name mapping',
     `date_offset`       int(4)                DEFAULT '0' COMMENT 'Time offset',
@@ -526,8 +519,8 @@ DROP TABLE IF EXISTS `source_file_detail`;
 CREATE TABLE `source_file_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)           DEFAULT 'Agent' COMMENT 'Collection type: Agent, DataProxy client, or LoadProxy; files currently support Agent only',
     `server_name`      varchar(64)           DEFAULT NULL COMMENT 'The name of the data source service. If it is empty, add configuration through the following fields',
     `ip`               varchar(128) NOT NULL COMMENT 'Data source IP address',
@@ -561,67 +554,51 @@ CREATE TABLE `storage_ext`
     `key_value`    varchar(256)         DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`   tinyint(1)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time`  timestamp   NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_storage_id` (`storage_id`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
--- Table structure for storage_hive
+-- Table structure for data_storage
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive`;
-CREATE TABLE `storage_hive`
+DROP TABLE IF EXISTS `data_storage`;
+CREATE TABLE `data_storage`
 (
-    `id`                          int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`             varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id`            varchar(128) NOT NULL COMMENT 'Owning data stream id',
-    `jdbc_url`                    varchar(255)          DEFAULT NULL COMMENT 'Hive JDBC connection URL, such as "jdbc:hive2://127.0.0.1:10000"',
-    `username`                    varchar(128)          DEFAULT NULL COMMENT 'Username',
-    `password`                    varchar(255)          DEFAULT NULL COMMENT 'User password',
-    `db_name`                     varchar(128)          DEFAULT NULL COMMENT 'Target database name',
-    `table_name`                  varchar(128)          DEFAULT NULL COMMENT 'Target data table name',
-    `hdfs_default_fs`             varchar(255)          DEFAULT NULL COMMENT 'HDFS defaultFS, such as "hdfs://127.0.0.1:9000"',
-    `warehouse_dir`               varchar(250)          DEFAULT '/user/hive/warehouse' COMMENT 'Hive table storage path on HDFS, such as "/user/hive/warehouse"',
-    `partition_interval`          int(5)                DEFAULT NULL COMMENT 'Partition interval, support: 1(D / H), 10 I, 30 I',
-    `partition_unit`              varchar(10)           DEFAULT 'D' COMMENT 'Partition type, support: D-day, H-hour, I-minute',
-    `primary_partition`           varchar(255)          DEFAULT 'dt' COMMENT 'primary partition field',
-    `secondary_partition`         varchar(256)          DEFAULT NULL COMMENT 'secondary partition field',
-    `partition_creation_strategy` varchar(50)           DEFAULT 'COMPLETED' COMMENT 'Partition creation strategy, support: ARRIVED, COMPLETED',
-    `file_format`                 varchar(15)           DEFAULT 'TextFile' COMMENT 'The stored table format, TextFile, RCFile, SequenceFile, Avro',
-    `data_encoding`               varchar(20)           DEFAULT 'UTF-8' COMMENT 'data encoding type',
-    `data_separator`              varchar(10)           DEFAULT NULL COMMENT 'data field separator',
-    `storage_period`              int(5)                DEFAULT '10' COMMENT 'Data storage period, unit: day',
-    `opt_log`                     varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
-    `status`                      int(4)                DEFAULT '0' COMMENT 'status',
-    `previous_status`             int(4)                DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`                  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `creator`                     varchar(64)           DEFAULT NULL COMMENT 'creator name',
-    `modifier`                    varchar(64)           DEFAULT NULL COMMENT 'modifier name',
-    `create_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
-    `modify_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'modify time',
-    `temp_view`                   text                  DEFAULT NULL COMMENT 'Temporary view, used to save un-submitted and unapproved intermediate data after modification',
+    `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Owning data stream id',
+    `storage_type`           varchar(15)           DEFAULT 'HIVE' COMMENT 'Storage type, including: HIVE, ES, etc',
+    `storage_period`         int(11)               DEFAULT '10' COMMENT 'Data storage period, unit: day',
+    `enable_create_resource` tinyint(1)            DEFAULT '1' COMMENT 'Whether to enable creating the storage resource, 0: disable, 1: enable, default is 1',
+    `ext_params`             text COMMENT 'Other fields, saved as JSON type',
+    `operate_log`            varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
+    `status`                 int(11)               DEFAULT '0' COMMENT 'Status',
+    `previous_status`        int(11)               DEFAULT '0' COMMENT 'Previous status',
+    `is_deleted`             tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
+    `creator`                varchar(64)  NOT NULL COMMENT 'Creator name',
+    `modifier`               varchar(64)           DEFAULT NULL COMMENT 'Modifier name',
+    `create_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
+    `modify_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
--- Table structure for storage_hive_field
+-- Table structure for data_storage_field
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive_field`;
-CREATE TABLE `storage_hive_field`
+DROP TABLE IF EXISTS `data_storage_field`;
+CREATE TABLE `data_storage_field`
 (
-    `id`                int(11)     NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `storage_id`        int(11)     NOT NULL COMMENT 'Hive data storage id',
-    `source_field_name` varchar(20) NOT NULL COMMENT 'source field name',
-    `source_field_type` varchar(20) NOT NULL COMMENT 'source field type',
-    `field_name`        varchar(20) NOT NULL COMMENT 'field name',
-    `field_type`        varchar(20) NOT NULL COMMENT 'field type',
-    `field_comment`     varchar(2000) DEFAULT '' COMMENT 'Field description',
-    `is_required`       tinyint(1)    DEFAULT NULL COMMENT 'Is it required, 0: not necessary, 1: required',
-    `bon_field_path`    varchar(256)  DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`    varchar(64)   DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`     varchar(20)   DEFAULT NULL COMMENT 'Encryption level',
-    `is_exist`          tinyint(1)    DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
+    `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'inlong group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'inlong stream id',
+    `storage_id`        int(11)      NOT NULL COMMENT 'data storage id',
+    `storage_type`      varchar(15)  NOT NULL COMMENT 'storage type',
+    `source_field_name` varchar(50)   DEFAULT NULL COMMENT 'source field name',
+    `source_field_type` varchar(50)   DEFAULT NULL COMMENT 'source field type',
+    `field_name`        varchar(50)  NOT NULL COMMENT 'field name',
+    `field_type`        varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`     varchar(2000) DEFAULT NULL COMMENT 'field description',
     `rank_num`          smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`)
 );
 
@@ -634,12 +611,12 @@ CREATE TABLE `task`
     `id`          bigint(20)   NOT NULL,
     `taskflow_id` bigint(20)   NOT NULL COMMENT 'Owning task flow id',
     `task_def_id` bigint(20)    DEFAULT NULL COMMENT 'task definition id',
-    `task_name`   varchar(255) NOT NULL COMMENT 'task name',
-    `status`      varchar(255)  DEFAULT NULL COMMENT 'task status',
-    `post_param`  varchar(255)  DEFAULT NULL COMMENT 'Task parameters',
+    `task_name`   varchar(256) NOT NULL COMMENT 'task name',
+    `status`      varchar(256)  DEFAULT NULL COMMENT 'task status',
+    `post_param`  varchar(256)  DEFAULT NULL COMMENT 'Task parameters',
     `resultmsg`   varchar(1000) DEFAULT NULL COMMENT 'Execution result log',
     `create_time` datetime     NOT NULL COMMENT 'Create time',
-    `create_by`   varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`   varchar(256) NOT NULL COMMENT 'creator',
     `update_time` datetime      DEFAULT NULL COMMENT 'last modified time',
     `update_by`   varchar(256)  DEFAULT NULL COMMENT 'last modified person',
     PRIMARY KEY (`id`)
@@ -654,10 +631,10 @@ CREATE TABLE `task_def`
     `id`              bigint(20)   NOT NULL,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Task flow definition id',
     `parent_id`       bigint(20)   DEFAULT NULL COMMENT 'parent task id',
-    `implclass`       varchar(255) DEFAULT NULL COMMENT 'task processing flow class',
-    `task_name`       varchar(255) DEFAULT NULL COMMENT 'task name',
+    `implclass`       varchar(256) DEFAULT NULL COMMENT 'task processing flow class',
+    `task_name`       varchar(256) DEFAULT NULL COMMENT 'task name',
     `create_time`     datetime     NOT NULL COMMENT 'Create time',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
     `update_by`       datetime     DEFAULT NULL COMMENT 'last modified person',
     `delivery_id`     bigint(20)   DEFAULT NULL COMMENT 'Task push method',
@@ -672,12 +649,12 @@ CREATE TABLE `taskflow`
 (
     `id`              bigint(20)   NOT NULL AUTO_INCREMENT,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Taskflow definition id',
-    `status`          varchar(255) DEFAULT NULL COMMENT 'status',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `status`          varchar(256) DEFAULT NULL COMMENT 'status',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `create_time`     datetime     DEFAULT NULL COMMENT 'Create time',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
-    `update_by`       varchar(255) DEFAULT NULL COMMENT 'last modified person',
-    `event`           varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `update_by`       varchar(256) DEFAULT NULL COMMENT 'last modified person',
+    `event`           varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -688,12 +665,12 @@ DROP TABLE IF EXISTS `taskflow_def`;
 CREATE TABLE `taskflow_def`
 (
     `id`            bigint(20)   NOT NULL AUTO_INCREMENT,
-    `name`          varchar(255) NOT NULL COMMENT 'Workflow definition name',
-    `descrip`       varchar(255) DEFAULT NULL COMMENT 'Workflow function description',
+    `name`          varchar(256) NOT NULL COMMENT 'Workflow definition name',
+    `descrip`       varchar(256) DEFAULT NULL COMMENT 'Workflow function description',
     `create_time`   datetime     NOT NULL COMMENT 'Create time',
-    `create_by`     varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`     varchar(256) NOT NULL COMMENT 'creator',
     `isValid`       int(11)      DEFAULT NULL COMMENT 'logical deletion',
-    `trigger_event` varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `trigger_event` varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -704,16 +681,16 @@ DROP TABLE IF EXISTS `user`;
 CREATE TABLE `user`
 (
     `id`           int(11)      NOT NULL AUTO_INCREMENT,
-    `name`         varchar(255) NOT NULL COMMENT 'account name',
+    `name`         varchar(256) NOT NULL COMMENT 'account name',
     `password`     varchar(64)  NOT NULL COMMENT 'password md5',
     `account_type` int(11)      NOT NULL DEFAULT '1' COMMENT 'account type, 0-manager 1-normal',
     `due_date`     datetime              DEFAULT NULL COMMENT 'due date for account',
     `create_time`  datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `update_time`  datetime              DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
-    `create_by`    varchar(255) NOT NULL COMMENT 'create by sb.',
-    `update_by`    varchar(255)          DEFAULT NULL COMMENT 'update by sb.',
+    `create_by`    varchar(256) NOT NULL COMMENT 'created by somebody',
+    `update_by`    varchar(256)          DEFAULT NULL COMMENT 'updated by somebody',
     PRIMARY KEY (`id`),
-    UNIQUE KEY `user_name_uindex` (`name`)
+    UNIQUE KEY `unique_user_name` (`name`)
 );
 
 -- create default admin user, username is 'admin', password is 'inlong'
@@ -728,12 +705,12 @@ DROP TABLE IF EXISTS `user_role`;
 CREATE TABLE `user_role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
-    `user_name`   varchar(255) NOT NULL COMMENT 'username rtx',
-    `role_code`   varchar(255) NOT NULL COMMENT 'role',
+    `user_name`   varchar(256) NOT NULL COMMENT 'username',
+    `role_code`   varchar(256) NOT NULL COMMENT 'role',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`)
 );
@@ -745,11 +722,11 @@ DROP TABLE IF EXISTS `wf_approver`;
 CREATE TABLE `wf_approver`
 (
     `id`                int(11)       NOT NULL AUTO_INCREMENT,
-    `process_name`      varchar(255)  NOT NULL COMMENT 'process definition name',
-    `task_name`         varchar(255)  NOT NULL COMMENT 'Approval task name',
+    `process_name`      varchar(256)  NOT NULL COMMENT 'process definition name',
+    `task_name`         varchar(256)  NOT NULL COMMENT 'Approval task name',
     `filter_key`        varchar(64)   NOT NULL COMMENT 'filter condition KEY',
-    `filter_value`      varchar(255)           DEFAULT NULL COMMENT 'Filter matching value',
-    `filter_value_desc` varchar(255)           DEFAULT NULL COMMENT 'Filter value description',
+    `filter_value`      varchar(256)           DEFAULT NULL COMMENT 'Filter matching value',
+    `filter_value_desc` varchar(256)           DEFAULT NULL COMMENT 'Filter value description',
     `approvers`         varchar(1024) NOT NULL COMMENT 'Approvers, separated by commas',
     `creator`           varchar(64)   NOT NULL COMMENT 'creator',
     `modifier`          varchar(64)   NOT NULL COMMENT 'modifier',
@@ -757,7 +734,7 @@ CREATE TABLE `wf_approver`
     `modify_time`       timestamp     NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
     `is_deleted`        int(11)                DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`),
-    KEY `process_name_task_name_index` (`process_name`, `task_name`)
+    UNIQUE KEY `process_name_task_name_index` (`process_name`, `task_name`)
 );
 
 -- create default approver for new consumption and new business
@@ -776,12 +753,12 @@ CREATE TABLE `wf_event_log`
 (
     `id`                   int(11)      NOT NULL AUTO_INCREMENT,
     `process_inst_id`      int(11)      NOT NULL,
-    `process_name`         varchar(255)  DEFAULT NULL COMMENT 'Process name',
-    `process_display_name` varchar(255) NOT NULL COMMENT 'Process name',
-    `inlong_group_id`      varchar(128)  DEFAULT NULL COMMENT 'Business group id',
+    `process_name`         varchar(256)  DEFAULT NULL COMMENT 'Process name',
+    `process_display_name` varchar(256) NOT NULL COMMENT 'Process display name',
+    `inlong_group_id`      varchar(256)  DEFAULT NULL COMMENT 'Business group id',
     `task_inst_id`         int(11)       DEFAULT NULL COMMENT 'Task ID',
-    `element_name`         varchar(255) NOT NULL COMMENT 'The name of the component that triggered the event',
-    `element_display_name` varchar(255) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
+    `element_name`         varchar(256) NOT NULL COMMENT 'The name of the component that triggered the event',
+    `element_display_name` varchar(256) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
     `event_type`           varchar(64)  NOT NULL COMMENT 'Event type: process event/task event',
     `event`                varchar(64)  NOT NULL COMMENT 'Event name',
     `listener`             varchar(1024) DEFAULT NULL COMMENT 'Event listener name',
@@ -802,17 +779,17 @@ DROP TABLE IF EXISTS `wf_process_instance`;
 CREATE TABLE `wf_process_instance`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT,
-    `name`            varchar(255) NOT NULL COMMENT 'process name',
-    `display_name`    varchar(255) NOT NULL COMMENT 'Process display name',
-    `type`            varchar(255)          DEFAULT NULL COMMENT 'Process classification',
-    `title`           varchar(255)          DEFAULT NULL COMMENT 'Process title',
-    `inlong_group_id` varchar(128)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
-    `applicant`       varchar(255) NOT NULL COMMENT 'applicant',
+    `name`            varchar(256) NOT NULL COMMENT 'process name',
+    `display_name`    varchar(256) NOT NULL COMMENT 'Process display name',
+    `type`            varchar(256)          DEFAULT NULL COMMENT 'Process classification',
+    `title`           varchar(256)          DEFAULT NULL COMMENT 'Process title',
+    `inlong_group_id` varchar(256)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
+    `applicant`       varchar(256) NOT NULL COMMENT 'applicant',
     `state`           varchar(64)  NOT NULL COMMENT 'state',
     `form_data`       mediumtext COMMENT 'form information',
     `start_time`      datetime     NOT NULL COMMENT 'start time',
     `end_time`        datetime              DEFAULT NULL COMMENT 'End time',
-    `ext`             text COMMENT 'Extended information-json',
+    `ext`             text COMMENT 'Extended information-JSON',
     `hidden`          tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it hidden',
     PRIMARY KEY (`id`)
 );
@@ -826,19 +803,19 @@ CREATE TABLE `wf_task_instance`
     `id`                   int(11)       NOT NULL AUTO_INCREMENT,
     `type`                 varchar(64)   NOT NULL COMMENT 'Task type: UserTask user task/ServiceTask system task',
     `process_inst_id`      int(11)       NOT NULL COMMENT 'process ID',
-    `process_name`         varchar(255)  NOT NULL COMMENT 'process name',
-    `process_display_name` varchar(255)  NOT NULL COMMENT 'process name',
-    `name`                 varchar(255)  NOT NULL COMMENT 'task name',
-    `display_name`         varchar(255)  NOT NULL COMMENT 'Task display name',
+    `process_name`         varchar(256)  NOT NULL COMMENT 'process name',
+    `process_display_name` varchar(256)  NOT NULL COMMENT 'process display name',
+    `name`                 varchar(256)  NOT NULL COMMENT 'task name',
+    `display_name`         varchar(256)  NOT NULL COMMENT 'Task display name',
     `applicant`            varchar(64)   DEFAULT NULL COMMENT 'applicant',
     `approvers`            varchar(1024) NOT NULL COMMENT 'approvers',
     `state`                varchar(64)   NOT NULL COMMENT 'state',
-    `operator`             varchar(255)  DEFAULT NULL COMMENT 'actual operator',
+    `operator`             varchar(256)  DEFAULT NULL COMMENT 'actual operator',
     `remark`               varchar(1024) DEFAULT NULL COMMENT 'Remark information',
     `form_data`            mediumtext COMMENT 'form information submitted by the current task',
     `start_time`           datetime      NOT NULL COMMENT 'start time',
     `end_time`             datetime      DEFAULT NULL COMMENT 'End time',
-    `ext`                  text COMMENT 'Extended information-json',
+    `ext`                  text COMMENT 'Extended information-JSON',
     PRIMARY KEY (`id`)
 );
 
@@ -873,7 +850,7 @@ CREATE TABLE `cluster_set_inlongid`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
     `set_name`        varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_cluster_set_inlongid` (`set_name`, `inlong_group_id`)
 );
@@ -904,8 +881,7 @@ CREATE TABLE `cache_cluster_ext`
     `key_value`    varchar(256) NULL COMMENT 'The value of the configuration item',
     `is_deleted`   tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time`  timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_cache_cluster` (`cluster_name`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -978,8 +954,7 @@ CREATE TABLE `flume_source_ext`
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
     `is_deleted`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time` timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_flume_source_ext` (`parent_name`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -1009,8 +984,7 @@ CREATE TABLE `flume_channel_ext`
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
     `is_deleted`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time` timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_flume_channel_ext` (`parent_name`)
+    PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
@@ -1041,8 +1015,7 @@ CREATE TABLE `flume_sink_ext`
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
     `is_deleted`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     `modify_time` timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    PRIMARY KEY (`id`),
-    KEY `index_flume_sink_ext` (`parent_name`)
+    PRIMARY KEY (`id`)
 );
 
 SET FOREIGN_KEY_CHECKS = 1;
diff --git a/inlong-manager/manager-web/sql/apache_inlong_manager.sql b/inlong-manager/manager-web/sql/apache_inlong_manager.sql
index 5c11a74..b405247 100644
--- a/inlong-manager/manager-web/sql/apache_inlong_manager.sql
+++ b/inlong-manager/manager-web/sql/apache_inlong_manager.sql
@@ -80,11 +80,11 @@ DROP TABLE IF EXISTS `business`;
 CREATE TABLE `business`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `name`                varchar(128)      DEFAULT '' COMMENT 'Business name, English, numbers and underscore',
     `cn_name`             varchar(256)      DEFAULT NULL COMMENT 'Chinese display name',
     `description`         varchar(256)      DEFAULT '' COMMENT 'Business Introduction',
-    `middleware_type`     varchar(10)       DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
+    `middleware_type`     varchar(20)       DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
     `queue_module`        VARCHAR(20)  NULL DEFAULT 'parallel' COMMENT 'Queue model of Pulsar, parallel: multiple partitions, high throughput, out-of-order messages; serial: single partition, low throughput, and orderly messages',
     `topic_partition_num` INT(4)       NULL DEFAULT '3' COMMENT 'The number of partitions of Pulsar Topic, 1-20',
     `mq_resource_obj`     varchar(128) NOT NULL COMMENT 'MQ resource object, for Tube, its Topic, for Pulsar, its Namespace',
@@ -96,7 +96,7 @@ CREATE TABLE `business`
     `in_charges`          varchar(512) NOT NULL COMMENT 'Name of responsible person, separated by commas',
     `followers`           varchar(512)      DEFAULT NULL COMMENT 'List of names of business followers, separated by commas',
     `status`              int(4)            DEFAULT '21' COMMENT 'Business status',
-    `is_deleted`          tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`          int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`             varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`            varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`         timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
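
Beyond the type widening, the `is_deleted` comments in this file change from '1: deleted' to '> 0: deleted', which suggests the deletion marker is no longer a boolean. A common pattern this enables (a sketch only; the actual convention is merely implied by the comments, and the values are hypothetical) is to stamp deleted rows with their own primary key, so several deleted rows may share an `inlong_group_id` while undeleted rows stay unique:

-- Soft-delete: any value > 0 marks the row as deleted; using the row id
-- keeps deleted rows distinct from each other.
UPDATE business SET is_deleted = id WHERE inlong_group_id = 'test_group' AND is_deleted = 0;

-- Readers of live data filter on is_deleted = 0.
SELECT id, inlong_group_id, status FROM business WHERE is_deleted = 0;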
@@ -114,7 +114,7 @@ DROP TABLE IF EXISTS `business_pulsar`;
 CREATE TABLE `business_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `ensemble`            int(3)            DEFAULT '3' COMMENT 'The writable nodes number of ledger',
     `write_quorum`        int(3)            DEFAULT '3' COMMENT 'The copies number of ledger',
     `ack_quorum`          int(3)            DEFAULT '2' COMMENT 'The number of requested acks',
@@ -124,7 +124,7 @@ CREATE TABLE `business_pulsar`
     `ttl_unit`            varchar(20)       DEFAULT 'hours' COMMENT 'The unit of time-to-live duration',
     `retention_size`      int(11)           DEFAULT '-1' COMMENT 'Message size',
     `retention_size_unit` varchar(20)       DEFAULT 'MB' COMMENT 'The unit of message size',
-    `is_deleted`          tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`          int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `create_time`         timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `modify_time`         timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`)
@@ -138,10 +138,10 @@ DROP TABLE IF EXISTS `business_ext`;
 CREATE TABLE `business_ext`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id',
     `key_name`        varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`       varchar(256)      DEFAULT NULL COMMENT 'The value of the configuration item',
-    `is_deleted`      tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`      int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time`     timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_group_id` (`inlong_group_id`)
@@ -164,7 +164,7 @@ CREATE TABLE `cluster_info`
     `is_backup`   tinyint(1)        DEFAULT '0' COMMENT 'Whether it is a backup cluster, 0: no, 1: yes',
     `ext_props`   json              DEFAULT NULL COMMENT 'extended properties',
     `status`      int(4)            DEFAULT '1' COMMENT 'cluster status',
-    `is_deleted`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`  int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`     varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`    varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time` timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -198,7 +198,7 @@ CREATE TABLE `common_db_server`
     `backup_db_server_ip` varchar(64)       DEFAULT NULL COMMENT 'Backup DB HOST',
     `backup_db_port`      int(11)           DEFAULT NULL COMMENT 'Backup DB port',
     `status`              int(4)            DEFAULT '0' COMMENT 'status',
-    `is_deleted`          tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`          int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`             varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`            varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`         timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -224,7 +224,7 @@ CREATE TABLE `common_file_server`
     `username`       varchar(64) NOT NULL COMMENT 'User name of the data source IP host',
     `password`       varchar(64) NOT NULL COMMENT 'The password corresponding to the above user name',
     `status`         int(4)               DEFAULT '0' COMMENT 'status',
-    `is_deleted`     tinyint(1)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`     int(11)              DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`        varchar(64) NOT NULL COMMENT 'Creator name',
     `modifier`       varchar(64)          DEFAULT NULL COMMENT 'Modifier name',
     `create_time`    timestamp   NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -242,16 +242,16 @@ DROP TABLE IF EXISTS `consumption`;
 CREATE TABLE `consumption`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `consumer_group_name` varchar(255)      DEFAULT NULL COMMENT 'consumer group name',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256)      DEFAULT NULL COMMENT 'consumer group name',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
     `in_charges`          varchar(512) NOT NULL COMMENT 'Person in charge of consumption',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id',
     `middleware_type`     varchar(10)       DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
-    `topic`               varchar(255) NOT NULL COMMENT 'Consumption topic',
+    `topic`               varchar(256) NOT NULL COMMENT 'Consumption topic',
     `filter_enabled`      int(2)            DEFAULT '0' COMMENT 'Whether to filter, default 0, not filter consume',
-    `inlong_stream_id`    varchar(1024)     DEFAULT NULL COMMENT 'Data stream ID for consumption, if filter_enable is 1, it cannot empty',
+    `inlong_stream_id`    varchar(256)      DEFAULT NULL COMMENT 'Data stream ID for consumption, if filter_enabled is 1, it cannot be empty',
     `status`              int(4)       NOT NULL COMMENT 'Status: draft, pending approval, approval rejected, approval passed',
-    `is_deleted`          tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`          int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`             varchar(64)  NOT NULL COMMENT 'creator',
     `modifier`            varchar(64)       DEFAULT NULL COMMENT 'modifier',
     `create_time`         timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -268,14 +268,14 @@ CREATE TABLE `consumption_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT,
     `consumption_id`      int(11)      DEFAULT NULL COMMENT 'ID of the consumption information to which it belongs, guaranteed to be uniquely associated with consumption information',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
-    `consumer_group_name` varchar(255) DEFAULT NULL COMMENT 'Consumer group name',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group ID',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256) DEFAULT NULL COMMENT 'Consumer group name',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group ID',
     `is_rlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure the retry letter topic, 0: no configuration, 1: configuration',
-    `retry_letter_topic`  varchar(255) DEFAULT NULL COMMENT 'The name of the retry queue topic',
+    `retry_letter_topic`  varchar(256) DEFAULT NULL COMMENT 'The name of the retry queue topic',
     `is_dlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure dead letter topic, 0: no configuration, 1: means configuration',
-    `dead_letter_topic`   varchar(255) DEFAULT NULL COMMENT 'dead letter topic name',
-    `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete',
+    `dead_letter_topic`   varchar(256) DEFAULT NULL COMMENT 'dead letter topic name',
+    `is_deleted`          int(11)      DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
   DEFAULT CHARSET = utf8mb4 COMMENT ='Pulsar consumption table';
@@ -297,7 +297,7 @@ CREATE TABLE `data_proxy_cluster`
     `in_charges`  varchar(512)      DEFAULT NULL COMMENT 'Name of responsible person, separated by commas',
     `ext_props`   json              DEFAULT NULL COMMENT 'Extended properties',
     `status`      int(4)            DEFAULT '1' COMMENT 'Cluster status',
-    `is_deleted`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`  int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`     varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`    varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time` timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -356,8 +356,8 @@ DROP TABLE IF EXISTS `data_stream`;
 CREATE TABLE `data_stream`
 (
     `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_stream_id`       varchar(128) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
-    `inlong_group_id`        varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
     `name`                   varchar(64)       DEFAULT NULL COMMENT 'The name of the data stream page display, can be Chinese',
     `description`            varchar(256)      DEFAULT '' COMMENT 'Introduction to data stream',
     `mq_resource_obj`        varchar(128)      DEFAULT NULL COMMENT 'MQ resource object, in the data stream, Tube is data_stream_id, Pulsar is Topic',
@@ -375,7 +375,7 @@ CREATE TABLE `data_stream`
     `in_charges`             varchar(512)      DEFAULT NULL COMMENT 'Name of responsible person, separated by commas',
     `status`                 int(4)            DEFAULT '0' COMMENT 'Data stream status',
     `previous_status`        int(4)            DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`             tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`             int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`                varchar(64)       DEFAULT NULL COMMENT 'Creator name',
     `modifier`               varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`            timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -393,11 +393,11 @@ DROP TABLE IF EXISTS `data_stream_ext`;
 CREATE TABLE `data_stream_ext`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `key_name`         varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`        varchar(256)      DEFAULT NULL COMMENT 'The value of the configuration item',
-    `is_deleted`       tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`       int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time`      timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_stream_id` (`inlong_stream_id`)
@@ -411,7 +411,7 @@ DROP TABLE IF EXISTS `data_stream_field`;
 CREATE TABLE `data_stream_field`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id`    varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `is_predefined_field` tinyint(1)   DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
     `field_name`          varchar(20)  NOT NULL COMMENT 'field name',
@@ -420,11 +420,7 @@ CREATE TABLE `data_stream_field`
     `field_type`          varchar(20)  NOT NULL COMMENT 'field type',
     `field_comment`       varchar(50)  DEFAULT NULL COMMENT 'Field description',
     `rank_num`            smallint(6)  DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `is_exist`            tinyint(1)   DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
-    `bon_field_path`      varchar(256) DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`      varchar(64)  DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`       varchar(20)  DEFAULT NULL COMMENT 'Encryption level',
+    `is_deleted`          int(11)      DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`),
     KEY `index_stream_id` (`inlong_stream_id`)
 ) ENGINE = InnoDB
@@ -438,13 +434,13 @@ CREATE TABLE `operation_log`
 (
     `id`                  int(11)   NOT NULL AUTO_INCREMENT,
     `authentication_type` varchar(64)        DEFAULT NULL COMMENT 'Authentication type',
-    `operation_type`      varchar(255)       DEFAULT NULL COMMENT 'operation type',
+    `operation_type`      varchar(256)       DEFAULT NULL COMMENT 'operation type',
     `http_method`         varchar(64)        DEFAULT NULL COMMENT 'Request method',
-    `invoke_method`       varchar(255)       DEFAULT NULL COMMENT 'invoke method',
-    `operator`            varchar(255)       DEFAULT NULL COMMENT 'operator',
-    `proxy`               varchar(255)       DEFAULT NULL COMMENT 'proxy',
-    `request_url`         varchar(255)       DEFAULT NULL COMMENT 'Request URL',
-    `remote_address`      varchar(255)       DEFAULT NULL COMMENT 'Request IP',
+    `invoke_method`       varchar(256)       DEFAULT NULL COMMENT 'invoke method',
+    `operator`            varchar(256)       DEFAULT NULL COMMENT 'operator',
+    `proxy`               varchar(256)       DEFAULT NULL COMMENT 'proxy',
+    `request_url`         varchar(256)       DEFAULT NULL COMMENT 'Request URL',
+    `remote_address`      varchar(256)       DEFAULT NULL COMMENT 'Request IP',
     `cost_time`           bigint(20)         DEFAULT NULL COMMENT 'time-consuming',
     `body`                text COMMENT 'Request body',
     `param`               text COMMENT 'parameter',
@@ -463,11 +459,11 @@ CREATE TABLE `role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
     `role_code`   varchar(100) NOT NULL COMMENT 'Role code',
-    `role_name`   varchar(255) NOT NULL COMMENT 'Role Chinese name',
+    `role_name`   varchar(256) NOT NULL COMMENT 'Role Chinese name',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`),
     UNIQUE KEY `role_role_code_uindex` (`role_code`),
@@ -482,10 +478,10 @@ DROP TABLE IF EXISTS `source_db_basic`;
 CREATE TABLE `source_db_basic`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `sync_type`        tinyint(1)        DEFAULT '0' COMMENT 'Data synchronization type, 0: FULL, full amount, 1: INCREMENTAL, incremental',
-    `is_deleted`       tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`       int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`          varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`         varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`      timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -502,8 +498,8 @@ DROP TABLE IF EXISTS `source_db_detail`;
 CREATE TABLE `source_db_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)  NOT NULL COMMENT 'Collection type, with Agent, DataProxy client, LoadProxy',
     `db_name`          varchar(128)      DEFAULT NULL COMMENT 'database name',
     `transfer_ip`      varchar(64)       DEFAULT NULL COMMENT 'Transfer IP',
@@ -514,7 +510,7 @@ CREATE TABLE `source_db_detail`
     `crontab`          varchar(56)       DEFAULT NULL COMMENT 'Timed scheduling expression, required for full amount',
     `status`           int(4)            DEFAULT '0' COMMENT 'Data source status',
     `previous_status`  int(4)            DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`       tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`       int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`          varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`         varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`      timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -531,8 +527,8 @@ DROP TABLE IF EXISTS `source_file_basic`;
 CREATE TABLE `source_file_basic`
 (
     `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'ID',
-    `inlong_group_id`   varchar(128) NOT NULL COMMENT 'Business group id',
-    `inlong_stream_id`  varchar(128) NOT NULL COMMENT 'Data stream id',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'Business group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'Data stream id',
     `is_hybrid_source`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to mix data sources',
     `is_table_mapping`  tinyint(1)        DEFAULT '0' COMMENT 'Is there a table name mapping',
     `date_offset`       int(4)            DEFAULT '0' COMMENT 'Time offset',
@@ -540,7 +536,7 @@ CREATE TABLE `source_file_basic`
     `file_rolling_type` varchar(2)        DEFAULT 'H' COMMENT 'File rolling type',
     `upload_max_size`   int(4)            DEFAULT '120' COMMENT 'Upload maximum size',
     `need_compress`     tinyint(1)        DEFAULT '0' COMMENT 'Whether need compress',
-    `is_deleted`        tinyint(1)        DEFAULT '0' COMMENT 'Delete switch',
+    `is_deleted`        int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`           varchar(64)  NOT NULL COMMENT 'Creator',
     `modifier`          varchar(64)       DEFAULT NULL COMMENT 'Modifier',
     `create_time`       timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -557,8 +553,8 @@ DROP TABLE IF EXISTS `source_file_detail`;
 CREATE TABLE `source_file_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)       DEFAULT 'Agent' COMMENT 'Collection type, there are Agent, DataProxy client, LoadProxy, the file can only be Agent temporarily',
     `server_name`      varchar(64)       DEFAULT NULL COMMENT 'The name of the data source service. If it is empty, add configuration through the following fields',
     `ip`               varchar(128) NOT NULL COMMENT 'Data source IP address',
@@ -570,7 +566,7 @@ CREATE TABLE `source_file_detail`
     `file_path`        varchar(256) NOT NULL COMMENT 'File path, supports regular matching',
     `status`           int(4)            DEFAULT '0' COMMENT 'Data source status',
     `previous_status`  int(4)            DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`       tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`       int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`          varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`         varchar(64)       DEFAULT NULL COMMENT 'Modifier name',
     `create_time`      timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -591,7 +587,7 @@ CREATE TABLE `storage_ext`
     `storage_id`   int(11)     NOT NULL COMMENT 'data storage id',
     `key_name`     varchar(64) NOT NULL COMMENT 'Configuration item name',
     `key_value`    varchar(256)         DEFAULT NULL COMMENT 'The value of the configuration item',
-    `is_deleted`   tinyint(1)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`   int(11)              DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time`  timestamp   NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_storage_id` (`storage_id`)
@@ -599,67 +595,51 @@ CREATE TABLE `storage_ext`
   DEFAULT CHARSET = utf8mb4 COMMENT ='Data storage extension table';
 
 -- ----------------------------
--- Table structure for storage_hive
+-- Table structure for data_storage
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive`;
-CREATE TABLE `storage_hive`
+DROP TABLE IF EXISTS `data_storage`;
+CREATE TABLE `data_storage`
 (
-    `id`                          int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`             varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id`            varchar(128) NOT NULL COMMENT 'Owning data stream id',
-    `enable_create_table`         tinyint(1)        DEFAULT 1 COMMENT 'Whether to enable create table, 1: enable, 0: disable, default is 1',
-    `jdbc_url`                    varchar(255)      DEFAULT NULL COMMENT 'Hive JDBC connection URL, such as "jdbc:hive2://127.0.0.1:10000"',
-    `username`                    varchar(128)      DEFAULT NULL COMMENT 'Username',
-    `password`                    varchar(255)      DEFAULT NULL COMMENT 'User password',
-    `db_name`                     varchar(128)      DEFAULT NULL COMMENT 'Target database name',
-    `table_name`                  varchar(128)      DEFAULT NULL COMMENT 'Target data table name',
-    `hdfs_default_fs`             varchar(255)      DEFAULT NULL COMMENT 'HDFS defaultFS, such as "hdfs://127.0.0.1:9000"',
-    `warehouse_dir`               varchar(250)      DEFAULT '/user/hive/warehouse' COMMENT 'Hive table storage path on HDFS, such as "/user/hive/warehouse"',
-    `partition_interval`          int(5)            DEFAULT NULL COMMENT 'Partition interval, support: 1(D / H), 10 I, 30 I',
-    `partition_unit`              varchar(10)       DEFAULT 'D' COMMENT 'Partition type, support: D-day, H-hour, I-minute',
-    `primary_partition`           varchar(255)      DEFAULT 'dt' COMMENT 'primary partition field',
-    `secondary_partition`         varchar(256)      DEFAULT NULL COMMENT 'secondary partition field',
-    `partition_creation_strategy` varchar(50)       DEFAULT 'COMPLETED' COMMENT 'Partition creation strategy, support: ARRIVED, COMPLETED',
-    `file_format`                 varchar(15)       DEFAULT 'TextFile' COMMENT 'The stored table format, TextFile, RCFile, SequenceFile, Avro',
-    `data_encoding`               varchar(20)       DEFAULT 'UTF-8' COMMENT 'data encoding type',
-    `data_separator`              varchar(10)       DEFAULT NULL COMMENT 'data field separator',
-    `storage_period`              int(5)            DEFAULT '10' COMMENT 'Data storage period, unit: day',
-    `opt_log`                     varchar(5000)     DEFAULT NULL COMMENT 'Background operation log',
-    `status`                      int(4)            DEFAULT '0' COMMENT 'status',
-    `previous_status`             int(4)            DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`                  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `creator`                     varchar(64)       DEFAULT NULL COMMENT 'creator name',
-    `modifier`                    varchar(64)       DEFAULT NULL COMMENT 'modifier name',
-    `create_time`                 timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
-    `modify_time`                 timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
-    `temp_view`                   json              DEFAULT NULL COMMENT 'Temporary view, used to save un-submitted and unapproved intermediate data after modification',
+    `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Owning data stream id',
+    `storage_type`           varchar(15)           DEFAULT 'HIVE' COMMENT 'Storage type, including: HIVE, ES, etc',
+    `storage_period`         int(11)               DEFAULT '10' COMMENT 'Data storage period, unit: day',
+    `enable_create_resource` tinyint(1)            DEFAULT '1' COMMENT 'Whether to enable creating the storage resource, 0: disable, 1: enable, default is 1',
+    `ext_params`             text COMMENT 'Other fields, saved as a JSON string',
+    `operate_log`            varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
+    `status`                 int(11)               DEFAULT '0' COMMENT 'Status',
+    `previous_status`        int(11)               DEFAULT '0' COMMENT 'Previous status',
+    `is_deleted`             int(11)               DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
+    `creator`                varchar(64)  NOT NULL COMMENT 'Creator name',
+    `modifier`               varchar(64)           DEFAULT NULL COMMENT 'Modifier name',
+    `create_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
+    `modify_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
-  DEFAULT CHARSET = utf8mb4 COMMENT ='Data is stored in Hive configuration table';
+  DEFAULT CHARSET = utf8mb4 COMMENT ='Data storage table';
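
Under the new model, the dropped `storage_hive` columns (JDBC URL, database, table, partition settings, and so on) are expected to travel in `ext_params`. A sketch of what a Hive row might look like; the JSON key names are illustrative guesses, since the real serialized form is defined by the manager's storage DTOs:

-- Hive-specific settings are carried as a JSON string in ext_params.
INSERT INTO data_storage (inlong_group_id, inlong_stream_id, storage_type,
                          storage_period, enable_create_resource, ext_params, creator)
VALUES ('test_group', 'test_stream', 'HIVE', 10, 1,
        '{"jdbcUrl":"jdbc:hive2://127.0.0.1:10000","dbName":"test_db","tableName":"test_table"}',
        'admin');

Note that `ext_params` is declared as text rather than the native json type, so validating and querying the payload is left to the application.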
 
 -- ----------------------------
--- Table structure for storage_hive_field
+-- Table structure for data_storage_field
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive_field`;
-CREATE TABLE `storage_hive_field`
+DROP TABLE IF EXISTS `data_storage_field`;
+CREATE TABLE `data_storage_field`
 (
-    `id`                int(11)     NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `storage_id`        int(11)     NOT NULL COMMENT 'Hive data storage id',
-    `source_field_name` varchar(20) NOT NULL COMMENT 'source field name',
-    `source_field_type` varchar(20) NOT NULL COMMENT 'source field type',
-    `field_name`        varchar(20) NOT NULL COMMENT 'field name',
-    `field_type`        varchar(20) NOT NULL COMMENT 'field type',
-    `field_comment`     varchar(2000) DEFAULT '' COMMENT 'Field description',
-    `is_required`       tinyint(1)    DEFAULT NULL COMMENT 'Is it required, 0: not necessary, 1: required',
-    `bon_field_path`    varchar(256)  DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`    varchar(64)   DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`     varchar(20)   DEFAULT NULL COMMENT 'Encryption level',
-    `is_exist`          tinyint(1)    DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
+    `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'inlong group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'inlong stream id',
+    `storage_id`        int(11)      NOT NULL COMMENT 'data storage id',
+    `storage_type`      varchar(15)  NOT NULL COMMENT 'storage type',
+    `source_field_name` varchar(50)   DEFAULT NULL COMMENT 'source field name',
+    `source_field_type` varchar(50)   DEFAULT NULL COMMENT 'source field type',
+    `field_name`        varchar(50)  NOT NULL COMMENT 'field name',
+    `field_type`        varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`     varchar(2000) DEFAULT NULL COMMENT 'field description',
     `rank_num`          smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`        int(11)       DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
-  DEFAULT CHARSET = utf8mb4 COMMENT ='Data stored in Hive field';
+  DEFAULT CHARSET = utf8mb4 COMMENT ='Data storage field table';
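
Field mappings follow the same generalization: each row now records its group, stream, and storage type alongside the storage id, so one table serves every storage plugin. A hypothetical mapping row (all values illustrative):

-- Map a source field onto a storage field for storage id 1.
INSERT INTO data_storage_field (inlong_group_id, inlong_stream_id, storage_id, storage_type,
                                source_field_name, source_field_type, field_name, field_type, rank_num)
VALUES ('test_group', 'test_stream', 1, 'HIVE', 'user_age', 'int', 'age', 'int', 0);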
 
 -- ----------------------------
 -- Table structure for task
@@ -670,12 +650,12 @@ CREATE TABLE `task`
     `id`          bigint(20)   NOT NULL,
     `taskflow_id` bigint(20)   NOT NULL COMMENT 'Owning task flow id',
     `task_def_id` bigint(20)    DEFAULT NULL COMMENT 'task definition id',
-    `task_name`   varchar(255) NOT NULL COMMENT 'task name',
-    `status`      varchar(255)  DEFAULT NULL COMMENT 'task status',
-    `post_param`  varchar(255)  DEFAULT NULL COMMENT 'Task parameters',
+    `task_name`   varchar(256) NOT NULL COMMENT 'task name',
+    `status`      varchar(256)  DEFAULT NULL COMMENT 'task status',
+    `post_param`  varchar(256)  DEFAULT NULL COMMENT 'Task parameters',
     `resultmsg`   varchar(1000) DEFAULT NULL COMMENT 'Execution result log',
     `create_time` datetime     NOT NULL COMMENT 'Create time',
-    `create_by`   varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`   varchar(256) NOT NULL COMMENT 'creator',
     `update_time` datetime      DEFAULT NULL COMMENT 'last modified time',
     `update_by`   varchar(0)    DEFAULT NULL COMMENT 'last modified person',
     PRIMARY KEY (`id`)
@@ -691,10 +671,10 @@ CREATE TABLE `task_def`
     `id`              bigint(20)   NOT NULL,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Task flow definition id',
     `parent_id`       bigint(20)   DEFAULT NULL COMMENT 'parent task id',
-    `implclass`       varchar(255) DEFAULT NULL COMMENT 'task processing flow class',
-    `task_name`       varchar(255) DEFAULT NULL COMMENT 'task name',
+    `implclass`       varchar(256) DEFAULT NULL COMMENT 'task processing flow class',
+    `task_name`       varchar(256) DEFAULT NULL COMMENT 'task name',
     `create_time`     datetime     NOT NULL COMMENT 'Create time',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
     `update_by`       datetime     DEFAULT NULL COMMENT 'last modified person',
     `delivery_id`     bigint(20)   DEFAULT NULL COMMENT 'Task push method',
@@ -710,12 +690,12 @@ CREATE TABLE `taskflow`
 (
     `id`              bigint(20)   NOT NULL AUTO_INCREMENT,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Taskflow definition id',
-    `status`          varchar(255) DEFAULT NULL COMMENT 'status',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `status`          varchar(256) DEFAULT NULL COMMENT 'status',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `create_time`     datetime     DEFAULT NULL COMMENT 'Create time',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
-    `update_by`       varchar(255) DEFAULT NULL COMMENT 'last modified person',
-    `event`           varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `update_by`       varchar(256) DEFAULT NULL COMMENT 'last modified person',
+    `event`           varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
   DEFAULT CHARSET = latin1 COMMENT ='Task flow instance';
@@ -727,12 +707,12 @@ DROP TABLE IF EXISTS `taskflow_def`;
 CREATE TABLE `taskflow_def`
 (
     `id`            bigint(20)   NOT NULL AUTO_INCREMENT,
-    `name`          varchar(255) NOT NULL COMMENT 'Workflow definition name',
-    `descrip`       varchar(255) DEFAULT NULL COMMENT 'Workflow function description',
+    `name`          varchar(256) NOT NULL COMMENT 'Workflow definition name',
+    `descrip`       varchar(256) DEFAULT NULL COMMENT 'Workflow function description',
     `create_time`   datetime     NOT NULL COMMENT 'Create time',
-    `create_by`     varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`     varchar(256) NOT NULL COMMENT 'creator',
     `isValid`       int(11)      DEFAULT NULL COMMENT 'logical deletion',
-    `trigger_event` varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `trigger_event` varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
   DEFAULT CHARSET = latin1 COMMENT ='Task flow definition';
@@ -744,14 +724,14 @@ DROP TABLE IF EXISTS `user`;
 CREATE TABLE `user`
 (
     `id`           int(11)      NOT NULL AUTO_INCREMENT,
-    `name`         varchar(255) NOT NULL COMMENT 'account name',
+    `name`         varchar(256) NOT NULL COMMENT 'account name',
     `password`     varchar(64)  NOT NULL COMMENT 'password md5',
     `account_type` int(11)      NOT NULL DEFAULT '1' COMMENT 'account type, 0-manager 1-normal',
     `due_date`     datetime              DEFAULT NULL COMMENT 'due date for account',
     `create_time`  datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `update_time`  datetime              DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
-    `create_by`    varchar(255) NOT NULL COMMENT 'create by sb.',
-    `update_by`    varchar(255)          DEFAULT NULL COMMENT 'update by sb.',
+    `create_by`    varchar(256) NOT NULL COMMENT 'Creator name',
+    `update_by`    varchar(256)          DEFAULT NULL COMMENT 'Modifier name',
     PRIMARY KEY (`id`),
     UNIQUE KEY `user_name_uindex` (`name`)
 ) ENGINE = InnoDB
@@ -769,12 +749,12 @@ DROP TABLE IF EXISTS `user_role`;
 CREATE TABLE `user_role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
-    `user_name`   varchar(255) NOT NULL COMMENT 'username rtx',
-    `role_code`   varchar(255) NOT NULL COMMENT 'role',
+    `user_name`   varchar(256) NOT NULL COMMENT 'username',
+    `role_code`   varchar(256) NOT NULL COMMENT 'role',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`)
 ) ENGINE = InnoDB
@@ -787,11 +767,11 @@ DROP TABLE IF EXISTS `wf_approver`;
 CREATE TABLE `wf_approver`
 (
     `id`                int(11)       NOT NULL AUTO_INCREMENT,
-    `process_name`      varchar(255)  NOT NULL COMMENT 'process definition name',
-    `task_name`         varchar(255)  NOT NULL COMMENT 'Approval task name',
+    `process_name`      varchar(256)  NOT NULL COMMENT 'process definition name',
+    `task_name`         varchar(256)  NOT NULL COMMENT 'Approval task name',
     `filter_key`        varchar(64)   NOT NULL COMMENT 'filter condition KEY',
-    `filter_value`      varchar(255)           DEFAULT NULL COMMENT 'Filter matching value',
-    `filter_value_desc` varchar(255)           DEFAULT NULL COMMENT 'Filter value description',
+    `filter_value`      varchar(256)           DEFAULT NULL COMMENT 'Filter matching value',
+    `filter_value_desc` varchar(256)           DEFAULT NULL COMMENT 'Filter value description',
     `approvers`         varchar(1024) NOT NULL COMMENT 'Approvers, separated by commas',
     `creator`           varchar(64)   NOT NULL COMMENT 'creator',
     `modifier`          varchar(64)   NOT NULL COMMENT 'modifier',
@@ -819,12 +799,12 @@ CREATE TABLE `wf_event_log`
 (
     `id`                   int(11)      NOT NULL AUTO_INCREMENT,
     `process_inst_id`      int(11)      NOT NULL,
-    `process_name`         varchar(255)  DEFAULT NULL COMMENT 'Process name',
-    `process_display_name` varchar(255) NOT NULL COMMENT 'Process name',
-    `inlong_group_id`      varchar(128)  DEFAULT NULL COMMENT 'Business group id',
+    `process_name`         varchar(256)  DEFAULT NULL COMMENT 'Process name',
+    `process_display_name` varchar(256) NOT NULL COMMENT 'Process display name',
+    `inlong_group_id`      varchar(256)  DEFAULT NULL COMMENT 'Business group id',
     `task_inst_id`         int(11)       DEFAULT NULL COMMENT 'Task ID',
-    `element_name`         varchar(255) NOT NULL COMMENT 'The name of the component that triggered the event',
-    `element_display_name` varchar(255) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
+    `element_name`         varchar(256) NOT NULL COMMENT 'The name of the component that triggered the event',
+    `element_display_name` varchar(256) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
     `event_type`           varchar(64)  NOT NULL COMMENT 'Event type: process event/task event',
     `event`                varchar(64)  NOT NULL COMMENT 'Event name',
     `listener`             varchar(1024) DEFAULT NULL COMMENT 'Event listener name',
@@ -846,12 +826,12 @@ DROP TABLE IF EXISTS `wf_process_instance`;
 CREATE TABLE `wf_process_instance`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT,
-    `name`            varchar(255) NOT NULL COMMENT 'process name',
-    `display_name`    varchar(255) NOT NULL COMMENT 'Process display name',
-    `type`            varchar(255)          DEFAULT NULL COMMENT 'Process classification',
-    `title`           varchar(255)          DEFAULT NULL COMMENT 'Process title',
-    `inlong_group_id` varchar(128)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
-    `applicant`       varchar(255) NOT NULL COMMENT 'applicant',
+    `name`            varchar(256) NOT NULL COMMENT 'process name',
+    `display_name`    varchar(256) NOT NULL COMMENT 'Process display name',
+    `type`            varchar(256)          DEFAULT NULL COMMENT 'Process classification',
+    `title`           varchar(256)          DEFAULT NULL COMMENT 'Process title',
+    `inlong_group_id` varchar(256)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
+    `applicant`       varchar(256) NOT NULL COMMENT 'applicant',
     `state`           varchar(64)  NOT NULL COMMENT 'state',
     `form_data`       mediumtext COMMENT 'form information',
     `start_time`      datetime     NOT NULL COMMENT 'start time',
@@ -871,14 +851,14 @@ CREATE TABLE `wf_task_instance`
     `id`                   int(11)       NOT NULL AUTO_INCREMENT,
     `type`                 varchar(64)   NOT NULL COMMENT 'Task type: UserTask user task/ServiceTask system task',
     `process_inst_id`      int(11)       NOT NULL COMMENT 'process ID',
-    `process_name`         varchar(255)  NOT NULL COMMENT 'process name',
-    `process_display_name` varchar(255)  NOT NULL COMMENT 'process name',
-    `name`                 varchar(255)  NOT NULL COMMENT 'task name',
-    `display_name`         varchar(255)  NOT NULL COMMENT 'Task display name',
+    `process_name`         varchar(256)  NOT NULL COMMENT 'process name',
+    `process_display_name` varchar(256)  NOT NULL COMMENT 'process display name',
+    `name`                 varchar(256)  NOT NULL COMMENT 'task name',
+    `display_name`         varchar(256)  NOT NULL COMMENT 'Task display name',
     `applicant`            varchar(64)   DEFAULT NULL COMMENT 'applicant',
     `approvers`            varchar(1024) NOT NULL COMMENT 'approvers',
     `state`                varchar(64)   NOT NULL COMMENT 'state',
-    `operator`             varchar(255)  DEFAULT NULL COMMENT 'actual operator',
+    `operator`             varchar(256)  DEFAULT NULL COMMENT 'actual operator',
     `remark`               varchar(1024) DEFAULT NULL COMMENT 'Remark information',
     `form_data`            mediumtext COMMENT 'form information submitted by the current task',
     `start_time`           datetime      NOT NULL COMMENT 'start time',
@@ -902,7 +882,7 @@ CREATE TABLE `cluster_set`
     `in_charges`      varchar(512) COMMENT 'Name of responsible person, separated by commas',
     `followers`       varchar(512) COMMENT 'List of names of business followers, separated by commas',
     `status`          int(4)            DEFAULT '21' COMMENT 'ClusterSet status',
-    `is_deleted`      tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`      int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `creator`         varchar(64)  NOT NULL COMMENT 'Creator name',
     `modifier`        varchar(64)  NULL COMMENT 'Modifier name',
     `create_time`     timestamp    NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
@@ -919,8 +899,8 @@ DROP TABLE IF EXISTS `cluster_set_inlongid`;
 CREATE TABLE `cluster_set_inlongid`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `set_name`        varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `set_name`        varchar(256) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_cluster_set_inlongid` (`set_name`, `inlong_group_id`)
 ) ENGINE = InnoDB
@@ -951,7 +931,7 @@ CREATE TABLE `cache_cluster_ext`
     `cluster_name` varchar(128) NOT NULL COMMENT 'CacheCluster name, English, numbers and underscore',
     `key_name`     varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`    varchar(256) NULL COMMENT 'The value of the configuration item',
-    `is_deleted`   tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`   int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time`  timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_cache_cluster` (`cluster_name`)
@@ -1030,7 +1010,7 @@ CREATE TABLE `flume_source_ext`
     `set_name`    varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
     `key_name`    varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
-    `is_deleted`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`  int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time` timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_flume_source_ext` (`parent_name`)
@@ -1063,7 +1043,7 @@ CREATE TABLE `flume_channel_ext`
     `set_name`    varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
     `key_name`    varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
-    `is_deleted`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`  int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time` timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_flume_channel_ext` (`parent_name`)
@@ -1097,7 +1077,7 @@ CREATE TABLE `flume_sink_ext`
     `set_name`    varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
     `key_name`    varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`   varchar(256) NULL COMMENT 'The value of the configuration item',
-    `is_deleted`  tinyint(1)        DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`  int(11)           DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     `modify_time` timestamp    NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`),
     KEY `index_flume_sink_ext` (`parent_name`)
@@ -1141,10 +1121,10 @@ CREATE TABLE `db_collector_detail_task`
 DROP TABLE IF EXISTS `sort_cluster_config`;
 CREATE TABLE `sort_cluster_config`
 (
-    `id`            int(11)       NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `cluster_name`  varchar(128)  NOT NULL COMMENT 'Cluster name',
-    `task_name`     varchar(128)  NOT NULL COMMENT 'Task name',
-    `sink_type`     varchar(128)  NOT NULL COMMENT 'Type of sink',
+    `id`           int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `cluster_name` varchar(128) NOT NULL COMMENT 'Cluster name',
+    `task_name`    varchar(128) NOT NULL COMMENT 'Task name',
+    `sink_type`    varchar(128) NOT NULL COMMENT 'Type of sink',
     PRIMARY KEY (`id`),
     KEY `index_sort_cluster_config` (`cluster_name`)
 ) ENGINE = InnoDB
@@ -1156,12 +1136,12 @@ CREATE TABLE `sort_cluster_config`
 DROP TABLE IF EXISTS `sort_task_id_param`;
 CREATE TABLE `sort_task_id_param`
 (
-    `id`               int(11)       NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `task_name`        varchar(128)  NOT NULL COMMENT 'Task name',
-    `group_id`         varchar(128)  NOT NULL COMMENT 'Inlong group id',
-    `stream_id`        varchar(128)  NULL COMMENT 'Inlong stream id',
-    `param_key`        varchar(128)  NOT NULL COMMENT 'Key of param',
-    `param_value`      varchar(1024)  NOT NULL COMMENT 'Value of param',
+    `id`          int(11)       NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `task_name`   varchar(128)  NOT NULL COMMENT 'Task name',
+    `group_id`    varchar(128)  NOT NULL COMMENT 'Inlong group id',
+    `stream_id`   varchar(128)  NULL COMMENT 'Inlong stream id',
+    `param_key`   varchar(128)  NOT NULL COMMENT 'Key of param',
+    `param_value` varchar(1024) NOT NULL COMMENT 'Value of param',
     PRIMARY KEY (`id`),
     KEY `index_sort_task_id_param` (`task_name`)
 ) ENGINE = InnoDB
@@ -1173,11 +1153,11 @@ CREATE TABLE `sort_task_id_param`
 DROP TABLE IF EXISTS `sort_task_sink_param`;
 CREATE TABLE `sort_task_sink_param`
 (
-    `id`               int(11)       NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `task_name`        varchar(128)  NOT NULL COMMENT 'Task name',
-    `sink_type`        varchar(128)  NOT NULL COMMENT 'Type of sink',
-    `param_key`        varchar(128)  NOT NULL COMMENT 'Key of param',
-    `param_value`      varchar(1024)  NOT NULL COMMENT 'Value of param',
+    `id`          int(11)       NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `task_name`   varchar(128)  NOT NULL COMMENT 'Task name',
+    `sink_type`   varchar(128)  NOT NULL COMMENT 'Type of sink',
+    `param_key`   varchar(128)  NOT NULL COMMENT 'Key of param',
+    `param_value` varchar(1024) NOT NULL COMMENT 'Value of param',
     PRIMARY KEY (`id`),
     KEY `index_sort_task_sink_params` (`task_name`, `sink_type`)
 ) ENGINE = InnoDB
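
The two sort_task_*_param tables above are plain key/value stores, so a consumer
typically folds the rows for one task into a map. A minimal JDBC sketch, using
only the table and column names from the DDL above (the connection and taskName
are assumed to exist):

    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.util.HashMap;
    import java.util.Map;

    // Collect all sink parameters configured for a given sort task.
    Map<String, String> sinkParams = new HashMap<>();
    try (PreparedStatement ps = conn.prepareStatement(
            "SELECT param_key, param_value FROM sort_task_sink_param WHERE task_name = ?")) {
        ps.setString(1, taskName);
        try (ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                sinkParams.put(rs.getString("param_key"), rs.getString("param_value"));
            }
        }
    }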
diff --git a/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/DataStreamController.java b/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/DataStreamController.java
index 6ae93ae..1386822 100644
--- a/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/DataStreamController.java
+++ b/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/DataStreamController.java
@@ -22,16 +22,15 @@ import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiImplicitParam;
 import io.swagger.annotations.ApiImplicitParams;
 import io.swagger.annotations.ApiOperation;
-import java.util.List;
 import org.apache.inlong.manager.common.beans.Response;
 import org.apache.inlong.manager.common.enums.OperationType;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamInfo;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamListVO;
 import org.apache.inlong.manager.common.pojo.datastream.DataStreamPageRequest;
-import org.apache.inlong.manager.common.pojo.datastream.DataStreamSummaryInfo;
-import org.apache.inlong.manager.common.pojo.datastream.FullStreamRequest;
 import org.apache.inlong.manager.common.pojo.datastream.FullPageUpdateInfo;
+import org.apache.inlong.manager.common.pojo.datastream.FullStreamRequest;
 import org.apache.inlong.manager.common.pojo.datastream.FullStreamResponse;
+import org.apache.inlong.manager.common.pojo.datastream.StreamBriefResponse;
 import org.apache.inlong.manager.common.util.LoginUserUtil;
 import org.apache.inlong.manager.service.core.DataStreamService;
 import org.apache.inlong.manager.service.core.operationlog.OperationLog;
@@ -43,6 +42,8 @@ import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
 
+import java.util.List;
+
 /**
  * Data stream control layer
  */
@@ -132,8 +133,8 @@ public class DataStreamController {
     @RequestMapping(value = "/getSummaryList/{groupId}", method = RequestMethod.GET)
     @ApiOperation(value = "Obtain the flow of data stream according to groupId")
     @ApiImplicitParam(name = "groupId", value = "Business group id", dataTypeClass = String.class, required = true)
-    public Response<List<DataStreamSummaryInfo>> getSummaryList(@PathVariable String groupId) {
-        return Response.success(dataStreamService.getSummaryList(groupId));
+    public Response<List<StreamBriefResponse>> getSummaryList(@PathVariable String groupId) {
+        return Response.success(dataStreamService.getBriefList(groupId));
     }
 
 }
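
The hunk above swaps the "summary" naming for the new "brief" naming: the
endpoint keeps its /getSummaryList path but now returns StreamBriefResponse
items from DataStreamService.getBriefList. A minimal sketch of the service
contract the controller compiles against, inferred only from this hunk (the
comment wording is illustrative):

    import java.util.List;

    public interface DataStreamService {

        // Replaces the former getSummaryList(): returns lightweight
        // StreamBriefResponse summaries for all streams under the group.
        List<StreamBriefResponse> getBriefList(String groupId);
    }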
diff --git a/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/StorageController.java b/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/StorageController.java
index cba9cf8..5c3dfde 100644
--- a/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/StorageController.java
+++ b/inlong-manager/manager-web/src/main/java/org/apache/inlong/manager/web/controller/StorageController.java
@@ -22,22 +22,22 @@ import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiImplicitParam;
 import io.swagger.annotations.ApiImplicitParams;
 import io.swagger.annotations.ApiOperation;
-import java.util.List;
 import org.apache.inlong.manager.common.beans.Response;
 import org.apache.inlong.manager.common.enums.OperationType;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageListResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageListResponse;
 import org.apache.inlong.manager.common.pojo.datastorage.StoragePageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
 import org.apache.inlong.manager.common.pojo.query.ColumnInfoBean;
 import org.apache.inlong.manager.common.pojo.query.ConnectionInfo;
 import org.apache.inlong.manager.common.pojo.query.DatabaseDetail;
 import org.apache.inlong.manager.common.pojo.query.TableQueryBean;
 import org.apache.inlong.manager.common.util.LoginUserUtil;
 import org.apache.inlong.manager.service.core.DataSourceService;
-import org.apache.inlong.manager.service.core.StorageService;
 import org.apache.inlong.manager.service.core.operationlog.OperationLog;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.validation.annotation.Validated;
 import org.springframework.web.bind.annotation.PathVariable;
 import org.springframework.web.bind.annotation.RequestBody;
 import org.springframework.web.bind.annotation.RequestMapping;
@@ -45,6 +45,8 @@ import org.springframework.web.bind.annotation.RequestMethod;
 import org.springframework.web.bind.annotation.RequestParam;
 import org.springframework.web.bind.annotation.RestController;
 
+import java.util.List;
+
 /**
  * Data storage control layer
  */
@@ -61,42 +63,42 @@ public class StorageController {
     @RequestMapping(value = "/save", method = RequestMethod.POST)
     @OperationLog(operation = OperationType.CREATE)
     @ApiOperation(value = "Save storage information")
-    public Response<Integer> save(@RequestBody BaseStorageRequest storageInfo) {
-        return Response.success(storageService.save(storageInfo, LoginUserUtil.getLoginUserDetail().getUserName()));
+    public Response<Integer> save(@Validated @RequestBody StorageRequest request) {
+        return Response.success(storageService.save(request, LoginUserUtil.getLoginUserDetail().getUserName()));
     }
 
     @RequestMapping(value = "/get/{id}", method = RequestMethod.GET)
     @ApiOperation(value = "Query storage information")
     @ApiImplicitParams({
-            @ApiImplicitParam(name = "storageType", dataTypeClass = String.class, required = true),
-            @ApiImplicitParam(name = "id", dataTypeClass = Integer.class, required = true)
+            @ApiImplicitParam(name = "id", dataTypeClass = Integer.class, required = true),
+            @ApiImplicitParam(name = "storageType", dataTypeClass = String.class, required = true)
     })
-    public Response<BaseStorageResponse> get(@RequestParam String storageType, @PathVariable Integer id) {
-        return Response.success(storageService.getById(storageType, id));
+    public Response<StorageResponse> get(@PathVariable Integer id, @RequestParam String storageType) {
+        return Response.success(storageService.get(id, storageType));
     }
 
     @RequestMapping(value = "/list", method = RequestMethod.GET)
     @ApiOperation(value = "Query data storage list based on conditions")
-    public Response<PageInfo<? extends BaseStorageListResponse>> listByCondition(StoragePageRequest request) {
+    public Response<PageInfo<? extends StorageListResponse>> listByCondition(StoragePageRequest request) {
         return Response.success(storageService.listByCondition(request));
     }
 
     @RequestMapping(value = "/update", method = RequestMethod.POST)
     @OperationLog(operation = OperationType.UPDATE)
     @ApiOperation(value = "Modify data storage information")
-    public Response<Boolean> update(@RequestBody BaseStorageRequest storageInfo) {
-        return Response.success(storageService.update(storageInfo, LoginUserUtil.getLoginUserDetail().getUserName()));
+    public Response<Boolean> update(@Validated @RequestBody StorageRequest request) {
+        return Response.success(storageService.update(request, LoginUserUtil.getLoginUserDetail().getUserName()));
     }
 
     @RequestMapping(value = "/delete/{id}", method = RequestMethod.DELETE)
     @OperationLog(operation = OperationType.DELETE)
     @ApiOperation(value = "Delete data storage information")
     @ApiImplicitParams({
-            @ApiImplicitParam(name = "storageType", dataTypeClass = String.class, required = true),
-            @ApiImplicitParam(name = "id", dataTypeClass = Integer.class, required = true)
+            @ApiImplicitParam(name = "id", dataTypeClass = Integer.class, required = true),
+            @ApiImplicitParam(name = "storageType", dataTypeClass = String.class, required = true)
     })
-    public Response<Boolean> delete(@RequestParam String storageType, @PathVariable Integer id) {
-        boolean result = storageService.delete(storageType, id, LoginUserUtil.getLoginUserDetail().getUserName());
+    public Response<Boolean> delete(@PathVariable Integer id, @RequestParam String storageType) {
+        boolean result = storageService.delete(id, storageType, LoginUserUtil.getLoginUserDetail().getUserName());
         return Response.success(result);
     }
 
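Note the parameter order: every read and delete now takes the record id first
and the storage type second, making the type an explicit discriminator for the
dispatch behind StorageService. A usage sketch that mirrors the sequence
exercised by StorageServiceTest further below (group, stream, and operator
names are illustrative):

    // Create, fetch, and delete a Hive storage through the new facade.
    HiveStorageRequest request = new HiveStorageRequest();
    request.setInlongGroupId("b_group1");
    request.setInlongStreamId("stream1");
    request.setStorageType(BizConstant.STORAGE_HIVE);

    Integer id = storageService.save(request, "operator");
    StorageResponse response = storageService.get(id, BizConstant.STORAGE_HIVE);
    storageService.delete(id, BizConstant.STORAGE_HIVE, "operator");
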
diff --git a/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/BusinessServiceTest.java b/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/BusinessServiceTest.java
index 5bf9e44..6cf27d0 100644
--- a/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/BusinessServiceTest.java
+++ b/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/BusinessServiceTest.java
@@ -17,8 +17,6 @@
 
 package org.apache.inlong.manager.service.core;
 
-import java.util.Arrays;
-import java.util.List;
 import org.apache.inlong.manager.common.enums.BizConstant;
 import org.apache.inlong.manager.common.enums.EntityStatus;
 import org.apache.inlong.manager.common.pojo.business.BusinessExtInfo;
@@ -32,6 +30,9 @@ import org.junit.Test;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.test.context.TestComponent;
 
+import java.util.Arrays;
+import java.util.List;
+
 /**
  * Business service test
  */
@@ -62,6 +63,7 @@ public class BusinessServiceTest extends ServiceBaseTest {
         businessInfo.setName(groupName);
         businessInfo.setMiddlewareType(BizConstant.MIDDLEWARE_PULSAR);
         businessInfo.setCreator(operator);
+        businessInfo.setInCharges(operator);
         businessInfo.setStatus(EntityStatus.BIZ_CONFIG_SUCCESSFUL.getCode());
 
         BusinessPulsarInfo pulsarInfo = new BusinessPulsarInfo();
diff --git a/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/DataStorageServiceTest.java b/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/StorageServiceTest.java
similarity index 68%
rename from inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/DataStorageServiceTest.java
rename to inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/StorageServiceTest.java
index 406c1f3..436284b 100644
--- a/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/DataStorageServiceTest.java
+++ b/inlong-manager/manager-web/src/test/java/org/apache/inlong/manager/service/core/StorageServiceTest.java
@@ -18,10 +18,11 @@
 package org.apache.inlong.manager.service.core;
 
 import org.apache.inlong.manager.common.enums.BizConstant;
-import org.apache.inlong.manager.common.pojo.datastorage.BaseStorageResponse;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveRequest;
-import org.apache.inlong.manager.common.pojo.datastorage.StorageHiveResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.StorageResponse;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageRequest;
+import org.apache.inlong.manager.common.pojo.datastorage.hive.HiveStorageResponse;
 import org.apache.inlong.manager.common.util.CommonBeanUtils;
+import org.apache.inlong.manager.service.storage.StorageService;
 import org.apache.inlong.manager.web.ServiceBaseTest;
 import org.junit.Assert;
 import org.junit.Test;
@@ -30,7 +31,7 @@ import org.springframework.beans.factory.annotation.Autowired;
 /**
  * Data storage service test
  */
-public class DataStorageServiceTest extends ServiceBaseTest {
+public class StorageServiceTest extends ServiceBaseTest {
 
     private final String globalGroupId = "b_group1";
     private final String globalStreamId = "stream1";
@@ -39,18 +40,16 @@ public class DataStorageServiceTest extends ServiceBaseTest {
     @Autowired
     private StorageService storageService;
     @Autowired
-    private BusinessServiceTest businessServiceTest;
-    @Autowired
     private DataStreamServiceTest streamServiceTest;
 
     public Integer saveStorage() {
         streamServiceTest.saveDataStream(globalGroupId, globalStreamId, globalOperator);
 
-        StorageHiveRequest storageInfo = new StorageHiveRequest();
+        HiveStorageRequest storageInfo = new HiveStorageRequest();
         storageInfo.setInlongGroupId(globalGroupId);
         storageInfo.setInlongStreamId(globalStreamId);
         storageInfo.setStorageType(BizConstant.STORAGE_HIVE);
-        storageInfo.setEnableCreateTable(BizConstant.DISABLE_CREATE_TABLE);
+        storageInfo.setEnableCreateResource(BizConstant.DISABLE_CREATE_RESOURCE);
 
         return storageService.save(storageInfo, globalOperator);
     }
@@ -60,7 +59,7 @@ public class DataStorageServiceTest extends ServiceBaseTest {
         Integer id = this.saveStorage();
         Assert.assertNotNull(id);
 
-        boolean result = storageService.delete(BizConstant.STORAGE_HIVE, id, globalOperator);
+        boolean result = storageService.delete(id, BizConstant.STORAGE_HIVE, globalOperator);
         Assert.assertTrue(result);
     }
 
@@ -68,22 +67,22 @@ public class DataStorageServiceTest extends ServiceBaseTest {
     public void testListByIdentifier() {
         Integer id = this.saveStorage();
 
-        BaseStorageResponse storage = storageService.getById(BizConstant.STORAGE_HIVE, id);
+        StorageResponse storage = storageService.get(id, BizConstant.STORAGE_HIVE);
         Assert.assertEquals(globalGroupId, storage.getInlongGroupId());
 
-        storageService.delete(BizConstant.STORAGE_HIVE, id, globalOperator);
+        storageService.delete(id, BizConstant.STORAGE_HIVE, globalOperator);
     }
 
     @Test
     public void testGetAndUpdate() {
         Integer id = this.saveStorage();
-        BaseStorageResponse storage = storageService.getById(BizConstant.STORAGE_HIVE, id);
-        Assert.assertEquals(globalGroupId, storage.getInlongGroupId());
+        StorageResponse response = storageService.get(id, BizConstant.STORAGE_HIVE);
+        Assert.assertEquals(globalGroupId, response.getInlongGroupId());
 
-        StorageHiveResponse hiveResponse = (StorageHiveResponse) storage;
-        hiveResponse.setEnableCreateTable(BizConstant.DISABLE_CREATE_TABLE);
+        HiveStorageResponse hiveResponse = (HiveStorageResponse) response;
+        hiveResponse.setEnableCreateResource(BizConstant.DISABLE_CREATE_RESOURCE);
 
-        StorageHiveRequest request = CommonBeanUtils.copyProperties(hiveResponse, StorageHiveRequest::new);
+        HiveStorageRequest request = CommonBeanUtils.copyProperties(hiveResponse, HiveStorageRequest::new);
         boolean result = storageService.update(request, globalOperator);
         Assert.assertTrue(result);
     }
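
The testGetAndUpdate case above converts a response back into a request with
the supplier-based CommonBeanUtils.copyProperties overload. A plausible shape
for that helper, assuming it simply wraps Spring's BeanUtils (the actual
utility in manager-common may differ):

    import java.util.function.Supplier;
    import org.springframework.beans.BeanUtils;

    public static <S, T> T copyProperties(S source, Supplier<T> target) {
        T result = target.get();                  // e.g. HiveStorageRequest::new
        BeanUtils.copyProperties(source, result); // copies matching getters/setters
        return result;
    }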
diff --git a/inlong-manager/manager-web/src/test/resources/application-test.properties b/inlong-manager/manager-web/src/test/resources/application-test.properties
index 7603e24..5dd5b84 100644
--- a/inlong-manager/manager-web/src/test/resources/application-test.properties
+++ b/inlong-manager/manager-web/src/test/resources/application-test.properties
@@ -16,14 +16,18 @@
 # specific language governing permissions and limitations
 # under the License.
 #
+
 # Log level
 logging.level.root=INFO
 logging.level.org.apache.inlong.manager=debug
+
 spring.datasource.druid.url=jdbc:h2:mem:test;MODE=MYSQL;DB_CLOSE_DELAY=-1;IGNORECASE=TRUE;
 spring.datasource.druid.username=root
 spring.datasource.druid.password=""
+
 spring.datasource.druid.driver-class-name=org.h2.Driver
 spring.datasource.schema=classpath:sql/apache_inlong_manager.sql
+
 spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
 spring.datasource.druid.validationQuery=SELECT 'x'
 # Initialization size, minimum, maximum
@@ -44,26 +48,32 @@ spring.datasource.druid.testOnReturn=false
 spring.datasource.druid.filters=stat,wall
 # Open the mergeSql function through the connectProperties property, Slow SQL records
 spring.datasource.druid.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
+
 # Manager address of Tube cluster, used to create Topic
 cluster.tube.manager=http://127.0.0.1:8081
 # Master address, used to manage Tube broker
 cluster.tube.master=127.0.0.1:8000,127.0.0.1:8010
 # Tube cluster ID
 cluster.tube.clusterId=1
+
 # Push configuration to the path on ZooKeeper
 cluster.zk.url=127.0.0.1:2181
 cluster.zk.root=inlong_hive
+
 # Application name in Sort
 sort.appName=inlong_app
+
 # Pulsar admin URL
 pulsar.adminUrl=http://127.0.0.1:8080,127.0.0.2:8080,127.0.0.3:8080
 # Pulsar broker address
 pulsar.serviceUrl=pulsar://127.0.0.1:6650,127.0.0.1:6650,127.0.0.1:6650
 # Default tenant of Pulsar
 pulsar.defaultTenant=public
+
 # Audit configuration
 # Audit query source that decides what data source to query, currently only supports [MYSQL|ELASTICSEARCH]
 audit.query.source=MYSQL
+
 # Elasticsearch config
 # Elasticsearch host split by comma if more than one host, such as 'host1,host2'
 es.index.search.hostname=127.0.0.1
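
The added blank lines only group the properties by concern; the file still
wires the in-memory H2 database (in MySQL compatibility mode) that backs these
service tests. A sketch of a test base class that would activate this profile,
assuming standard Spring Boot test annotations (the real ServiceBaseTest may
differ):

    import org.springframework.boot.test.context.SpringBootTest;
    import org.springframework.test.context.ActiveProfiles;

    @SpringBootTest
    @ActiveProfiles("test") // resolves application-test.properties
    public class ServiceBaseTest {
        // On startup, Spring runs sql/apache_inlong_manager.sql against H2,
        // as configured by spring.datasource.schema above.
    }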
diff --git a/inlong-manager/manager-web/src/test/resources/sql/apache_inlong_manager.sql b/inlong-manager/manager-web/src/test/resources/sql/apache_inlong_manager.sql
index 1184ddd..75e1654 100644
--- a/inlong-manager/manager-web/src/test/resources/sql/apache_inlong_manager.sql
+++ b/inlong-manager/manager-web/src/test/resources/sql/apache_inlong_manager.sql
@@ -72,11 +72,11 @@ DROP TABLE IF EXISTS `business`;
 CREATE TABLE `business`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `name`                varchar(128)          DEFAULT '' COMMENT 'Business name, English, numbers and underscore',
     `cn_name`             varchar(256)          DEFAULT NULL COMMENT 'Chinese display name',
     `description`         varchar(256)          DEFAULT '' COMMENT 'Business Introduction',
-    `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
+    `middleware_type`     varchar(20)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
     `queue_module`        VARCHAR(20)  NULL     DEFAULT 'parallel' COMMENT 'Queue model of Pulsar, parallel: multiple partitions, high throughput, out-of-order messages; serial: single partition, low throughput, and orderly messages',
     `topic_partition_num` INT(4)       NULL     DEFAULT '3' COMMENT 'The number of partitions of Pulsar Topic, 1-20',
     `mq_resource_obj`     varchar(128) NOT NULL COMMENT 'MQ resource object, for Tube, its Topic, for Pulsar, its Namespace',
@@ -105,7 +105,7 @@ DROP TABLE IF EXISTS `business_pulsar`;
 CREATE TABLE `business_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     `ensemble`            int(3)                DEFAULT '3' COMMENT 'The writable nodes number of ledger',
     `write_quorum`        int(3)                DEFAULT '3' COMMENT 'The copies number of ledger',
     `ack_quorum`          int(3)                DEFAULT '2' COMMENT 'The number of requested acks',
@@ -128,7 +128,7 @@ DROP TABLE IF EXISTS `business_ext`;
 CREATE TABLE `business_ext`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id',
     `key_name`        varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`       varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`      tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -227,12 +227,12 @@ DROP TABLE IF EXISTS `consumption`;
 CREATE TABLE `consumption`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `consumer_group_name` varchar(255)          DEFAULT NULL COMMENT 'consumer group name',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256)          DEFAULT NULL COMMENT 'consumer group name',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
     `in_charges`          varchar(512) NOT NULL COMMENT 'Person in charge of consumption',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group id',
     `middleware_type`     varchar(10)           DEFAULT 'TUBE' COMMENT 'The middleware type of message queue, high throughput: TUBE, high consistency: PULSAR',
-    `topic`               varchar(255) NOT NULL COMMENT 'Consumption topic',
+    `topic`               varchar(256) NOT NULL COMMENT 'Consumption topic',
     `filter_enabled`      int(2)                DEFAULT '0' COMMENT 'Whether to filter, default 0, not filter consume',
     `inlong_stream_id`    varchar(1024)         DEFAULT NULL COMMENT 'Data stream ID for consumption, if filter_enable is 1, it cannot be empty',
     `status`              int(4)       NOT NULL COMMENT 'Status: draft, pending approval, approval rejected, approval passed',
@@ -252,13 +252,13 @@ CREATE TABLE `consumption_pulsar`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT,
     `consumption_id`      int(11)      DEFAULT NULL COMMENT 'ID of the consumption information to which it belongs, guaranteed to be uniquely associated with consumption information',
-    `consumer_group_id`   varchar(255) NOT NULL COMMENT 'Consumer group ID',
-    `consumer_group_name` varchar(255) DEFAULT NULL COMMENT 'Consumer group name',
-    `inlong_group_id`     varchar(255) NOT NULL COMMENT 'Business group ID',
+    `consumer_group_id`   varchar(256) NOT NULL COMMENT 'Consumer group ID',
+    `consumer_group_name` varchar(256) DEFAULT NULL COMMENT 'Consumer group name',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Business group ID',
     `is_rlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure the retry letter topic, 0: no configuration, 1: configuration',
-    `retry_letter_topic`  varchar(255) DEFAULT NULL COMMENT 'The name of the retry queue topic',
+    `retry_letter_topic`  varchar(256) DEFAULT NULL COMMENT 'The name of the retry queue topic',
     `is_dlq`              tinyint(1)   DEFAULT '0' COMMENT 'Whether to configure dead letter topic, 0: no configuration, 1: means configuration',
-    `dead_letter_topic`   varchar(255) DEFAULT NULL COMMENT 'dead letter topic name',
+    `dead_letter_topic`   varchar(256) DEFAULT NULL COMMENT 'dead letter topic name',
     `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete',
     PRIMARY KEY (`id`)
 ) COMMENT ='Pulsar consumption table';
@@ -332,8 +332,8 @@ DROP TABLE IF EXISTS `data_stream`;
 CREATE TABLE `data_stream`
 (
     `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_stream_id`       varchar(128) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
-    `inlong_group_id`        varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Data stream id, non-deleted globally unique',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
     `name`                   varchar(64)       DEFAULT NULL COMMENT 'The name of the data stream page display, can be Chinese',
     `description`            varchar(256)      DEFAULT '' COMMENT 'Introduction to data stream',
     `mq_resource_obj`        varchar(128)      DEFAULT NULL COMMENT 'MQ resource object, in the data stream, Tube is data_stream_id, Pulsar is Topic',
@@ -368,8 +368,8 @@ DROP TABLE IF EXISTS `data_stream_ext`;
 CREATE TABLE `data_stream_ext`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `key_name`         varchar(64)  NOT NULL COMMENT 'Configuration item name',
     `key_value`        varchar(256)          DEFAULT NULL COMMENT 'The value of the configuration item',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -384,20 +384,16 @@ DROP TABLE IF EXISTS `data_stream_field`;
 CREATE TABLE `data_stream_field`
 (
     `id`                  int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`     varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`     varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id`    varchar(256) NOT NULL COMMENT 'Owning data stream id',
-    `is_predefined_field` tinyint(1)   DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
-    `field_name`          varchar(20)  NOT NULL COMMENT 'field name',
-    `field_value`         varchar(128) DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
-    `pre_expression`      varchar(256) DEFAULT NULL COMMENT 'Pre-defined field value expression',
-    `field_type`          varchar(20)  NOT NULL COMMENT 'field type',
-    `field_comment`       varchar(50)  DEFAULT NULL COMMENT 'Field description',
-    `rank_num`            smallint(6)  DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`          tinyint(1)   DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `is_exist`            tinyint(1)   DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
-    `bon_field_path`      varchar(256) DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`      varchar(64)  DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`       varchar(20)  DEFAULT NULL COMMENT 'Encryption level',
+    `is_predefined_field` tinyint(1)    DEFAULT '0' COMMENT 'Whether it is a predefined field, 0: no, 1: yes',
+    `field_name`          varchar(50)  NOT NULL COMMENT 'field name',
+    `field_value`         varchar(128)  DEFAULT NULL COMMENT 'Field value, required if it is a predefined field',
+    `pre_expression`      varchar(256)  DEFAULT NULL COMMENT 'Pre-defined field value expression',
+    `field_type`          varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`       varchar(2000) DEFAULT NULL COMMENT 'field description',
+    `rank_num`            smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
+    `is_deleted`          tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
     PRIMARY KEY (`id`)
 );
 
@@ -409,13 +405,13 @@ CREATE TABLE `operation_log`
 (
     `id`                  int(11)   NOT NULL AUTO_INCREMENT,
     `authentication_type` varchar(64)        DEFAULT NULL COMMENT 'Authentication type',
-    `operation_type`      varchar(255)       DEFAULT NULL COMMENT 'operation type',
+    `operation_type`      varchar(256)       DEFAULT NULL COMMENT 'operation type',
     `http_method`         varchar(64)        DEFAULT NULL COMMENT 'Request method',
-    `invoke_method`       varchar(255)       DEFAULT NULL COMMENT 'invoke method',
-    `operator`            varchar(255)       DEFAULT NULL COMMENT 'operator',
-    `proxy`               varchar(255)       DEFAULT NULL COMMENT 'proxy',
-    `request_url`         varchar(255)       DEFAULT NULL COMMENT 'Request URL',
-    `remote_address`      varchar(255)       DEFAULT NULL COMMENT 'Request IP',
+    `invoke_method`       varchar(256)       DEFAULT NULL COMMENT 'invoke method',
+    `operator`            varchar(256)       DEFAULT NULL COMMENT 'operator',
+    `proxy`               varchar(256)       DEFAULT NULL COMMENT 'proxy',
+    `request_url`         varchar(256)       DEFAULT NULL COMMENT 'Request URL',
+    `remote_address`      varchar(256)       DEFAULT NULL COMMENT 'Request IP',
     `cost_time`           bigint(20)         DEFAULT NULL COMMENT 'time-consuming',
     `body`                text COMMENT 'Request body',
     `param`               text COMMENT 'parameter',
@@ -433,11 +429,11 @@ CREATE TABLE `role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
     `role_code`   varchar(100) NOT NULL COMMENT 'Role code',
-    `role_name`   varchar(255) NOT NULL COMMENT 'Role Chinese name',
+    `role_name`   varchar(256) NOT NULL COMMENT 'Role Chinese name',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_role_code` (`role_code`),
@@ -451,7 +447,7 @@ DROP TABLE IF EXISTS `source_db_basic`;
 CREATE TABLE `source_db_basic`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
     `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `sync_type`        tinyint(1)            DEFAULT '0' COMMENT 'Data synchronization type, 0: FULL, full amount, 1: INCREMENTAL, incremental',
     `is_deleted`       tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
@@ -470,8 +466,8 @@ DROP TABLE IF EXISTS `source_db_detail`;
 CREATE TABLE `source_db_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)  NOT NULL COMMENT 'Collection type, with Agent, DataProxy client, LoadProxy',
     `db_name`          varchar(128)          DEFAULT NULL COMMENT 'database name',
     `transfer_ip`      varchar(64)           DEFAULT NULL COMMENT 'Transfer IP',
@@ -498,8 +494,8 @@ DROP TABLE IF EXISTS `source_file_basic`;
 CREATE TABLE `source_file_basic`
 (
     `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'ID',
-    `inlong_group_id`   varchar(128) NOT NULL COMMENT 'Business group id',
-    `inlong_stream_id`  varchar(128) NOT NULL COMMENT 'Data stream id',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'Business group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'Data stream id',
     `is_hybrid_source`  tinyint(1)            DEFAULT '0' COMMENT 'Whether to mix data sources',
     `is_table_mapping`  tinyint(1)            DEFAULT '0' COMMENT 'Is there a table name mapping',
     `date_offset`       int(4)                DEFAULT '0' COMMENT 'Time offset',
@@ -523,8 +519,8 @@ DROP TABLE IF EXISTS `source_file_detail`;
 CREATE TABLE `source_file_detail`
 (
     `id`               int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`  varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id` varchar(128) NOT NULL COMMENT 'Owning data stream id',
+    `inlong_group_id`  varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id` varchar(256) NOT NULL COMMENT 'Owning data stream id',
     `access_type`      varchar(20)           DEFAULT 'Agent' COMMENT 'Collection type, there are Agent, DataProxy client, LoadProxy, the file can only be Agent temporarily',
     `server_name`      varchar(64)           DEFAULT NULL COMMENT 'The name of the data source service. If it is empty, add configuration through the following fields',
     `ip`               varchar(128) NOT NULL COMMENT 'Data source IP address',
@@ -562,63 +558,47 @@ CREATE TABLE `storage_ext`
 );
 
 -- ----------------------------
--- Table structure for storage_hive
+-- Table structure for data_storage
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive`;
-CREATE TABLE `storage_hive`
+DROP TABLE IF EXISTS `data_storage`;
+CREATE TABLE `data_storage`
 (
-    `id`                          int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `inlong_group_id`             varchar(128) NOT NULL COMMENT 'Owning business group id',
-    `inlong_stream_id`            varchar(128) NOT NULL COMMENT 'Owning data stream id',
-    `enable_create_table`         tinyint(1)            DEFAULT 1 COMMENT 'Whether to enable create table, 1: enable, 0: disable, default is 1',
-    `jdbc_url`                    varchar(255)          DEFAULT NULL COMMENT 'Hive JDBC connection URL, such as "jdbc:hive2://127.0.0.1:10000"',
-    `username`                    varchar(128)          DEFAULT NULL COMMENT 'Username',
-    `password`                    varchar(255)          DEFAULT NULL COMMENT 'User password',
-    `db_name`                     varchar(128)          DEFAULT NULL COMMENT 'Target database name',
-    `table_name`                  varchar(128)          DEFAULT NULL COMMENT 'Target data table name',
-    `hdfs_default_fs`             varchar(255)          DEFAULT NULL COMMENT 'HDFS defaultFS, such as "hdfs://127.0.0.1:9000"',
-    `warehouse_dir`               varchar(250)          DEFAULT '/user/hive/warehouse' COMMENT 'Hive table storage path on HDFS, such as "/user/hive/warehouse"',
-    `partition_interval`          int(5)                DEFAULT NULL COMMENT 'Partition interval, support: 1(D / H), 10 I, 30 I',
-    `partition_unit`              varchar(10)           DEFAULT 'D' COMMENT 'Partition type, support: D-day, H-hour, I-minute',
-    `primary_partition`           varchar(255)          DEFAULT 'dt' COMMENT 'primary partition field',
-    `secondary_partition`         varchar(256)          DEFAULT NULL COMMENT 'secondary partition field',
-    `partition_creation_strategy` varchar(50)           DEFAULT 'COMPLETED' COMMENT 'Partition creation strategy, support: ARRIVED, COMPLETED',
-    `file_format`                 varchar(15)           DEFAULT 'TextFile' COMMENT 'The stored table format, TextFile, RCFile, SequenceFile, Avro',
-    `data_encoding`               varchar(20)           DEFAULT 'UTF-8' COMMENT 'data encoding type',
-    `data_separator`              varchar(10)           DEFAULT NULL COMMENT 'data field separator',
-    `storage_period`              int(5)                DEFAULT '10' COMMENT 'Data storage period, unit: day',
-    `opt_log`                     varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
-    `status`                      int(4)                DEFAULT '0' COMMENT 'status',
-    `previous_status`             int(4)                DEFAULT '0' COMMENT 'Previous status',
-    `is_deleted`                  tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
-    `creator`                     varchar(64)           DEFAULT NULL COMMENT 'creator name',
-    `modifier`                    varchar(64)           DEFAULT NULL COMMENT 'modifier name',
-    `create_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
-    `modify_time`                 timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'modify time',
-    `temp_view`                   text                  DEFAULT NULL COMMENT 'Temporary view, used to save un-submitted and unapproved intermediate data after modification',
+    `id`                     int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`        varchar(256) NOT NULL COMMENT 'Owning business group id',
+    `inlong_stream_id`       varchar(256) NOT NULL COMMENT 'Owning data stream id',
+    `storage_type`           varchar(15)           DEFAULT 'HIVE' COMMENT 'Storage type, including: HIVE, ES, etc.',
+    `storage_period`         int(11)               DEFAULT '10' COMMENT 'Data storage period, unit: day',
+    `enable_create_resource` tinyint(1)            DEFAULT '1' COMMENT 'Whether to enable creating the storage resource, 0: disable, 1: enable, default is 1',
+    `ext_params`             text COMMENT 'Other fields, will be saved as JSON type',
+    `operate_log`            varchar(5000)         DEFAULT NULL COMMENT 'Background operation log',
+    `status`                 int(11)               DEFAULT '0' COMMENT 'Status',
+    `previous_status`        int(11)               DEFAULT '0' COMMENT 'Previous status',
+    `is_deleted`             tinyint(1)            DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
+    `creator`                varchar(64)  NOT NULL COMMENT 'Creator name',
+    `modifier`               varchar(64)           DEFAULT NULL COMMENT 'Modifier name',
+    `create_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
+    `modify_time`            timestamp    NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Modify time',
     PRIMARY KEY (`id`)
 );
 
 -- ----------------------------
--- Table structure for storage_hive_field
+-- Table structure for data_storage_field
 -- ----------------------------
-DROP TABLE IF EXISTS `storage_hive_field`;
-CREATE TABLE `storage_hive_field`
+DROP TABLE IF EXISTS `data_storage_field`;
+CREATE TABLE `data_storage_field`
 (
-    `id`                int(11)     NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
-    `storage_id`        int(11)     NOT NULL COMMENT 'Hive data storage id',
-    `source_field_name` varchar(20) NOT NULL COMMENT 'source field name',
-    `source_field_type` varchar(20) NOT NULL COMMENT 'source field type',
-    `field_name`        varchar(20) NOT NULL COMMENT 'field name',
-    `field_type`        varchar(20) NOT NULL COMMENT 'field type',
-    `field_comment`     varchar(2000) DEFAULT '' COMMENT 'Field description',
-    `is_required`       tinyint(1)    DEFAULT NULL COMMENT 'Is it required, 0: not necessary, 1: required',
-    `bon_field_path`    varchar(256)  DEFAULT NULL COMMENT 'BON field path',
-    `bon_field_type`    varchar(64)   DEFAULT NULL COMMENT 'BON field type',
-    `encrypt_level`     varchar(20)   DEFAULT NULL COMMENT 'Encryption level',
-    `is_exist`          tinyint(1)    DEFAULT '0' COMMENT 'Does it exist, 0: does not exist, 1: exists',
+    `id`                int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
+    `inlong_group_id`   varchar(256) NOT NULL COMMENT 'inlong group id',
+    `inlong_stream_id`  varchar(256) NOT NULL COMMENT 'inlong stream id',
+    `storage_id`        int(11)      NOT NULL COMMENT 'data storage id',
+    `storage_type`      varchar(15)  NOT NULL COMMENT 'storage type',
+    `source_field_name` varchar(50)   DEFAULT NULL COMMENT 'source field name',
+    `source_field_type` varchar(50)   DEFAULT NULL COMMENT 'source field type',
+    `field_name`        varchar(50)  NOT NULL COMMENT 'field name',
+    `field_type`        varchar(50)  NOT NULL COMMENT 'field type',
+    `field_comment`     varchar(2000) DEFAULT NULL COMMENT 'field description',
     `rank_num`          smallint(6)   DEFAULT '0' COMMENT 'Field order (front-end display field order)',
-    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, 1: deleted',
+    `is_deleted`        tinyint(1)    DEFAULT '0' COMMENT 'Whether to delete, 0: not deleted, > 0: deleted',
     PRIMARY KEY (`id`)
 );
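
With the wide storage_hive table gone, every column that was Hive-specific now
travels inside data_storage.ext_params as JSON. A sketch of the intended
round-trip, assuming Jackson and a hypothetical HiveStorageDTO holding the
Hive-only settings (method and accessor names are illustrative):

    import com.fasterxml.jackson.core.JsonProcessingException;
    import com.fasterxml.jackson.databind.ObjectMapper;

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Serialize type-specific settings into the ext_params text column.
    static String toExtParams(HiveStorageDTO dto) throws JsonProcessingException {
        return MAPPER.writeValueAsString(dto);
    }

    // Restore them when a row with storage_type = 'HIVE' is loaded.
    static HiveStorageDTO fromExtParams(String extParams) throws JsonProcessingException {
        return MAPPER.readValue(extParams, HiveStorageDTO.class);
    }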
 
@@ -631,12 +611,12 @@ CREATE TABLE `task`
     `id`          bigint(20)   NOT NULL,
     `taskflow_id` bigint(20)   NOT NULL COMMENT 'Owning task flow id',
     `task_def_id` bigint(20)    DEFAULT NULL COMMENT 'task definition id',
-    `task_name`   varchar(255) NOT NULL COMMENT 'task name',
-    `status`      varchar(255)  DEFAULT NULL COMMENT 'task status',
-    `post_param`  varchar(255)  DEFAULT NULL COMMENT 'Task parameters',
+    `task_name`   varchar(256) NOT NULL COMMENT 'task name',
+    `status`      varchar(256)  DEFAULT NULL COMMENT 'task status',
+    `post_param`  varchar(256)  DEFAULT NULL COMMENT 'Task parameters',
     `resultmsg`   varchar(1000) DEFAULT NULL COMMENT 'Execution result log',
     `create_time` datetime     NOT NULL COMMENT 'Create time',
-    `create_by`   varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`   varchar(256) NOT NULL COMMENT 'creator',
     `update_time` datetime      DEFAULT NULL COMMENT 'last modified time',
     `update_by`   varchar(0)    DEFAULT NULL COMMENT 'last modified person',
     PRIMARY KEY (`id`)
@@ -651,10 +631,10 @@ CREATE TABLE `task_def`
     `id`              bigint(20)   NOT NULL,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Task flow definition id',
     `parent_id`       bigint(20)   DEFAULT NULL COMMENT 'parent task id',
-    `implclass`       varchar(255) DEFAULT NULL COMMENT 'task processing flow class',
-    `task_name`       varchar(255) DEFAULT NULL COMMENT 'task name',
+    `implclass`       varchar(256) DEFAULT NULL COMMENT 'task processing flow class',
+    `task_name`       varchar(256) DEFAULT NULL COMMENT 'task name',
     `create_time`     datetime     NOT NULL COMMENT 'Create time',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
     `update_by`       datetime     DEFAULT NULL COMMENT 'last modified person',
     `delivery_id`     bigint(20)   DEFAULT NULL COMMENT 'Task push method',
@@ -669,12 +649,12 @@ CREATE TABLE `taskflow`
 (
     `id`              bigint(20)   NOT NULL AUTO_INCREMENT,
     `taskflow_def_id` bigint(20)   NOT NULL COMMENT 'Taskflow definition id',
-    `status`          varchar(255) DEFAULT NULL COMMENT 'status',
-    `create_by`       varchar(255) NOT NULL COMMENT 'creator',
+    `status`          varchar(256) DEFAULT NULL COMMENT 'status',
+    `create_by`       varchar(256) NOT NULL COMMENT 'creator',
     `create_time`     datetime     DEFAULT NULL COMMENT 'Create time',
     `update_time`     datetime     DEFAULT NULL COMMENT 'last modified time',
-    `update_by`       varchar(255) DEFAULT NULL COMMENT 'last modified person',
-    `event`           varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `update_by`       varchar(256) DEFAULT NULL COMMENT 'last modified person',
+    `event`           varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -685,12 +665,12 @@ DROP TABLE IF EXISTS `taskflow_def`;
 CREATE TABLE `taskflow_def`
 (
     `id`            bigint(20)   NOT NULL AUTO_INCREMENT,
-    `name`          varchar(255) NOT NULL COMMENT 'Workflow definition name',
-    `descrip`       varchar(255) DEFAULT NULL COMMENT 'Workflow function description',
+    `name`          varchar(256) NOT NULL COMMENT 'Workflow definition name',
+    `descrip`       varchar(256) DEFAULT NULL COMMENT 'Workflow function description',
     `create_time`   datetime     NOT NULL COMMENT 'Create time',
-    `create_by`     varchar(255) NOT NULL COMMENT 'creator',
+    `create_by`     varchar(256) NOT NULL COMMENT 'creator',
     `isValid`       int(11)      DEFAULT NULL COMMENT 'logical deletion',
-    `trigger_event` varchar(255) DEFAULT NULL COMMENT 'trigger event',
+    `trigger_event` varchar(256) DEFAULT NULL COMMENT 'trigger event',
     PRIMARY KEY (`id`)
 );
 
@@ -701,14 +681,14 @@ DROP TABLE IF EXISTS `user`;
 CREATE TABLE `user`
 (
     `id`           int(11)      NOT NULL AUTO_INCREMENT,
-    `name`         varchar(255) NOT NULL COMMENT 'account name',
+    `name`         varchar(256) NOT NULL COMMENT 'account name',
     `password`     varchar(64)  NOT NULL COMMENT 'password md5',
     `account_type` int(11)      NOT NULL DEFAULT '1' COMMENT 'account type, 0-manager 1-normal',
     `due_date`     datetime              DEFAULT NULL COMMENT 'due date for account',
     `create_time`  datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Create time',
     `update_time`  datetime              DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
-    `create_by`    varchar(255) NOT NULL COMMENT 'create by sb.',
-    `update_by`    varchar(255)          DEFAULT NULL COMMENT 'update by sb.',
+    `create_by`    varchar(256) NOT NULL COMMENT 'create by sb.',
+    `update_by`    varchar(256)          DEFAULT NULL COMMENT 'update by sb.',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_user_name` (`name`)
 );
@@ -725,12 +705,12 @@ DROP TABLE IF EXISTS `user_role`;
 CREATE TABLE `user_role`
 (
     `id`          int(11)      NOT NULL AUTO_INCREMENT,
-    `user_name`   varchar(255) NOT NULL COMMENT 'username rtx',
-    `role_code`   varchar(255) NOT NULL COMMENT 'role',
+    `user_name`   varchar(256) NOT NULL COMMENT 'username rtx',
+    `role_code`   varchar(256) NOT NULL COMMENT 'role',
     `create_time` datetime     NOT NULL,
     `update_time` datetime     NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    `create_by`   varchar(255) NOT NULL,
-    `update_by`   varchar(255) NOT NULL,
+    `create_by`   varchar(256) NOT NULL,
+    `update_by`   varchar(256) NOT NULL,
     `disabled`    tinyint(1)   NOT NULL DEFAULT '0' COMMENT 'Is it disabled?',
     PRIMARY KEY (`id`)
 );
@@ -742,11 +722,11 @@ DROP TABLE IF EXISTS `wf_approver`;
 CREATE TABLE `wf_approver`
 (
     `id`                int(11)       NOT NULL AUTO_INCREMENT,
-    `process_name`      varchar(255)  NOT NULL COMMENT 'process definition name',
-    `task_name`         varchar(255)  NOT NULL COMMENT 'Approval task name',
+    `process_name`      varchar(256)  NOT NULL COMMENT 'process definition name',
+    `task_name`         varchar(256)  NOT NULL COMMENT 'Approval task name',
     `filter_key`        varchar(64)   NOT NULL COMMENT 'filter condition KEY',
-    `filter_value`      varchar(255)           DEFAULT NULL COMMENT 'Filter matching value',
-    `filter_value_desc` varchar(255)           DEFAULT NULL COMMENT 'Filter value description',
+    `filter_value`      varchar(256)           DEFAULT NULL COMMENT 'Filter matching value',
+    `filter_value_desc` varchar(256)           DEFAULT NULL COMMENT 'Filter value description',
     `approvers`         varchar(1024) NOT NULL COMMENT 'Approvers, separated by commas',
     `creator`           varchar(64)   NOT NULL COMMENT 'creator',
     `modifier`          varchar(64)   NOT NULL COMMENT 'modifier',
@@ -773,12 +753,12 @@ CREATE TABLE `wf_event_log`
 (
     `id`                   int(11)      NOT NULL AUTO_INCREMENT,
     `process_inst_id`      int(11)      NOT NULL,
-    `process_name`         varchar(255)  DEFAULT NULL COMMENT 'Process name',
-    `process_display_name` varchar(255) NOT NULL COMMENT 'Process name',
-    `inlong_group_id`      varchar(128)  DEFAULT NULL COMMENT 'Business group id',
+    `process_name`         varchar(256)  DEFAULT NULL COMMENT 'Process name',
+    `process_display_name` varchar(256) NOT NULL COMMENT 'Process name',
+    `inlong_group_id`      varchar(256)  DEFAULT NULL COMMENT 'Business group id',
     `task_inst_id`         int(11)       DEFAULT NULL COMMENT 'Task ID',
-    `element_name`         varchar(255) NOT NULL COMMENT 'The name of the component that triggered the event',
-    `element_display_name` varchar(255) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
+    `element_name`         varchar(256) NOT NULL COMMENT 'The name of the component that triggered the event',
+    `element_display_name` varchar(256) NOT NULL COMMENT 'Chinese name of the component that triggered the event',
     `event_type`           varchar(64)  NOT NULL COMMENT 'Event type: process event/task event',
     `event`                varchar(64)  NOT NULL COMMENT 'Event name',
     `listener`             varchar(1024) DEFAULT NULL COMMENT 'Event listener name',
@@ -799,12 +779,12 @@ DROP TABLE IF EXISTS `wf_process_instance`;
 CREATE TABLE `wf_process_instance`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT,
-    `name`            varchar(255) NOT NULL COMMENT 'process name',
-    `display_name`    varchar(255) NOT NULL COMMENT 'Process display name',
-    `type`            varchar(255)          DEFAULT NULL COMMENT 'Process classification',
-    `title`           varchar(255)          DEFAULT NULL COMMENT 'Process title',
-    `inlong_group_id` varchar(128)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
-    `applicant`       varchar(255) NOT NULL COMMENT 'applicant',
+    `name`            varchar(256) NOT NULL COMMENT 'process name',
+    `display_name`    varchar(256) NOT NULL COMMENT 'Process display name',
+    `type`            varchar(256)          DEFAULT NULL COMMENT 'Process classification',
+    `title`           varchar(256)          DEFAULT NULL COMMENT 'Process title',
+    `inlong_group_id` varchar(256)          DEFAULT NULL COMMENT 'Business group id: to facilitate related business',
+    `applicant`       varchar(256) NOT NULL COMMENT 'applicant',
     `state`           varchar(64)  NOT NULL COMMENT 'state',
     `form_data`       mediumtext COMMENT 'form information',
     `start_time`      datetime     NOT NULL COMMENT 'start time',
@@ -823,14 +803,14 @@ CREATE TABLE `wf_task_instance`
     `id`                   int(11)       NOT NULL AUTO_INCREMENT,
     `type`                 varchar(64)   NOT NULL COMMENT 'Task type: UserTask user task/ServiceTask system task',
     `process_inst_id`      int(11)       NOT NULL COMMENT 'process ID',
-    `process_name`         varchar(255)  NOT NULL COMMENT 'process name',
-    `process_display_name` varchar(255)  NOT NULL COMMENT 'process name',
-    `name`                 varchar(255)  NOT NULL COMMENT 'task name',
-    `display_name`         varchar(255)  NOT NULL COMMENT 'Task display name',
+    `process_name`         varchar(256)  NOT NULL COMMENT 'process name',
+    `process_display_name` varchar(256)  NOT NULL COMMENT 'process name',
+    `name`                 varchar(256)  NOT NULL COMMENT 'task name',
+    `display_name`         varchar(256)  NOT NULL COMMENT 'Task display name',
     `applicant`            varchar(64)   DEFAULT NULL COMMENT 'applicant',
     `approvers`            varchar(1024) NOT NULL COMMENT 'approvers',
     `state`                varchar(64)   NOT NULL COMMENT 'state',
-    `operator`             varchar(255)  DEFAULT NULL COMMENT 'actual operator',
+    `operator`             varchar(256)  DEFAULT NULL COMMENT 'actual operator',
     `remark`               varchar(1024) DEFAULT NULL COMMENT 'Remark information',
     `form_data`            mediumtext COMMENT 'form information submitted by the current task',
     `start_time`           datetime      NOT NULL COMMENT 'start time',
@@ -870,7 +850,7 @@ CREATE TABLE `cluster_set_inlongid`
 (
     `id`              int(11)      NOT NULL AUTO_INCREMENT COMMENT 'Incremental primary key',
     `set_name`        varchar(128) NOT NULL COMMENT 'ClusterSet name, English, numbers and underscore',
-    `inlong_group_id` varchar(128) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
+    `inlong_group_id` varchar(256) NOT NULL COMMENT 'Business group id, filled in by the user, undeleted ones cannot be repeated',
     PRIMARY KEY (`id`),
     UNIQUE KEY `unique_cluster_set_inlongid` (`set_name`, `inlong_group_id`)
 );
diff --git a/inlong-manager/pom.xml b/inlong-manager/pom.xml
index a88a904..0f7340e 100644
--- a/inlong-manager/pom.xml
+++ b/inlong-manager/pom.xml
@@ -61,6 +61,7 @@
         <guava.version>18.0</guava.version>
         <jackson.version>2.12.0</jackson.version>
         <jackson.jsr310.version>2.13.1</jackson.jsr310.version>
+        <reflections.version>0.10.2</reflections.version>
         <opencsv.version>5.4</opencsv.version>
         <httpcore.version>4.4.14</httpcore.version>
         <httpclient.version>4.5.13</httpclient.version>
@@ -188,6 +189,11 @@
                 <version>${log4j.version}</version>
             </dependency>
             <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-core</artifactId>
+                <version>${log4j.version}</version>
+            </dependency>
+            <dependency>
                 <groupId>org.springframework.boot</groupId>
                 <artifactId>spring-boot-starter-aop</artifactId>
                 <version>${spring-boot.version}</version>
@@ -272,6 +278,12 @@
                 <version>${jackson.version}</version>
             </dependency>
             <dependency>
+                <groupId>org.reflections</groupId>
+                <artifactId>reflections</artifactId>
+                <version>${reflections.version}</version>
+            </dependency>
+
+            <dependency>
                 <groupId>io.swagger</groupId>
                 <artifactId>swagger-annotations</artifactId>
                 <version>${swagger-annotations.version}</version>