Posted to commits@doris.apache.org by mo...@apache.org on 2022/06/17 13:02:52 UTC

[doris] branch master updated: [style](fe)the last step of fe CheckStyle (#10134)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new b7b78ae707 [style](fe)the last step of fe CheckStyle (#10134)
b7b78ae707 is described below

commit b7b78ae7079a61eadab0e78d9e9c9792dd0af1b3
Author: morrySnow <10...@users.noreply.github.com>
AuthorDate: Fri Jun 17 21:02:45 2022 +0800

    [style](fe)the last step of fe CheckStyle (#10134)
    
    1. fix all checkstyle warnings
    2. change the severity of all checkstyle rules to error
    3. remove some javadoc rules
        a. RequireEmptyLineBeforeBlockTagGroup
        b. JavadocStyle
        c. JavadocParagraph
    4. suppress some rules for old code
        a. all javadoc rules now apply only to Nereids
        b. DeclarationOrder now applies only to Nereids
        c. OverloadMethodsDeclarationOrder now applies only to Nereids
        d. VariableDeclarationUsageDistance now applies only to Nereids
        e. suppress OneTopLevelClass on org/apache/doris/load/loadv2/dpp/ColumnParser.java
        f. suppress OneTopLevelClass on org/apache/doris/load/loadv2/dpp/SparkRDDAggregator.java
        g. suppress LineLength on org/apache/doris/catalog/FunctionSet.java
        h. suppress LineLength on org/apache/doris/common/ErrorCode.java
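To make item 1 concrete, here is a hypothetical before/after sketch (not taken from the diff itself) of a few rules that are now errors: `AvoidStarImport`, `NeedBraces`, `OneStatementPerLine`, and `StringLiteralEquality`. The class and method names are invented for illustration.

```java
// Hypothetical example: the kind of code the now-error rules reject,
// with the compliant form used throughout this commit.
import java.util.List;  // AvoidStarImport: no "import java.util.*;"

public class StyleExamples {
    public static int countDefaults(List<String> names) {
        int hits = 0;  // MultipleVariableDeclarations: one variable per declaration
        for (String name : names) {
            // StringLiteralEquality: use equals(), never == on string literals
            if ("default".equals(name)) {
                hits++;  // NeedBraces: braces even around a single statement
            }
        }
        return hits;  // OneStatementPerLine: no "hits++; return hits;" on one line
    }
}
```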
---
 .../developer/developer-guide/java-format-code.md  |   7 +
 .../developer/developer-guide/java-format-code.md  |   7 +
 fe/check/checkstyle/checkstyle.xml                 | 119 ++------
 fe/check/checkstyle/suppressions.xml               |  29 ++
 .../apache/doris/common/io/DataOutputBuffer.java   |   2 +-
 .../java/org/apache/doris/common/io/IOUtils.java   |   9 +-
 .../org/apache/doris/common/io/OutputBuffer.java   |   2 +-
 .../main/java/org/apache/doris/common/io/Text.java |   2 +-
 .../doris/common/property/PropertySchema.java      |   2 +-
 .../src/main/java/org/apache/doris/PaloFe.java     |   6 +-
 .../main/java/org/apache/doris/alter/Alter.java    |  33 ++-
 .../java/org/apache/doris/alter/AlterHandler.java  |  11 +-
 .../java/org/apache/doris/alter/AlterJobV2.java    |   5 +-
 .../org/apache/doris/alter/AlterOperations.java    |   7 +-
 .../doris/alter/MaterializedViewHandler.java       |  45 +--
 .../java/org/apache/doris/alter/RollupJobV2.java   |  30 +-
 .../apache/doris/alter/SchemaChangeHandler.java    | 110 +++++---
 .../org/apache/doris/alter/SchemaChangeJobV2.java  |  27 +-
 .../java/org/apache/doris/alter/SystemHandler.java |   3 +-
 .../apache/doris/analysis/AbstractBackupStmt.java  |   9 +-
 .../analysis/AdminCancelRebalanceDiskStmt.java     |   2 +-
 .../apache/doris/analysis/AdminCleanTrashStmt.java |   2 +-
 .../doris/analysis/AdminCompactTableStmt.java      |   3 +-
 .../doris/analysis/AdminRebalanceDiskStmt.java     |   6 +-
 .../org/apache/doris/analysis/AggregateInfo.java   |   6 +-
 .../apache/doris/analysis/AggregateInfoBase.java   |   4 +-
 .../apache/doris/analysis/AggregateParamsList.java |   2 +-
 .../doris/analysis/AlterColumnStatsStmt.java       |   4 +-
 .../doris/analysis/AlterDatabaseQuotaStmt.java     |   3 +-
 .../apache/doris/analysis/AlterDatabaseRename.java |   7 +-
 .../doris/analysis/AlterRoutineLoadStmt.java       |  15 +-
 .../doris/analysis/AlterSqlBlockRuleStmt.java      |  15 +-
 .../apache/doris/analysis/AlterTableStatsStmt.java |   4 +-
 .../org/apache/doris/analysis/AlterTableStmt.java  |   6 +-
 .../org/apache/doris/analysis/AlterViewStmt.java   |   7 +-
 .../org/apache/doris/analysis/AnalyticExpr.java    |  15 +-
 .../org/apache/doris/analysis/AnalyticInfo.java    |   4 +-
 .../org/apache/doris/analysis/AnalyticWindow.java  |   6 +
 .../java/org/apache/doris/analysis/Analyzer.java   |  14 +-
 .../org/apache/doris/analysis/ArithmeticExpr.java  |   4 +
 .../java/org/apache/doris/analysis/BackupStmt.java |   4 +-
 .../org/apache/doris/analysis/BinaryPredicate.java |   2 +-
 .../java/org/apache/doris/analysis/BrokerDesc.java |   7 +-
 .../doris/analysis/BuiltinAggregateFunction.java   |   1 +
 .../java/org/apache/doris/analysis/CaseExpr.java   |  10 +-
 .../java/org/apache/doris/analysis/CastExpr.java   |   3 +-
 .../apache/doris/analysis/ChannelDescription.java  |   3 +-
 .../java/org/apache/doris/analysis/ColumnDef.java  |   8 +-
 .../apache/doris/analysis/CompoundPredicate.java   |   2 +-
 .../doris/analysis/CreateDataSyncJobStmt.java      |   6 +-
 .../org/apache/doris/analysis/CreateDbStmt.java    |   3 +-
 .../apache/doris/analysis/CreateFunctionStmt.java  |  10 +-
 .../doris/analysis/CreateMaterializedViewStmt.java |   9 +-
 .../doris/analysis/CreateRoutineLoadStmt.java      |   3 +-
 .../doris/analysis/CreateSqlBlockRuleStmt.java     |  17 +-
 .../apache/doris/analysis/CreateTableLikeStmt.java |   3 +-
 .../org/apache/doris/analysis/CreateTableStmt.java |   6 +-
 .../org/apache/doris/analysis/CreateUserStmt.java  |   3 +-
 .../org/apache/doris/analysis/DataDescription.java |  11 +-
 .../org/apache/doris/analysis/DateLiteral.java     |  27 +-
 .../org/apache/doris/analysis/DecimalLiteral.java  |   3 +-
 .../java/org/apache/doris/analysis/DeleteStmt.java |   6 +-
 .../org/apache/doris/analysis/DescribeStmt.java    |   4 +-
 .../org/apache/doris/analysis/DescriptorTable.java |   2 +-
 .../java/org/apache/doris/analysis/DropDbStmt.java |   3 +-
 .../java/org/apache/doris/analysis/ExportStmt.java |   2 +-
 .../main/java/org/apache/doris/analysis/Expr.java  |  31 +-
 .../java/org/apache/doris/analysis/ExprId.java     |   2 +-
 .../apache/doris/analysis/ExprSubstitutionMap.java |   2 +-
 .../apache/doris/analysis/ExpressionFunctions.java |   6 +-
 .../apache/doris/analysis/FunctionCallExpr.java    |  24 +-
 .../org/apache/doris/analysis/FunctionParams.java  |   2 +-
 .../java/org/apache/doris/analysis/GrantStmt.java  |  12 +-
 .../org/apache/doris/analysis/GroupByClause.java   |   4 +-
 .../org/apache/doris/analysis/InlineViewRef.java   |   3 +-
 .../java/org/apache/doris/analysis/InsertStmt.java |  28 +-
 .../org/apache/doris/analysis/IsNullPredicate.java |   1 +
 .../org/apache/doris/analysis/LargeIntLiteral.java |   2 +-
 .../java/org/apache/doris/analysis/LoadStmt.java   |   5 +-
 .../java/org/apache/doris/analysis/LockTable.java  |   1 +
 .../doris/analysis/MVColumnBitmapUnionPattern.java |   3 +-
 .../analysis/ModifyTablePropertiesClause.java      |   3 +-
 .../org/apache/doris/analysis/OpcodeRegistry.java  | 314 ---------------------
 .../org/apache/doris/analysis/OrderByElement.java  |   5 +
 .../org/apache/doris/analysis/OutFileClause.java   |  41 +--
 .../java/org/apache/doris/analysis/Predicate.java  |   5 +-
 .../java/org/apache/doris/analysis/QueryStmt.java  |  11 +-
 .../org/apache/doris/analysis/RecoverDbStmt.java   |   9 +-
 .../doris/analysis/RecoverPartitionStmt.java       |   7 +-
 .../apache/doris/analysis/RecoverTableStmt.java    |  10 +-
 .../org/apache/doris/analysis/RefreshDbStmt.java   |   6 +-
 .../doris/analysis/ReplacePartitionClause.java     |   9 +-
 .../org/apache/doris/analysis/ResourcePattern.java |   1 +
 .../org/apache/doris/analysis/RestoreStmt.java     |   8 +-
 .../java/org/apache/doris/analysis/RevokeStmt.java |   3 +-
 .../analysis/RoutineLoadDataSourceProperties.java  |  51 ++--
 .../org/apache/doris/analysis/SchemaTableType.java |   5 +-
 .../org/apache/doris/analysis/SelectListItem.java  |   3 +-
 .../java/org/apache/doris/analysis/SelectStmt.java |  23 +-
 .../apache/doris/analysis/SetOperationStmt.java    |   6 +-
 .../java/org/apache/doris/analysis/SetVar.java     |   3 +-
 .../org/apache/doris/analysis/ShowAlterStmt.java   |   3 +-
 .../org/apache/doris/analysis/ShowBackupStmt.java  |   6 +-
 .../org/apache/doris/analysis/ShowClusterStmt.java |   4 +-
 .../org/apache/doris/analysis/ShowExportStmt.java  |   3 +-
 .../org/apache/doris/analysis/ShowGrantsStmt.java  |   1 +
 .../org/apache/doris/analysis/ShowIndexStmt.java   |   4 +-
 .../apache/doris/analysis/ShowLoadProfileStmt.java |   2 +-
 .../org/apache/doris/analysis/ShowPolicyStmt.java  |   2 -
 .../doris/analysis/ShowQueryProfileStmt.java       |   2 +-
 .../org/apache/doris/analysis/ShowRestoreStmt.java |   6 +-
 .../org/apache/doris/analysis/ShowRolesStmt.java   |   1 +
 .../doris/analysis/ShowRoutineLoadTaskStmt.java    |   3 +-
 .../apache/doris/analysis/ShowStreamLoadStmt.java  |   3 +-
 .../org/apache/doris/analysis/ShowViewStmt.java    |   3 +-
 .../java/org/apache/doris/analysis/SortInfo.java   |   2 +-
 .../org/apache/doris/analysis/StmtRewriter.java    |   5 +-
 .../org/apache/doris/analysis/StorageBackend.java  |   3 +-
 .../org/apache/doris/analysis/StringLiteral.java   |   3 +-
 .../java/org/apache/doris/analysis/Subquery.java   |   2 +-
 .../org/apache/doris/analysis/TablePattern.java    |   1 +
 .../doris/analysis/TransactionBeginStmt.java       |   3 +
 .../java/org/apache/doris/analysis/TypeDef.java    |   1 -
 .../java/org/apache/doris/analysis/UseStmt.java    |   3 +-
 .../org/apache/doris/analysis/UserIdentity.java    |   3 +-
 .../java/org/apache/doris/analysis/ValueList.java  |   1 +
 .../java/org/apache/doris/analysis/WithClause.java |   3 +-
 .../org/apache/doris/backup/BackupHandler.java     |  12 +-
 .../java/org/apache/doris/backup/BackupJob.java    |  17 +-
 .../org/apache/doris/backup/BrokerStorage.java     |  10 +-
 .../java/org/apache/doris/backup/Repository.java   |   6 +-
 .../java/org/apache/doris/backup/RestoreJob.java   |  46 +--
 .../java/org/apache/doris/backup/S3Storage.java    |  10 +-
 .../apache/doris/blockrule/SqlBlockRuleMgr.java    |   2 +-
 .../apache/doris/catalog/AggregateFunction.java    | 102 ++++---
 .../org/apache/doris/catalog/AggregateType.java    |   1 +
 .../java/org/apache/doris/catalog/AuthType.java    |   2 +-
 .../java/org/apache/doris/catalog/BrokerTable.java |   4 +-
 .../java/org/apache/doris/catalog/Catalog.java     | 131 ++++++---
 .../apache/doris/catalog/ColocateGroupSchema.java  |   3 +-
 .../main/java/org/apache/doris/catalog/Column.java |   7 +-
 .../java/org/apache/doris/catalog/ColumnStats.java |   5 +-
 .../java/org/apache/doris/catalog/Database.java    |  28 +-
 .../doris/catalog/DynamicPartitionProperty.java    |   6 +-
 .../java/org/apache/doris/catalog/EsTable.java     |  16 +-
 .../java/org/apache/doris/catalog/Function.java    |   7 +-
 .../java/org/apache/doris/catalog/FunctionSet.java |   1 +
 .../apache/doris/catalog/HashDistributionInfo.java |   1 +
 .../doris/catalog/HiveMetaStoreClientHelper.java   |  24 +-
 .../java/org/apache/doris/catalog/HiveTable.java   |  26 +-
 .../apache/doris/catalog/ListPartitionInfo.java    |  10 +-
 .../java/org/apache/doris/catalog/MapType.java     |   2 +
 .../org/apache/doris/catalog/MetadataViewer.java   |  18 +-
 .../java/org/apache/doris/catalog/MysqlTable.java  |   3 +-
 .../java/org/apache/doris/catalog/OdbcTable.java   |  11 +-
 .../java/org/apache/doris/catalog/OlapTable.java   |  19 +-
 .../org/apache/doris/catalog/PartitionInfo.java    |   7 +-
 .../org/apache/doris/catalog/PrimitiveType.java    |   8 +-
 .../doris/catalog/RandomDistributionInfo.java      |   1 +
 .../apache/doris/catalog/RangePartitionInfo.java   |   8 +-
 .../java/org/apache/doris/catalog/Replica.java     |  19 +-
 .../java/org/apache/doris/catalog/Resource.java    |   1 +
 .../org/apache/doris/catalog/ResourceGroup.java    |   1 +
 .../org/apache/doris/catalog/ScalarFunction.java   |   3 +-
 .../java/org/apache/doris/catalog/ScalarType.java  |   6 +-
 .../java/org/apache/doris/catalog/SchemaTable.java |   1 +
 .../org/apache/doris/catalog/SparkResource.java    |  10 +-
 .../java/org/apache/doris/catalog/StructType.java  |   1 +
 .../java/org/apache/doris/catalog/TableIf.java     |   6 +-
 .../org/apache/doris/catalog/TableProperty.java    |   6 +-
 .../main/java/org/apache/doris/catalog/Tablet.java |  18 +-
 .../apache/doris/catalog/TabletInvertedIndex.java  |  52 ++--
 .../org/apache/doris/catalog/TempPartitions.java   |   1 +
 .../main/java/org/apache/doris/catalog/Type.java   |   4 +
 .../doris/catalog/external/ExternalDatabase.java   |   2 +-
 .../apache/doris/clone/BackendLoadStatistic.java   |   6 +-
 .../org/apache/doris/clone/BeLoadRebalancer.java   |   8 +-
 .../apache/doris/clone/ClusterLoadStatistic.java   |  66 +++--
 .../clone/ColocateTableCheckerAndBalancer.java     |  47 +--
 .../org/apache/doris/clone/DiskRebalancer.java     |   3 +-
 .../doris/clone/DynamicPartitionScheduler.java     |  69 +++--
 .../java/org/apache/doris/clone/MovesCacheMap.java |  12 +-
 .../apache/doris/clone/PartitionRebalancer.java    |  45 ++-
 .../java/org/apache/doris/clone/Rebalancer.java    |   5 +-
 .../java/org/apache/doris/clone/TabletChecker.java |  20 +-
 .../org/apache/doris/clone/TabletSchedCtx.java     |  21 +-
 .../org/apache/doris/clone/TabletScheduler.java    |  48 ++--
 .../clone/TwoDimensionalGreedyRebalanceAlgo.java   |  61 ++--
 .../main/java/org/apache/doris/common/CIDR.java    |   1 +
 .../java/org/apache/doris/common/CheckedMath.java  |   2 +-
 .../main/java/org/apache/doris/common/Config.java  |  28 +-
 .../java/org/apache/doris/common/ConfigBase.java   |   9 +-
 .../java/org/apache/doris/common/DdlException.java |   1 +
 .../java/org/apache/doris/common/ErrorReport.java  |   3 +-
 .../java/org/apache/doris/common/GenericPool.java  |   5 +-
 .../src/main/java/org/apache/doris/common/Id.java  |   2 +-
 .../java/org/apache/doris/common/IdGenerator.java  |   2 +
 .../java/org/apache/doris/common/Log4jConfig.java  |   6 +-
 .../main/java/org/apache/doris/common/Pair.java    |   2 +-
 .../org/apache/doris/common/ThreadPoolManager.java |  37 ++-
 .../java/org/apache/doris/common/ThriftServer.java |   6 +-
 .../org/apache/doris/common/UserException.java     |   1 +
 .../doris/common/parquet/BrokerInputFile.java      |  12 +-
 .../common/proc/BackendLoadStatisticProcNode.java  |   8 +-
 .../common/proc/ClusterLoadStatisticProcDir.java   |   7 +-
 .../proc/ColocationGroupBackendSeqsProcNode.java   |   3 +-
 .../common/proc/CurrentQueryInfoProvider.java      |   6 +-
 .../doris/common/proc/EsPartitionsProcDir.java     |   6 +-
 .../org/apache/doris/common/proc/JobsProcDir.java  |   9 +-
 .../org/apache/doris/common/proc/JvmProcDir.java   |  40 ++-
 .../doris/common/proc/PartitionsProcDir.java       |   9 +-
 .../org/apache/doris/common/proc/ProcResult.java   |   1 +
 .../doris/common/proc/StatisticProcNode.java       |   3 +-
 .../doris/common/proc/TabletHealthProcDir.java     |  19 +-
 .../doris/common/profile/ProfileTreeBuilder.java   |   9 +-
 .../doris/common/profile/ProfileTreeNode.java      |   1 +
 .../common/publish/ClusterStatePublisher.java      |   3 +-
 .../org/apache/doris/common/util/BrokerUtil.java   |  33 ++-
 .../doris/common/util/DynamicPartitionUtil.java    |  84 ++++--
 .../org/apache/doris/common/util/KafkaUtil.java    |   4 +-
 .../org/apache/doris/common/util/ListUtil.java     |  14 +-
 .../apache/doris/common/util/MetaLockUtils.java    |   3 +-
 .../org/apache/doris/common/util/PrintableMap.java |   1 +
 .../apache/doris/common/util/ProfileManager.java   |   6 +-
 .../apache/doris/common/util/PropertyAnalyzer.java |  20 +-
 .../org/apache/doris/common/util/RangeUtils.java   |   6 +-
 .../apache/doris/common/util/ReflectionUtils.java  |   2 +-
 .../apache/doris/common/util/RuntimeProfile.java   |   3 +-
 .../org/apache/doris/common/util/SmallFileMgr.java |   8 +-
 .../org/apache/doris/common/util/SqlBlockUtil.java |  19 +-
 .../java/org/apache/doris/common/util/URI.java     |   1 +
 .../java/org/apache/doris/common/util/Util.java    |   6 +-
 .../doris/consistency/CheckConsistencyJob.java     |   3 +-
 .../doris/consistency/ConsistencyChecker.java      |  12 +-
 .../doris/datasource/InternalDataSource.java       |   9 +-
 .../org/apache/doris/deploy/DeployManager.java     |   3 +-
 .../doris/external/elasticsearch/EsNodeInfo.java   |   5 +-
 .../doris/external/elasticsearch/EsRepository.java |   6 +-
 .../external/elasticsearch/EsShardPartitions.java  |   3 +-
 .../doris/external/elasticsearch/EsUtil.java       |   3 +-
 .../doris/external/elasticsearch/MappingPhase.java |   3 +-
 .../doris/external/iceberg/IcebergCatalogMgr.java  |   3 +-
 .../iceberg/IcebergTableCreationRecordMgr.java     |  17 +-
 .../apache/doris/httpv2/config/WebConfigurer.java  |   1 +
 .../doris/httpv2/controller/BaseController.java    |   3 +-
 .../httpv2/controller/HardwareInfoController.java  |  75 +++--
 .../doris/httpv2/entity/ResponseEntityBuilder.java |   3 +-
 .../doris/httpv2/interceptor/AuthInterceptor.java  |   6 +-
 .../httpv2/interceptor/ServletTraceIterceptor.java |   1 +
 .../doris/httpv2/rest/CheckDecommissionAction.java |   3 +-
 .../apache/doris/httpv2/rest/GetDdlStmtAction.java |   3 +-
 .../org/apache/doris/httpv2/rest/LoadAction.java   |   3 +-
 .../doris/httpv2/rest/RestBaseController.java      |   3 +-
 .../doris/httpv2/rest/TableQueryPlanAction.java    |  27 +-
 .../doris/httpv2/rest/TableSchemaAction.java       |   3 +-
 .../org/apache/doris/httpv2/rest/UploadAction.java |  18 +-
 .../doris/httpv2/rest/manager/NodeAction.java      |   6 +-
 .../apache/doris/httpv2/restv2/ImportAction.java   |   3 +-
 .../apache/doris/httpv2/util/LoadSubmitter.java    |   6 +-
 .../doris/httpv2/util/StatementSubmitter.java      |   6 +-
 .../org/apache/doris/httpv2/util/TmpFileMgr.java   |   6 +-
 .../apache/doris/journal/bdbje/BDBDebugger.java    |   3 +-
 .../apache/doris/journal/bdbje/BDBEnvironment.java |  10 +-
 .../java/org/apache/doris/ldap/LdapClient.java     |   7 +-
 .../org/apache/doris/ldap/LdapPrivsChecker.java    |  16 +-
 .../org/apache/doris/load/BrokerFileGroup.java     |   6 +-
 .../apache/doris/load/BrokerFileGroupAggInfo.java  |   6 +-
 .../java/org/apache/doris/load/DeleteHandler.java  |  77 +++--
 .../main/java/org/apache/doris/load/DeleteJob.java |   6 +-
 .../main/java/org/apache/doris/load/EtlStatus.java |   1 +
 .../main/java/org/apache/doris/load/FailMsg.java   |   1 +
 .../src/main/java/org/apache/doris/load/Load.java  |  43 +--
 .../java/org/apache/doris/load/LoadChecker.java    |  58 ++--
 .../main/java/org/apache/doris/load/LoadJob.java   |   3 +-
 .../org/apache/doris/load/PartitionLoadInfo.java   |   1 +
 .../main/java/org/apache/doris/load/Source.java    |   1 +
 .../org/apache/doris/load/StreamLoadRecord.java    |   4 +-
 .../org/apache/doris/load/StreamLoadRecordMgr.java |   9 +-
 .../apache/doris/load/loadv2/BrokerLoadJob.java    |  19 +-
 .../doris/load/loadv2/BrokerLoadPendingTask.java   |   3 +-
 .../org/apache/doris/load/loadv2/BulkLoadJob.java  |   9 +-
 .../org/apache/doris/load/loadv2/ConfigFile.java   |   5 +-
 .../apache/doris/load/loadv2/InsertLoadJob.java    |   4 +-
 .../java/org/apache/doris/load/loadv2/LoadJob.java |   6 +-
 .../apache/doris/load/loadv2/LoadLoadingTask.java  |   7 +-
 .../doris/load/loadv2/SparkEtlJobHandler.java      |   9 +-
 .../doris/load/loadv2/SparkLauncherMonitor.java    |   7 +-
 .../org/apache/doris/load/loadv2/SparkLoadJob.java |  41 +--
 .../doris/load/loadv2/SparkLoadPendingTask.java    |  15 +-
 .../apache/doris/load/loadv2/SparkRepository.java  |  11 +-
 .../load/routineload/KafkaRoutineLoadJob.java      |  16 +-
 .../doris/load/routineload/KafkaTaskInfo.java      |   7 +-
 .../doris/load/routineload/RoutineLoadJob.java     |  69 +++--
 .../doris/load/routineload/RoutineLoadManager.java |  12 +-
 .../load/routineload/RoutineLoadScheduler.java     |   9 +-
 .../load/routineload/RoutineLoadTaskInfo.java      |   4 +-
 .../load/routineload/RoutineLoadTaskScheduler.java |  16 +-
 .../org/apache/doris/load/sync/SyncChannel.java    |   3 +-
 .../apache/doris/load/sync/SyncChannelHandle.java  |   2 +-
 .../java/org/apache/doris/load/sync/SyncJob.java   |   3 +-
 .../doris/load/sync/canal/CanalSyncChannel.java    |  16 +-
 .../apache/doris/load/sync/canal/CanalSyncJob.java |  14 +-
 .../apache/doris/load/sync/canal/CanalUtils.java   |   9 +-
 .../doris/load/sync/canal/SyncCanalClient.java     |   3 +-
 .../doris/load/sync/position/EntryPosition.java    |   4 +-
 .../apache/doris/load/update/UpdateManager.java    |   6 +-
 .../doris/load/update/UpdateStmtExecutor.java      |   6 +-
 .../java/org/apache/doris/master/Checkpoint.java   |  11 +-
 .../java/org/apache/doris/master/MasterImpl.java   |  34 ++-
 .../master/PartitionInMemoryInfoCollector.java     |   3 +-
 .../org/apache/doris/master/ReportHandler.java     |  26 +-
 .../org/apache/doris/metric/CounterMetric.java     |   2 +-
 .../main/java/org/apache/doris/metric/Metric.java  |   2 +-
 .../java/org/apache/doris/metric/MetricRepo.java   | 130 ++++++---
 .../doris/metric/PrometheusMetricVisitor.java      |  75 +++--
 .../doris/metric/SimpleCoreMetricVisitor.java      |  10 +-
 .../org/apache/doris/metric/SystemMetrics.java     |   3 +-
 .../apache/doris/monitor/jvm/JvmPauseMonitor.java  |   4 +-
 .../org/apache/doris/monitor/jvm/JvmStats.java     |   2 +-
 .../org/apache/doris/monitor/unit/TimeValue.java   |   3 +-
 .../java/org/apache/doris/mysql/MysqlPacket.java   |   2 +-
 .../java/org/apache/doris/mysql/MysqlPassword.java |   4 +-
 .../java/org/apache/doris/mysql/MysqlProto.java    |   7 +-
 .../org/apache/doris/mysql/nio/AcceptListener.java |   8 +-
 .../org/apache/doris/mysql/nio/NMysqlChannel.java  |   2 +-
 .../org/apache/doris/mysql/nio/NMysqlServer.java   |   9 +-
 .../org/apache/doris/mysql/privilege/PaloAuth.java |  39 ++-
 .../org/apache/doris/mysql/privilege/PaloRole.java |   8 +-
 .../doris/mysql/privilege/ResourcePrivEntry.java   |  18 +-
 .../doris/mysql/privilege/TablePrivEntry.java      |   3 +-
 .../apache/doris/mysql/privilege/UserProperty.java |   3 +-
 .../doris/mysql/privilege/UserPropertyMgr.java     |   3 +-
 .../plans/physical/PhysicalBroadcastHashJoin.java  |   1 -
 .../apache/doris/persist/ColocatePersistInfo.java  |   3 +-
 .../org/apache/doris/persist/CreateTableInfo.java  |   1 +
 .../apache/doris/persist/DropPartitionInfo.java    |   3 +-
 .../java/org/apache/doris/persist/EditLog.java     |  22 +-
 .../doris/persist/ModifyCommentOperationLog.java   |   3 +-
 .../apache/doris/persist/PartitionPersistInfo.java |   4 +-
 .../apache/doris/persist/ReplicaPersistInfo.java   |  11 +-
 .../org/apache/doris/persist/gson/GsonUtils.java   |   8 +-
 .../org/apache/doris/persist/meta/MetaFooter.java  |   3 +-
 .../org/apache/doris/persist/meta/MetaHeader.java  |   3 +-
 .../doris/persist/meta/MetaPersistMethod.java      |   6 +-
 .../org/apache/doris/persist/meta/MetaWriter.java  |   3 +-
 .../org/apache/doris/planner/AnalyticEvalNode.java |   4 +-
 .../org/apache/doris/planner/AnalyticPlanner.java  |   6 +-
 .../org/apache/doris/planner/BrokerScanNode.java   |   6 +-
 .../org/apache/doris/planner/CrossJoinNode.java    |   4 +-
 .../doris/planner/DistributedPlanColocateRule.java |   3 +-
 .../apache/doris/planner/DistributedPlanner.java   |  17 +-
 .../java/org/apache/doris/planner/EsScanNode.java  |   9 +-
 .../doris/planner/HashDistributionPruner.java      |   3 +-
 .../org/apache/doris/planner/HashJoinNode.java     |  10 +-
 .../org/apache/doris/planner/HiveScanNode.java     |   3 +-
 .../apache/doris/planner/JoinCostEvaluation.java   |   2 +-
 .../org/apache/doris/planner/LoadScanNode.java     |  17 +-
 .../org/apache/doris/planner/OlapScanNode.java     |  17 +-
 .../org/apache/doris/planner/OlapTableSink.java    |  13 +-
 .../doris/planner/PartitionColumnFilter.java       |   3 +-
 .../org/apache/doris/planner/PartitionPruner.java  |   2 +-
 .../org/apache/doris/planner/PlanFragment.java     |   2 +-
 .../java/org/apache/doris/planner/PlanNode.java    |  12 +-
 .../java/org/apache/doris/planner/Planner.java     |   3 +-
 .../org/apache/doris/planner/PlannerContext.java   |   4 +-
 .../org/apache/doris/planner/ProjectPlanner.java   |   4 +-
 .../org/apache/doris/planner/RollupSelector.java   |   5 +-
 .../org/apache/doris/planner/RuntimeFilter.java    |   6 +-
 .../doris/planner/RuntimeFilterGenerator.java      |   7 +-
 .../java/org/apache/doris/planner/ScanNode.java    |   6 +-
 .../java/org/apache/doris/planner/SelectNode.java  |   3 +-
 .../org/apache/doris/planner/SetOperationNode.java |  15 +-
 .../apache/doris/planner/SingleNodePlanner.java    |   3 +-
 .../java/org/apache/doris/planner/SortNode.java    |   6 +-
 .../apache/doris/planner/StreamLoadPlanner.java    |   3 +-
 .../apache/doris/planner/StreamLoadScanNode.java   |   3 +-
 .../apache/doris/plugin/DynamicPluginLoader.java   |   9 +-
 .../java/org/apache/doris/plugin/PluginMgr.java    |   7 +-
 .../java/org/apache/doris/plugin/PluginZip.java    |   5 +-
 .../java/org/apache/doris/policy/PolicyMgr.java    |   1 -
 .../java/org/apache/doris/qe/AuditLogBuilder.java  |   4 +-
 .../java/org/apache/doris/qe/ConnectContext.java   |   3 +-
 .../java/org/apache/doris/qe/ConnectProcessor.java |   4 +-
 .../java/org/apache/doris/qe/ConnectScheduler.java |  15 +-
 .../main/java/org/apache/doris/qe/Coordinator.java | 305 +++++++++++---------
 .../java/org/apache/doris/qe/GlobalVariable.java   |   6 +-
 .../java/org/apache/doris/qe/HelpObjectIface.java  |   1 +
 .../apache/doris/qe/InsertStreamTxnExecutor.java   |   6 +-
 .../org/apache/doris/qe/MasterTxnExecutor.java     |   1 +
 .../java/org/apache/doris/qe/MultiLoadMgr.java     |  10 +-
 .../main/java/org/apache/doris/qe/QueryDetail.java |   2 +-
 .../java/org/apache/doris/qe/QueryDetailQueue.java |   2 +-
 .../org/apache/doris/qe/QueryStateException.java   |   1 +
 .../java/org/apache/doris/qe/ResultReceiver.java   |   3 +-
 .../apache/doris/qe/RuntimeFilterTypeHelper.java   |  13 +-
 .../java/org/apache/doris/qe/SessionVariable.java  |   4 +-
 .../java/org/apache/doris/qe/ShowExecutor.java     |  68 +++--
 .../java/org/apache/doris/qe/SimpleScheduler.java  |   3 +-
 .../java/org/apache/doris/qe/SqlModeHelper.java    |  12 +-
 .../java/org/apache/doris/qe/StmtExecutor.java     |  47 +--
 .../main/java/org/apache/doris/qe/VariableMgr.java |   6 +-
 .../org/apache/doris/qe/cache/CacheAnalyzer.java   |  14 +-
 .../org/apache/doris/qe/cache/CacheBeProxy.java    |   6 +-
 .../apache/doris/qe/cache/CacheCoordinator.java    |   3 +-
 .../org/apache/doris/qe/cache/RowBatchBuilder.java |   3 +-
 .../java/org/apache/doris/qe/cache/SqlCache.java   |   3 +-
 .../org/apache/doris/rewrite/ExprRewriter.java     |   5 +-
 .../doris/rewrite/ExtractCommonFactorsRule.java    |  15 +-
 .../java/org/apache/doris/rewrite/FEFunctions.java |   6 +-
 .../apache/doris/rewrite/FoldConstantsRule.java    |  15 +-
 .../org/apache/doris/rewrite/InferFiltersRule.java |  29 +-
 .../doris/rewrite/RewriteBinaryPredicatesRule.java |   7 +-
 .../doris/rewrite/RewriteDateLiteralRule.java      |   8 +-
 .../doris/rewrite/RewriteFromUnixTimeRule.java     |  24 +-
 .../org/apache/doris/rpc/AttachmentRequest.java    |   5 +
 .../apache/doris/service/FrontendServiceImpl.java  |  22 +-
 .../doris/statistics/StatisticsJobManager.java     |   3 +-
 .../doris/statistics/StatisticsJobScheduler.java   |  18 +-
 .../apache/doris/statistics/StatisticsManager.java |   2 +-
 .../doris/statistics/StatisticsTaskScheduler.java  |   5 +-
 .../org/apache/doris/statistics/StatsType.java     |   2 +
 .../main/java/org/apache/doris/system/Backend.java |   3 +-
 .../org/apache/doris/system/BackendHbResponse.java |   3 +-
 .../org/apache/doris/system/BeSelectionPolicy.java |   1 +
 .../java/org/apache/doris/system/Diagnoser.java    |  18 +-
 .../apache/doris/system/FrontendHbResponse.java    |   3 +-
 .../java/org/apache/doris/system/HeartbeatMgr.java |  14 +-
 .../org/apache/doris/system/SystemInfoService.java |   3 +-
 .../main/java/org/apache/doris/task/AgentTask.java |   2 +-
 .../org/apache/doris/task/AgentTaskExecutor.java   |   3 +-
 .../main/java/org/apache/doris/task/CloneTask.java |   2 +
 .../apache/doris/task/HadoopLoadPendingTask.java   |   9 +-
 .../java/org/apache/doris/task/LoadEtlTask.java    |   8 +-
 .../java/org/apache/doris/task/LoadTaskInfo.java   |  93 +++---
 .../org/apache/doris/task/MasterTaskExecutor.java  |  12 +-
 .../java/org/apache/doris/task/StreamLoadTask.java |   1 +
 .../doris/task/UpdateTabletMetaInfoTask.java       |   3 +-
 .../AbstractTxnStateChangeCallback.java            |   3 +-
 .../doris/transaction/DatabaseTransactionMgr.java  | 188 +++++++-----
 .../doris/transaction/GlobalTransactionMgr.java    |  62 ++--
 .../doris/transaction/PublishVersionDaemon.java    |  15 +-
 .../doris/transaction/TransactionIdGenerator.java  |   1 +
 .../apache/doris/transaction/TransactionState.java |   6 +-
 .../doris/transaction/TxnStateChangeCallback.java  |   3 +-
 .../java/org/apache/doris/alter/AlterTest.java     |  12 +-
 .../doris/analysis/CreateDataSyncJobStmtTest.java  |   1 +
 .../apache/doris/analysis/GroupByClauseTest.java   |   1 +
 .../apache/doris/analysis/InsertArrayStmtTest.java |   1 -
 .../doris/analysis/SetOperationStmtTest.java       |   1 +
 .../analysis/TableNameComparedLowercaseTest.java   |   6 +-
 .../analysis/TableNameStoredLowercaseTest.java     |   6 +-
 .../org/apache/doris/backup/BackupJobTest.java     |   2 +
 .../org/apache/doris/backup/RestoreJobTest.java    |   2 +
 .../doris/blockrule/SqlBlockRuleMgrTest.java       |   2 +-
 .../org/apache/doris/catalog/ColumnTypeTest.java   |   1 +
 .../org/apache/doris/catalog/HiveTableTest.java    |   1 -
 .../org/apache/doris/catalog/OlapTableTest.java    |   4 +-
 .../java/org/apache/doris/catalog/TableTest.java   |  14 +-
 .../java/org/apache/doris/catalog/TabletTest.java  |   6 +-
 .../apache/doris/catalog/TempPartitionTest.java    | 166 +++++++----
 .../apache/doris/catalog/TruncateTableTest.java    |  15 +-
 .../org/apache/doris/clone/RebalancerTestUtil.java |   6 +-
 .../doris/clone/TabletRepairAndBalanceTest.java    |  20 +-
 .../doris/clone/TabletReplicaTooSlowTest.java      |   3 +-
 .../TwoDimensionalGreedyRebalanceAlgoTest.java     |   9 +-
 .../org/apache/doris/common/GenericPoolTest.java   |   1 +
 .../apache/doris/common/ThreadPoolManagerTest.java |   6 +-
 .../java/org/apache/doris/common/util/URITest.java |   1 +
 .../org/apache/doris/http/DorisHttpTestCase.java   |   6 +-
 .../doris/http/TableQueryPlanActionTest.java       |  13 +-
 .../apache/doris/ldap/LdapPrivsCheckerTest.java    |   8 +-
 .../org/apache/doris/load/DeleteHandlerTest.java   |   1 +
 .../org/apache/doris/load/TabletLoadInfoTest.java  |   1 +
 .../doris/load/loadv2/SparkRepositoryTest.java     |  12 +-
 .../doris/load/sync/canal/CanalSyncDataTest.java   |   5 +
 .../doris/load/sync/canal/CanalSyncJobTest.java    |   4 +
 .../persist/BatchModifyPartitionsInfoTest.java     |  12 +-
 .../org/apache/doris/planner/ColocatePlanTest.java |   4 +-
 .../doris/planner/RuntimeFilterGeneratorTest.java  |  32 +--
 .../doris/planner/StreamLoadScanNodeTest.java      |  13 +-
 .../doris/planner/TableFunctionPlanTest.java       |  25 +-
 .../org/apache/doris/qe/ConnectProcessorTest.java  |   5 +
 .../java/org/apache/doris/qe/CoordinatorTest.java  |   6 +-
 .../java/org/apache/doris/qe/MultiLoadMgrTest.java |   1 +
 .../java/org/apache/doris/qe/ShowExecutorTest.java |   1 +
 .../org/apache/doris/qe/ShowResultSetTest.java     |   1 +
 .../java/org/apache/doris/qe/StmtExecutorTest.java |  18 +-
 .../doris/resource/TagSerializationTest.java       |  12 +-
 .../org/apache/doris/rewrite/FEFunctionsTest.java  |   1 +
 .../apache/doris/rewrite/InferFiltersRuleTest.java |   1 +
 .../org/apache/doris/service/ExecuteEnvTest.java   |  26 +-
 .../apache/doris/system/SystemInfoServiceTest.java |   9 +-
 .../java/org/apache/doris/task/AgentTaskTest.java  |   3 +-
 .../doris/task/SerialExecutorServiceTest.java      |   1 +
 .../apache/doris/utframe/MockedBackendFactory.java |   2 +-
 .../org/apache/doris/utframe/UtFrameUtils.java     |  11 +-
 .../java/org/apache/doris/udf/BitmapAndUDF.java    |   3 +-
 .../java/org/apache/doris/udf/BitmapCountUDF.java  |   3 +-
 .../java/org/apache/doris/udf/BitmapOrUDF.java     |   3 +-
 .../java/org/apache/doris/udf/BitmapUnionUDAF.java |   3 +-
 .../java/org/apache/doris/udf/BitmapXorUDF.java    |   3 +-
 .../java/org/apache/doris/udf/JMXJsonUtil.java     |   2 +-
 .../main/java/org/apache/doris/udf/JniUtil.java    |   3 +-
 .../java/org/apache/doris/udf/UdfExecutor.java     |  10 +-
 fe/pom.xml                                         |  32 +--
 .../load/loadv2/dpp/DorisRangePartitioner.java     |   1 +
 .../apache/doris/load/loadv2/dpp/DppColumns.java   |   8 -
 .../org/apache/doris/load/loadv2/dpp/DppUtils.java |  37 ++-
 .../doris/load/loadv2/dpp/GlobalDictBuilder.java   |  39 ++-
 .../dpp/MinimumCoverageRollupTreeBuilder.java      |   4 +-
 .../org/apache/doris/load/loadv2/dpp/SparkDpp.java | 209 +++++++-------
 .../doris/load/loadv2/dpp/SparkRDDAggregator.java  |  15 +-
 .../apache/doris/load/loadv2/etl/EtlJobConfig.java |   9 +-
 .../apache/doris/load/loadv2/etl/SparkEtlJob.java  |  18 +-
 514 files changed, 3883 insertions(+), 2763 deletions(-)

diff --git a/docs/en/developer/developer-guide/java-format-code.md b/docs/en/developer/developer-guide/java-format-code.md
index d87a97f5ca..bad37cc7f1 100644
--- a/docs/en/developer/developer-guide/java-format-code.md
+++ b/docs/en/developer/developer-guide/java-format-code.md
@@ -42,6 +42,13 @@ standard java package
 * Do not use `import *`
 * Do not use `import static`
 
+## Check when compiling
+
+Now, when compiling with `maven`, `CheckStyle` checks run by default. This slightly slows down compilation. To skip checkstyle, compile with the following command:
+```
+mvn clean install -DskipTests -Dcheckstyle.skip
+```
+
 ## Checkstyle Plugin
 
 Now we have `formatter-check` in `CI` to check the code format.
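The import rules in this doc correspond to the `CustomImportOrder` configuration kept in `fe/check/checkstyle/checkstyle.xml` (see the hunk further below): same-package imports first, then third-party, then standard Java, alphabetical within each group and separated by blank lines. A minimal sketch with a hypothetical class (the imported types exist in the repo and in Guava):

```java
// Hypothetical file: an import block satisfying the configured order
// SAME_PACKAGE(3) -> THIRD_PARTY_PACKAGE -> STANDARD_JAVA_PACKAGE.
package org.apache.doris.example;

import org.apache.doris.common.Pair;     // same package (first 3 segments match)

import com.google.common.collect.Lists;  // third-party

import java.util.List;                   // standard java, last

public class ImportOrderExample {
    private final List<Pair<String, Integer>> rows = Lists.newArrayList();
}
```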
diff --git a/docs/zh-CN/developer/developer-guide/java-format-code.md b/docs/zh-CN/developer/developer-guide/java-format-code.md
index 6896107476..6fad1867ec 100644
--- a/docs/zh-CN/developer/developer-guide/java-format-code.md
+++ b/docs/zh-CN/developer/developer-guide/java-format-code.md
@@ -42,6 +42,13 @@ standard java package
 * Do not use `import *`
 * Do not use `import static`
 
+## Compile-time checks
+
+Now, when compiling with `maven`, a `CheckStyle` check runs by default. It slightly slows down compilation. To skip it, compile with the following command:
+```
+mvn clean install -DskipTests -Dcheckstyle.skip
+```
+
 ## Checkstyle Plugin
 
 Now we have `formatter-check` in `CI` to check the code format.
diff --git a/fe/check/checkstyle/checkstyle.xml b/fe/check/checkstyle/checkstyle.xml
index 2b005d16c2..b37927ff1d 100644
--- a/fe/check/checkstyle/checkstyle.xml
+++ b/fe/check/checkstyle/checkstyle.xml
@@ -24,7 +24,7 @@ under the License.
 
 <module name = "Checker">
     <property name="charset" value="UTF-8"/>
-    <property name="severity" value="warning"/>
+    <property name="severity" value="error"/>
     <property name="fileExtensions" value="java"/>
     <!-- Excludes all 'module-info.java' files              -->
     <!-- See https://checkstyle.org/config_filefilters.html -->
@@ -43,7 +43,6 @@ under the License.
 
     <module name="FileTabCharacter">
         <property name="eachLine" value="true"/>
-        <property name="severity" value="error"/>
     </module>
     <module name="LineLength">
         <property name="fileExtensions" value="java"/>
@@ -52,24 +51,20 @@ under the License.
     </module>
     <module name="NewlineAtEndOfFile">
         <property name="lineSeparator" value="lf"/>
-        <property name="severity" value="error"/>
     </module>
 
     <module name="RegexpSingleline">
         <property name="format" value="&gt;&gt;&gt;&gt;&gt;&gt;&gt;"/>
         <property name="message" value="Merge conflicts unresolved."/>
-        <property name="severity" value="error"/>
     </module>
     <module name="RegexpSingleline">
         <property name="format" value="&lt;&lt;&lt;&lt;&lt;&lt;&lt;"/>
         <property name="message" value="Merge conflicts unresolved."/>
-        <property name="severity" value="error"/>
     </module>
     <module name="RegexpSingleline">
         <property name="format" value="\s+$"/>
         <property name="message" value="Trailing whitespace found."/>
         <property name="fileExtensions" value=".java"/>
-        <property name="severity" value="error"/>
     </module>
 
     <module name="TreeWalker">
@@ -83,33 +78,26 @@ under the License.
             <property name="tokens"
                       value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF,
                       RECORD_DEF, COMPACT_CTOR_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="AnnotationLocation">
             <property name="id" value="AnnotationLocationVariables"/>
             <property name="tokens" value="VARIABLE_DEF"/>
             <property name="allowSamelineMultipleAnnotations" value="true"/>
-            <property name="severity" value="error"/>
-        </module>
-        <module name="MissingOverride">
-            <property name="severity" value="error"/>
         </module>
+        <module name="MissingOverride"/>
 
         <!-- Block Checks -->
         <module name="EmptyBlock">
             <property name="option" value="TEXT"/>
             <property name="tokens"
                       value="LITERAL_TRY, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE, LITERAL_SWITCH"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="EmptyCatchBlock">
             <property name="exceptionVariableName" value="expected"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="NeedBraces">
             <property name="tokens"
                       value="LITERAL_DO, LITERAL_ELSE, LITERAL_FOR, LITERAL_IF, LITERAL_WHILE"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="LeftCurly">
             <property name="tokens"
@@ -118,14 +106,12 @@ under the License.
                     LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY, LITERAL_FOR, LITERAL_IF,
                     LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_TRY, LITERAL_WHILE, METHOD_DEF,
                     OBJBLOCK, STATIC_INIT, RECORD_DEF, COMPACT_CTOR_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="RightCurly">
             <property name="id" value="RightCurlySame"/>
             <property name="tokens"
                       value="LITERAL_TRY, LITERAL_CATCH, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE,
                     LITERAL_DO"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="RightCurly">
             <property name="id" value="RightCurlyAlone"/>
@@ -134,7 +120,6 @@ under the License.
                       value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT,
                     INSTANCE_INIT, ANNOTATION_DEF, ENUM_DEF, INTERFACE_DEF, RECORD_DEF,
                     COMPACT_CTOR_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="SuppressionXpathSingleFilter">
            <!-- suppression is required until https://github.com/checkstyle/checkstyle/issues/7541 -->
@@ -148,61 +133,36 @@ under the License.
 
         <!-- Coding -->
         <module name="DeclarationOrder"/>
-        <module name="FallThrough">
-            <property name="severity" value="error"/>
-        </module>
+        <module name="FallThrough"/>
         <module name="IllegalTokenText">
             <property name="tokens" value="STRING_LITERAL, CHAR_LITERAL"/>
             <property name="format"
                       value="\\u00(09|0(a|A)|0(c|C)|0(d|D)|22|27|5(C|c))|\\(0(10|11|12|14|15|42|47)|134)"/>
             <property name="message"
                       value="Consider using special escape sequence instead of octal value or Unicode escaped value."/>
-            <property name="severity" value="error"/>
-        </module>
-        <module name="MissingSwitchDefault">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="MultipleVariableDeclarations">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="NoFinalizer">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="OneStatementPerLine">
-            <property name="severity" value="error"/>
         </module>
+        <module name="MissingSwitchDefault"/>
+        <module name="MultipleVariableDeclarations"/>
+        <module name="NoFinalizer"/>
+        <module name="OneStatementPerLine"/>
         <module name="OverloadMethodsDeclarationOrder"/>
-        <module name="StringLiteralEquality">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="UnusedLocalVariable">
-            <property name="severity" value="error"/>
-        </module>
+
+        <module name="StringLiteralEquality"/>
+        <module name="UnusedLocalVariable"/>
 
         <!-- Headers -->
         <!-- Imports -->
-        <module name="AvoidStarImport">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="AvoidStaticImport">
-            <property name="severity" value="error"/>
-        </module>
+        <module name="AvoidStarImport"/>
+        <module name="AvoidStaticImport"/>
         <module name="CustomImportOrder">
             <property name="sortImportsInGroupAlphabetically" value="true"/>
             <property name="separateLineBetweenGroups" value="true"/>
             <property name="customImportOrderRules" value="SAME_PACKAGE(3)###THIRD_PARTY_PACKAGE###STANDARD_JAVA_PACKAGE"/>
             <property name="tokens" value="IMPORT, STATIC_IMPORT, PACKAGE_DEF"/>
-            <property name="severity" value="error"/>
-        </module>
-        <module name="RedundantImport">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="UnusedImports">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="EmptyStatement">
-            <property name="severity" value="error"/>
         </module>
+        <module name="RedundantImport"/>
+        <module name="UnusedImports"/>
+        <module name="EmptyStatement"/>
 
         <!-- Javadoc Comments -->
         <module name="AtclauseOrder">
@@ -220,25 +180,20 @@ under the License.
             <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF, COMPACT_CTOR_DEF"/>
         </module>
 
-        <module name="JavadocParagraph"/>
-        <module name="JavadocStyle"/>
         <module name="JavadocTagContinuationIndentation"/>
         <module name="MissingJavadocMethod">
             <property name="scope" value="public"/>
-            <property name="minLineCount" value="2"/>
+            <property name="minLineCount" value="5"/>
             <property name="allowedAnnotations" value="Override, Test"/>
-            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF,
-                                   COMPACT_CTOR_DEF"/>
+            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF, COMPACT_CTOR_DEF"/>
         </module>
         <module name="MissingJavadocType">
             <property name="scope" value="protected"/>
             <property name="tokens"
-                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF,
-                      RECORD_DEF, ANNOTATION_DEF"/>
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, RECORD_DEF, ANNOTATION_DEF"/>
             <property name="excludeScope" value="nothing"/>
         </module>
         <module name="NonEmptyAtclauseDescription"/>
-        <module name="RequireEmptyLineBeforeBlockTagGroup"/>
         <module name="SummaryJavadoc">
             <property name="forbiddenSummaryFragments"
                       value="^@return the *|^This method returns |^A [{]@code [a-zA-Z0-9]+[}]( is a )"/>
@@ -252,11 +207,9 @@ under the License.
             <property name="allowEscapesForControlCharacters" value="true"/>
             <property name="allowByTailComment" value="true"/>
             <property name="allowNonPrintableEscapes" value="true"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="CommentsIndentation">
             <property name="tokens" value="SINGLE_LINE_COMMENT, BLOCK_COMMENT_BEGIN"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="Indentation">
             <property name="basicOffset" value="4"/>
@@ -265,14 +218,9 @@ under the License.
             <property name="throwsIndent" value="8"/>
             <property name="lineWrappingIndentation" value="8"/>
             <property name="arrayInitIndent" value="4"/>
-            <property name="severity" value="error"/>
-        </module>
-        <module name="OuterTypeFilename">
-            <property name="severity" value="error"/>
-        </module>
-        <module name="UpperEll">
-            <property name="severity" value="error"/>
         </module>
+        <module name="OuterTypeFilename"/>
+        <module name="UpperEll"/>
 
         <!-- Modifiers -->
         <module name="ModifierOrder"/>
@@ -285,90 +233,75 @@ under the License.
                       value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF, ANNOTATION_FIELD_DEF,
                     PARAMETER_DEF, VARIABLE_DEF, METHOD_DEF, PATTERN_VARIABLE_DEF, RECORD_DEF,
                     RECORD_COMPONENT_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="CatchParameterName">
             <property name="format" value="^([a-z0-9][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Catch parameter name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="ClassTypeParameterName">
             <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*$)"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Class type name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="InterfaceTypeParameterName">
             <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*$)"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Interface type name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="LambdaParameterName">
             <property name="format" value="^([a-z][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Lambda parameter name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="LocalVariableName">
             <property name="format" value="^([a-z0-9][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Local variable name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="MemberName">
             <property name="format" value="^([a-z][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Member name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="MethodName">
             <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9_]*$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Method name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="MethodTypeParameterName">
             <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*$)"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Method type name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="PackageName">
             <property name="format" value="^[a-z]+(\.[a-z][a-z0-9]*)*$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Package name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="ParameterName">
             <property name="format" value="^([a-z0-9][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Parameter name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="PatternVariableName">
             <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Pattern variable name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="RecordComponentName">
             <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Record component name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="RecordTypeParameterName">
             <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*$)"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Record type name ''{0}'' must match pattern ''{1}''."/>
         </module>
         <module name="TypeName">
             <property name="tokens" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF,
                     ANNOTATION_DEF, RECORD_DEF"/>
-            <property name="severity" value="error"/>
             <message key="name.invalidPattern"
                      value="Type name ''{0}'' must match pattern ''{1}''."/>
         </module>
@@ -392,35 +325,28 @@ under the License.
                      value="GenericWhitespace ''{0}'' should followed by whitespace."/>
             <message key="ws.notPreceded"
                      value="GenericWhitespace ''{0}'' is not preceded with whitespace."/>
-            <property name="severity" value="error"/>
         </module>
         <module name="MethodParamPad">
             <property name="tokens"
                       value="CTOR_DEF, LITERAL_NEW, METHOD_CALL, METHOD_DEF,
                     SUPER_CTOR_CALL, ENUM_CONSTANT_DEF, RECORD_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="NoLineWrap">
             <property name="tokens" value="PACKAGE_DEF, IMPORT, STATIC_IMPORT"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="NoWhitespaceBefore">
             <property name="tokens"
                       value="COMMA, SEMI, POST_INC, POST_DEC, DOT,
                     LABELED_STAT, METHOD_REF"/>
             <property name="allowLineBreaks" value="true"/>
-            <property name="severity" value="error"/>
-        </module>
-        <module name="NoWhitespaceBeforeCaseDefaultColon">
-            <property name="severity" value="error"/>
         </module>
+        <module name="NoWhitespaceBeforeCaseDefaultColon"/>
         <module name="OperatorWrap">
             <property name="option" value="NL"/>
             <property name="tokens"
                       value="BAND, BOR, BSR, BXOR, DIV, EQUAL, GE, GT, LAND, LE, LITERAL_INSTANCEOF, LOR,
                     LT, MINUS, MOD, NOT_EQUAL, PLUS, QUESTION, SL, SR, STAR, METHOD_REF,
                     TYPE_EXTENSION_AND "/>
-            <property name="severity" value="error"/>
         </module>
         <module name="ParenPad">
             <property name="tokens"
@@ -429,7 +355,6 @@ under the License.
                     LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_WHILE, METHOD_CALL,
                     METHOD_DEF, QUESTION, RESOURCE_SPECIFICATION, SUPER_CTOR_CALL, LAMBDA,
                     RECORD_DEF"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="SeparatorWrap">
             <property name="id" value="SeparatorWrapDot"/>
@@ -462,7 +387,6 @@ under the License.
             <property name="tokens"
                       value="COMMA, SEMI, TYPECAST, LITERAL_IF, LITERAL_ELSE,
                     LITERAL_WHILE, LITERAL_DO, LITERAL_FOR, DO_WHILE"/>
-            <property name="severity" value="error"/>
         </module>
         <module name="WhitespaceAround">
             <property name="allowEmptyConstructors" value="true"/>
@@ -479,7 +403,6 @@ under the License.
                     LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS, MINUS_ASSIGN, MOD, MOD_ASSIGN,
                     NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION, RCURLY, SL, SLIST, SL_ASSIGN, SR,
                     SR_ASSIGN, STAR, STAR_ASSIGN, LITERAL_ASSERT, TYPE_EXTENSION_AND"/>
-            <property name="severity" value="error"/>
             <message key="ws.notFollowed"
                      value="WhitespaceAround: ''{0}'' is not followed by whitespace. Empty blocks may only be represented as '{}' when not part of a multi-block statement (4.1.3)"/>
             <message key="ws.notPreceded"
diff --git a/fe/check/checkstyle/suppressions.xml b/fe/check/checkstyle/suppressions.xml
index 4f7909fd1f..8524bed6fd 100644
--- a/fe/check/checkstyle/suppressions.xml
+++ b/fe/check/checkstyle/suppressions.xml
@@ -27,4 +27,33 @@ under the License.
     <suppress files="[\\/]jmockit[\\/]" checks=".*" />
     <suppress files="[\\/]test[\\/]" checks="MissingJavadocMethod" />
     <suppress files="[\\/]test[\\/]" checks="MissingJavadocType" />
+    <suppress files="[\\/]test[\\/]" checks="LineLength" />
+
+    <!-- Suppress some rules everywhere except Nereids -->
+    <!-- Java doc -->
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="AtclauseOrder" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="JavadocMethod" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="JavadocParagraph" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="JavadocStyle" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="JavadocTagContinuationIndentation" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="InvalidJavadocPosition" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="MissingJavadocMethod" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="MissingJavadocType" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="NonEmptyAtclauseDescription" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="RequireEmptyLineBeforeBlockTagGroup" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="SummaryJavadoc" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="SingleLineJavadoc" />
+
+    <!-- other -->
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="DeclarationOrder" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="OverloadMethodsDeclarationOrder" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/](?!nereids)[^\\/]+[\\/]|PaloFe\.java" checks="VariableDeclarationUsageDistance" />
+
+    <!-- exclude rules for special files -->
+    <suppress files="org[\\/]apache[\\/]doris[\\/]load[\\/]loadv2[\\/]dpp[\\/]ColumnParser\.java" checks="OneTopLevelClass" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/]load[\\/]loadv2[\\/]dpp[\\/]SparkRDDAggregator\.java" checks="OneTopLevelClass" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/]catalog[\\/]FunctionSet\.java" checks="LineLength" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/]common[\\/]ErrorCode\.java" checks="LineLength" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/]udf[\\/]UdafExecutor\.java" checks="NoFinalizer" />
+    <suppress files="org[\\/]apache[\\/]doris[\\/]udf[\\/]UdfExecutor\.java" checks="NoFinalizer" />
 </suppressions>
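
The "files" patterns above exempt every package directory under org/apache/doris except nereids (plus the
top-level PaloFe.java): "[\\/]" is a character class matching either path separator, and "(?!nereids)" is a
negative lookahead on the directory that follows doris/. Below is a minimal sketch of the matching behavior;
it assumes find()-style semantics (the pattern only has to occur somewhere in the path), and the sample class
and file names are illustrative only.

    import java.util.regex.Pattern;

    public class SuppressionPatternDemo {
        public static void main(String[] args) {
            // Java string escaping doubles the backslashes of the XML pattern.
            Pattern p = Pattern.compile(
                    "org[\\\\/]apache[\\\\/]doris[\\\\/](?!nereids)[^\\\\/]+[\\\\/]|PaloFe\\.java");
            // Legacy packages match, so the listed checks are suppressed for them.
            System.out.println(p.matcher("org/apache/doris/alter/Alter.java").find());  // true
            // Nereids paths do not match, so the javadoc rules stay enforced there.
            System.out.println(p.matcher("org/apache/doris/nereids/Foo.java").find());  // false
            System.out.println(p.matcher("org/apache/doris/PaloFe.java").find());       // true
        }
    }
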
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java
index f0337217d8..6aea88c41c 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/DataOutputBuffer.java
@@ -70,7 +70,7 @@ public class DataOutputBuffer extends DataOutputStream {
         public void write(DataInput in, int len) throws IOException {
             int newcount = count + len;
             if (newcount > buf.length) {
-                byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
+                byte[] newbuf = new byte[Math.max(buf.length << 1, newcount)];
                 System.arraycopy(buf, 0, newbuf, 0, count);
                 buf = newbuf;
             }
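
This hunk, repeated throughout the commit, replaces C-style array declarations with Java-style ones.
"byte buf[]" and "byte[] buf" are semantically identical, but the bracket belongs with the type, which is
presumably what checkstyle's ArrayTypeStyle check enforces here. A two-line sketch:

    public class ArrayTypeStyleDemo {
        byte legacy[] = new byte[4];    // C-style: compiles, but flagged by ArrayTypeStyle
        byte[] preferred = new byte[4]; // Java-style: the form this commit converts to
    }
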
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
index ffd2330bd3..60596f44d1 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/IOUtils.java
@@ -34,7 +34,7 @@ import java.net.Socket;
 public class IOUtils {
     public static long copyBytes(InputStream in, OutputStream out,
             int buffSize, long len) throws IOException {
-        byte buf[] = new byte[buffSize];
+        byte[] buf = new byte[buffSize];
         int totalRead = 0;
         int toRead = 0;
         int bytesRead = 0;
@@ -76,7 +76,7 @@ public class IOUtils {
             int buffSize, int speed, boolean close) throws IOException {
 
         PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null;
-        byte buf[] = new byte[buffSize];
+        byte[] buf = new byte[buffSize];
         long bytesReadTotal = 0;
         long startTime = 0;
         long sleepTime = 0;
@@ -133,7 +133,7 @@ public class IOUtils {
             int buffSize, boolean close) throws IOException {
 
         PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null;
-        byte buf[] = new byte[buffSize];
+        byte[] buf = new byte[buffSize];
         long totalBytes = 0;
         try {
             int bytesRead = in.read(buf);
@@ -169,7 +169,7 @@ public class IOUtils {
      *             if it could not read requested number of bytes for any reason
      *             (including EOF)
      */
-    public static void readFully(InputStream in, byte buf[], int off, int len)
+    public static void readFully(InputStream in, byte[] buf, int off, int len)
             throws IOException {
         int toRead = len;
         int tmpOff = off;
@@ -263,6 +263,7 @@ public class IOUtils {
             Text.writeString(output, value);
         }
     }
+
     public static String readOptionStringOrNull(DataInput input) throws IOException {
         if (input.readBoolean()) {
             return Text.readString(input);
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java
index 8f95d7401b..f68a2f179e 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/OutputBuffer.java
@@ -66,7 +66,7 @@ public class OutputBuffer extends FilterOutputStream {
         public void write(InputStream in, int len) throws IOException {
             int newcount = count + len;
             if (newcount > buf.length) {
-                byte newbuf[] = new byte[Math.max(buf.length << 1, newcount)];
+                byte[] newbuf = new byte[Math.max(buf.length << 1, newcount)];
                 System.arraycopy(buf, 0, newbuf, 0, count);
                 buf = newbuf;
             }
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
index 7331195d32..1710cb34e2 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/io/Text.java
@@ -606,7 +606,7 @@ public class Text implements Writable {
         return ch;
     }
 
-    static final int offsetsFromUTF8[] = { 0x00000000, 0x00003080, 0x000E2080,
+    static final int[] offsetsFromUTF8 = { 0x00000000, 0x00003080, 0x000E2080,
             0x03C82080, 0xFA082080, 0x82082080 };
 
     /**
diff --git a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
index b68d2072bf..e58c62f80e 100644
--- a/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
+++ b/fe/fe-common/src/main/java/org/apache/doris/common/property/PropertySchema.java
@@ -332,7 +332,7 @@ public abstract class PropertySchema<T> {
         }
     }
 
-    private static abstract class ComparableProperty<T extends Comparable> extends PropertySchema<T> {
+    private abstract static class ComparableProperty<T extends Comparable> extends PropertySchema<T> {
         protected ComparableProperty(String name) {
             super(name);
         }
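
"private static abstract" becomes "private abstract static" to satisfy checkstyle's ModifierOrder check,
which expects the order recommended by the Java Language Specification: access modifier first, then
abstract, default, static, final, transient, volatile, synchronized, native, strictfp. The same rule
explains the "final synchronized" fix in AlterJobV2 below. A compiling sketch of both cases:

    public class ModifierOrderDemo {
        private abstract static class Ordered {}   // was: private static abstract

        public final synchronized void cancel() {} // was: synchronized final
    }
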
diff --git a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
index 0d1ec31434..8004ee15ab 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/PaloFe.java
@@ -132,7 +132,8 @@ public class PaloFe {
             // 1. HttpServer for HTTP Server
             // 2. FeServer for Thrift Server
             // 3. QeService for MySQL Server
-            QeService qeService = new QeService(Config.query_port, Config.mysql_service_nio_enabled, ExecuteEnv.getInstance().getScheduler());
+            QeService qeService = new QeService(Config.query_port, Config.mysql_service_nio_enabled,
+                    ExecuteEnv.getInstance().getScheduler());
             FeServer feServer = new FeServer(Config.rpc_port);
 
             feServer.start();
@@ -324,7 +325,8 @@ public class PaloFe {
         } else if (cmdLineOpts.runImageTool()) {
             File imageFile = new File(cmdLineOpts.getImagePath());
             if (!imageFile.exists()) {
-                System.out.println("image does not exist: " + imageFile.getAbsolutePath() + " . Please put an absolute path instead");
+                System.out.println("image does not exist: " + imageFile.getAbsolutePath()
+                        + " . Please put an absolute path instead");
                 System.exit(-1);
             } else {
                 System.out.println("Start to load image: ");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
index 2154ab311c..a11537eb33 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/Alter.java
@@ -171,7 +171,8 @@ public class Alter {
                     }
                     Catalog.getCurrentCatalog().dropPartition(db, olapTable, ((DropPartitionClause) alterClause));
                 } else if (alterClause instanceof ReplacePartitionClause) {
-                    Catalog.getCurrentCatalog().replaceTempPartition(db, olapTable, (ReplacePartitionClause) alterClause);
+                    Catalog.getCurrentCatalog().replaceTempPartition(
+                            db, olapTable, (ReplacePartitionClause) alterClause);
                 } else if (alterClause instanceof ModifyPartitionClause) {
                     ModifyPartitionClause clause = ((ModifyPartitionClause) alterClause);
                     // expand the partition names if it is 'Modify Partition(*)'
@@ -206,7 +207,8 @@ public class Alter {
         } else if (currentAlterOps.contains(AlterOpType.MODIFY_DISTRIBUTION)) {
             Preconditions.checkState(alterClauses.size() == 1);
             AlterClause alterClause = alterClauses.get(0);
-            Catalog.getCurrentCatalog().modifyDefaultDistributionBucketNum(db, olapTable, (ModifyDistributionClause) alterClause);
+            Catalog.getCurrentCatalog().modifyDefaultDistributionBucketNum(
+                    db, olapTable, (ModifyDistributionClause) alterClause);
         } else if (currentAlterOps.contains(AlterOpType.MODIFY_COLUMN_COMMENT)) {
             processModifyColumnComment(db, olapTable, alterClauses);
         } else if (currentAlterOps.contains(AlterOpType.MODIFY_TABLE_COMMENT)) {
@@ -227,7 +229,8 @@ public class Alter {
             ModifyTableCommentClause clause = (ModifyTableCommentClause) alterClause;
             tbl.setComment(clause.getComment());
             // log
-            ModifyCommentOperationLog op = ModifyCommentOperationLog.forTable(db.getId(), tbl.getId(), clause.getComment());
+            ModifyCommentOperationLog op = ModifyCommentOperationLog
+                    .forTable(db.getId(), tbl.getId(), clause.getComment());
             Catalog.getCurrentCatalog().getEditLog().logModifyComment(op);
         } finally {
             tbl.writeUnlock();
@@ -338,7 +341,8 @@ public class Alter {
         }
     }
 
-    private void processModifyEngineInternal(Database db, Table externalTable, Map<String, String> prop, boolean isReplay) {
+    private void processModifyEngineInternal(Database db, Table externalTable,
+            Map<String, String> prop, boolean isReplay) {
         MysqlTable mysqlTable = (MysqlTable) externalTable;
         Map<String, String> newProp = Maps.newHashMap(prop);
         newProp.put(OdbcTable.ODBC_HOST, mysqlTable.getHost());
@@ -393,7 +397,8 @@ public class Alter {
                 processAlterExternalTable(stmt, table, db);
                 return;
             default:
-                throw new DdlException("Do not support alter " + table.getType().toString() + " table[" + tableName + "]");
+                throw new DdlException("Do not support alter "
+                        + table.getType().toString() + " table[" + tableName + "]");
         }
 
         // the following ops should done outside table lock. because it contain synchronized create operation
@@ -402,7 +407,8 @@ public class Alter {
             AlterClause alterClause = alterClauses.get(0);
             if (alterClause instanceof AddPartitionClause) {
                 if (!((AddPartitionClause) alterClause).isTempPartition()) {
-                    DynamicPartitionUtil.checkAlterAllowed((OlapTable) db.getTableOrMetaException(tableName, TableType.OLAP));
+                    DynamicPartitionUtil.checkAlterAllowed(
+                            (OlapTable) db.getTableOrMetaException(tableName, TableType.OLAP));
                 }
                 Catalog.getCurrentCatalog().addPartition(db, tableName, (AddPartitionClause) alterClause);
             } else if (alterClause instanceof ModifyPartitionClause) {
@@ -432,7 +438,8 @@ public class Alter {
     }
 
     // entry of processing replace table
-    private void processReplaceTable(Database db, OlapTable origTable, List<AlterClause> alterClauses) throws UserException {
+    private void processReplaceTable(Database db, OlapTable origTable, List<AlterClause> alterClauses)
+            throws UserException {
         ReplaceTableClause clause = (ReplaceTableClause) alterClauses.get(0);
         String newTblName = clause.getTblName();
         boolean swapTable = clause.isSwapTable();
@@ -452,7 +459,8 @@ public class Alter {
                 }
                 replaceTableInternal(db, origTable, olapNewTbl, swapTable, false);
                 // write edit log
-                ReplaceTableOperationLog log = new ReplaceTableOperationLog(db.getId(), origTable.getId(), olapNewTbl.getId(), swapTable);
+                ReplaceTableOperationLog log = new ReplaceTableOperationLog(db.getId(),
+                        origTable.getId(), olapNewTbl.getId(), swapTable);
                 Catalog.getCurrentCatalog().getEditLog().logReplaceTable(log);
                 LOG.info("finish replacing table {} with table {}, is swap: {}", oldTblName, newTblName, swapTable);
             } finally {
@@ -533,7 +541,8 @@ public class Alter {
         modifyViewDef(db, view, stmt.getInlineViewDef(), ctx.getSessionVariable().getSqlMode(), stmt.getColumns());
     }
 
-    private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode, List<Column> newFullSchema) throws DdlException {
+    private void modifyViewDef(Database db, View view, String inlineViewDef, long sqlMode,
+            List<Column> newFullSchema) throws DdlException {
         db.writeLockOrDdlException();
         try {
             view.writeLockOrDdlException();
@@ -549,7 +558,8 @@ public class Alter {
                 db.dropTable(viewName);
                 db.createTable(view);
 
-                AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(), inlineViewDef, newFullSchema, sqlMode);
+                AlterViewInfo alterViewInfo = new AlterViewInfo(db.getId(), view.getId(),
+                        inlineViewDef, newFullSchema, sqlMode);
                 Catalog.getCurrentCatalog().getEditLog().logModifyViewDef(alterViewInfo);
                 LOG.info("modify view[{}] definition to {}", viewName, inlineViewDef);
             } finally {
@@ -680,7 +690,8 @@ public class Alter {
             DateLiteral dateLiteral = new DateLiteral(dataProperty.getCooldownTimeMs(),
                     TimeUtils.getTimeZone(), Type.DATETIME);
             newProperties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, dateLiteral.getStringValue());
-            newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_RESOURCE, dataProperty.getRemoteStorageResourceName());
+            newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_RESOURCE,
+                    dataProperty.getRemoteStorageResourceName());
             DateLiteral dateLiteral1 = new DateLiteral(dataProperty.getRemoteCooldownTimeMs(),
                     TimeUtils.getTimeZone(), Type.DATETIME);
             newProperties.put(PropertyAnalyzer.PROPERTIES_REMOTE_STORAGE_COOLDOWN_TIME, dateLiteral1.getStringValue());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java
index 5205d418b3..347aa36747 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterHandler.java
@@ -116,7 +116,8 @@ public abstract class AlterHandler extends MasterDaemon {
             AlterJobV2 alterJobV2 = iterator.next().getValue();
             if (alterJobV2.isExpire()) {
                 iterator.remove();
-                RemoveAlterJobV2OperationLog log = new RemoveAlterJobV2OperationLog(alterJobV2.getJobId(), alterJobV2.getType());
+                RemoveAlterJobV2OperationLog log = new RemoveAlterJobV2OperationLog(
+                        alterJobV2.getJobId(), alterJobV2.getType());
                 Catalog.getCurrentCatalog().getEditLog().logRemoveExpiredAlterJobV2(log);
                 LOG.info("remove expired {} job {}. finish at {}", alterJobV2.getType(),
                         alterJobV2.getJobId(), TimeUtils.longToTimeString(alterJobV2.getFinishedTimeMs()));
@@ -169,7 +170,7 @@ public abstract class AlterHandler extends MasterDaemon {
      * entry function. handle alter ops for external table
      */
     public void processExternalTable(List<AlterClause> alterClauses, Database db, Table externalTable)
-            throws UserException {};
+            throws UserException {}
 
     /*
      * cancel alter ops
@@ -183,11 +184,13 @@ public abstract class AlterHandler extends MasterDaemon {
      * We assume that the specified version is X.
      * Case 1:
      *      After alter table process starts, there is no new load job being submitted. So the new replica
-     *      should be with version (0-1). So we just modify the replica's version to partition's visible version, which is X.
+     *      should have version (0-1). So we just modify the replica's version to
+     *      the partition's visible version, which is X.
      * Case 2:
      *      After alter table process starts, there are some load job being processed.
      * Case 2.1:
-     *      None of them succeed on this replica. so the version is still 1. We should modify the replica's version to X.
+     *      None of them succeeded on this replica, so the version is still 1.
+     *      We should modify the replica's version to X.
      * Case 2.2
      *      There are new load jobs after alter task, and at least one of them is succeed on this replica.
      *      So the replica's version should be larger than X. So we don't need to modify the replica version
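
The "{};" fix on processExternalTable above removes the stray semicolon after an empty method body. In a
class body that extra ";" is an empty, redundant member declaration, which recent checkstyle versions can
flag (e.g. UnnecessarySemicolonAfterTypeMemberDeclaration); the empty body itself stays written as "{}":

    public class EmptyBodyDemo {
        public void processExternalTable() {} // empty body, no trailing ';'
    }
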
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
index a69ad6266a..89dce3e169 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterJobV2.java
@@ -43,9 +43,12 @@ import java.util.List;
 public abstract class AlterJobV2 implements Writable {
     private static final Logger LOG = LogManager.getLogger(AlterJobV2.class);
 
+
     public enum JobState {
         PENDING, // Job is created
+        // CHECKSTYLE OFF
         WAITING_TXN, // New replicas are created and Shadow catalog object is visible for incoming txns, waiting for previous txns to be finished
+        // CHECKSTYLE ON
         RUNNING, // alter tasks are sent to BE, and waiting for them finished.
         FINISHED, // job is done
         CANCELLED; // job is cancelled(failed or be cancelled by user)
@@ -175,7 +178,7 @@ public abstract class AlterJobV2 implements Writable {
         }
     }
 
-    public synchronized final boolean cancel(String errMsg) {
+    public final synchronized boolean cancel(String errMsg) {
         return cancelImpl(errMsg);
     }
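
The "// CHECKSTYLE OFF" / "// CHECKSTYLE ON" pair added around WAITING_TXN above presumably matches a
comment filter configured in checkstyle.xml (checkstyle ships SuppressionCommentFilter and
SuppressWithPlainTextCommentFilter for this, with configurable off/on comment formats). Everything between
the two markers is exempt from the filtered checks, which is what lets the deliberately long WAITING_TXN
comment survive a LineLength limit. A usage sketch, assuming such a filter is wired up:

    public enum DemoState {
        PENDING,
        // CHECKSTYLE OFF
        WAITING, // an intentionally overlong explanatory comment that would otherwise be reported by the LineLength check
        // CHECKSTYLE ON
        RUNNING
    }
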
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java
index 5d5c346cf3..c60594326b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/AlterOperations.java
@@ -59,8 +59,10 @@ public class AlterOperations {
     }
 
     public boolean hasPartitionOp() {
-        return currentOps.contains(AlterOpType.ADD_PARTITION) || currentOps.contains(AlterOpType.DROP_PARTITION)
-                || currentOps.contains(AlterOpType.REPLACE_PARTITION) || currentOps.contains(AlterOpType.MODIFY_PARTITION);
+        return currentOps.contains(AlterOpType.ADD_PARTITION)
+                || currentOps.contains(AlterOpType.DROP_PARTITION)
+                || currentOps.contains(AlterOpType.REPLACE_PARTITION)
+                || currentOps.contains(AlterOpType.MODIFY_PARTITION);
     }
 
     // MODIFY_TABLE_PROPERTY is also processed by SchemaChangeHandler
@@ -103,6 +105,7 @@ public class AlterOperations {
 
         currentOps.add(opType);
     }
+
     public boolean hasEnableFeatureOP() {
         return currentOps.contains(AlterOpType.ENABLE_FEATURE);
     }
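
Most hunks in this commit are pure line wrapping, presumably to satisfy a LineLength limit, and they
consistently break before the operator so that continuation lines start with "||", "+" or ".", the style
that checkstyle's OperatorWrap check (with its default "nl" option) expects. A small sketch of the
convention:

    import java.util.Set;

    public class WrapStyleDemo {
        boolean hasPartitionOp(Set<String> ops) {
            // Continuation lines lead with the operator, one clause per line.
            return ops.contains("ADD_PARTITION")
                    || ops.contains("DROP_PARTITION")
                    || ops.contains("REPLACE_PARTITION");
        }
    }
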
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
index 66465e0aac..64319c4cfb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
@@ -92,7 +92,8 @@ public class MaterializedViewHandler extends AlterHandler {
     }
 
     // for batch submit rollup job, tableId -> jobId
-    // keep table's not final state job size. The job size determine's table's state, = 0 means table is normal, otherwise is rollup
+    // keep the count of the table's jobs that are not in a final state. This count determines the
+    // table's state: 0 means the table is normal, otherwise it is under rollup
     private Map<Long, Set<Long>> tableNotFinalStateJobMap = new ConcurrentHashMap<>();
     // keep table's running job,used for concurrency limit
     // table id -> set of running job ids
@@ -197,8 +198,9 @@ public class MaterializedViewHandler extends AlterHandler {
             List<Column> mvColumns = checkAndPrepareMaterializedView(addMVClause, olapTable);
 
             // Step2: create mv job
-            RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns, addMVClause
-                    .getProperties(), olapTable, db, baseIndexId, addMVClause.getMVKeysType(), addMVClause.getOrigStmt());
+            RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns,
+                    addMVClause.getProperties(), olapTable, db, baseIndexId,
+                    addMVClause.getMVKeysType(), addMVClause.getOrigStmt());
 
             addAlterJobV2(rollupJobV2);
 
@@ -223,7 +225,8 @@ public class MaterializedViewHandler extends AlterHandler {
      * @throws DdlException
      * @throws AnalysisException
      */
-    public void processBatchAddRollup(List<AlterClause> alterClauses, Database db, OlapTable olapTable) throws DdlException, AnalysisException {
+    public void processBatchAddRollup(List<AlterClause> alterClauses, Database db, OlapTable olapTable)
+            throws DdlException, AnalysisException {
         Map<String, RollupJobV2> rollupNameJobMap = new LinkedHashMap<>();
         // save job id for log
         Set<Long> logJobIdSet = new HashSet<>();
@@ -265,11 +268,12 @@ public class MaterializedViewHandler extends AlterHandler {
                 long baseIndexId = checkAndGetBaseIndex(baseIndexName, olapTable);
 
                 // step 2.2  check rollup schema
-                List<Column> rollupSchema = checkAndPrepareMaterializedView(addRollupClause, olapTable, baseIndexId, changeStorageFormat);
+                List<Column> rollupSchema = checkAndPrepareMaterializedView(
+                        addRollupClause, olapTable, baseIndexId, changeStorageFormat);
 
                 // step 3 create rollup job
-                RollupJobV2 alterJobV2 = createMaterializedViewJob(rollupIndexName, baseIndexName, rollupSchema, addRollupClause.getProperties(),
-                        olapTable, db, baseIndexId, olapTable.getKeysType(), null);
+                RollupJobV2 alterJobV2 = createMaterializedViewJob(rollupIndexName, baseIndexName, rollupSchema,
+                        addRollupClause.getProperties(), olapTable, db, baseIndexId, olapTable.getKeysType(), null);
 
                 rollupNameJobMap.put(addRollupClause.getRollupName(), alterJobV2);
                 logJobIdSet.add(alterJobV2.getJobId());
@@ -319,10 +323,9 @@ public class MaterializedViewHandler extends AlterHandler {
      * @throws AnalysisException
      */
     private RollupJobV2 createMaterializedViewJob(String mvName, String baseIndexName,
-                                                  List<Column> mvColumns, Map<String, String> properties,
-                                                  OlapTable olapTable, Database db, long baseIndexId, KeysType mvKeysType,
-                                                  OriginStatement origStmt)
-            throws DdlException, AnalysisException {
+            List<Column> mvColumns, Map<String, String> properties,
+            OlapTable olapTable, Database db, long baseIndexId, KeysType mvKeysType,
+            OriginStatement origStmt) throws DdlException, AnalysisException {
         if (mvKeysType == null) {
             // assign rollup index's key type, same as base index's
             mvKeysType = olapTable.getKeysType();
@@ -384,11 +387,13 @@ public class MaterializedViewHandler extends AlterHandler {
                     if (baseReplica.getState() == Replica.ReplicaState.CLONE
                             || baseReplica.getState() == Replica.ReplicaState.DECOMMISSION
                             || baseReplica.getLastFailedVersion() > 0) {
-                        LOG.info("base replica {} of tablet {} state is {}, and last failed version is {}, skip creating rollup replica",
-                                baseReplica.getId(), baseTabletId, baseReplica.getState(), baseReplica.getLastFailedVersion());
+                        LOG.info("base replica {} of tablet {} state is {}, and last failed version is {},"
+                                        + " skip creating rollup replica", baseReplica.getId(), baseTabletId,
+                                baseReplica.getState(), baseReplica.getLastFailedVersion());
                         continue;
                     }
-                    Preconditions.checkState(baseReplica.getState() == Replica.ReplicaState.NORMAL, baseReplica.getState());
+                    Preconditions.checkState(baseReplica.getState() == Replica.ReplicaState.NORMAL,
+                            baseReplica.getState());
                     // replica's init state is ALTER, so that tablet report process will ignore its report
                     Replica mvReplica = new Replica(mvReplicaId, backendId, Replica.ReplicaState.ALTER,
                             Partition.PARTITION_INIT_VERSION, mvSchemaHash);
@@ -441,7 +446,8 @@ public class MaterializedViewHandler extends AlterHandler {
         int numOfKeys = 0;
         if (olapTable.getKeysType().isAggregationFamily()) {
             if (addMVClause.getMVKeysType() != KeysType.AGG_KEYS) {
-                throw new DdlException("The materialized view of aggregation or unique table must has grouping columns");
+                throw new DdlException("The materialized view of aggregation"
+                        + " or unique table must has grouping columns");
             }
             for (MVColumnItem mvColumnItem : mvColumnItemList) {
                 String mvColumnName = mvColumnItem.getName();
@@ -735,7 +741,8 @@ public class MaterializedViewHandler extends AlterHandler {
             long dbId = db.getId();
             long tableId = olapTable.getId();
             editLog.logBatchDropRollup(new BatchDropInfo(dbId, tableId, indexIdSet));
-            LOG.info("finished drop rollup index[{}] in table[{}]", String.join("", rollupNameSet), olapTable.getName());
+            LOG.info("finished drop rollup index[{}] in table[{}]",
+                    String.join("", rollupNameSet), olapTable.getName());
         } finally {
             olapTable.writeUnlock();
         }
@@ -998,7 +1005,8 @@ public class MaterializedViewHandler extends AlterHandler {
                 continue;
             }
             if (ctx != null) {
-                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) {
+                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(),
+                        alterJob.getTableName(), PrivPredicate.ALTER)) {
                     continue;
                 }
             }
@@ -1041,7 +1049,8 @@ public class MaterializedViewHandler extends AlterHandler {
         }
         olapTable.writeLock();
         try {
-            if (olapTable.getState() != OlapTableState.ROLLUP && olapTable.getState() != OlapTableState.WAITING_STABLE) {
+            if (olapTable.getState() != OlapTableState.ROLLUP
+                    && olapTable.getState() != OlapTableState.WAITING_STABLE) {
                 throw new DdlException("Table[" + tableName + "] is not under ROLLUP. "
                         + "Use 'ALTER TABLE DROP ROLLUP' if you want to.");
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java
index a05863d9ea..ba707233a5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/RollupJobV2.java
@@ -157,7 +157,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
     }
 
     public void addTabletIdMap(long partitionId, long rollupTabletId, long baseTabletId) {
-        Map<Long, Long> tabletIdMap = partitionIdToBaseRollupTabletIdMap.computeIfAbsent(partitionId, k -> Maps.newHashMap());
+        Map<Long, Long> tabletIdMap = partitionIdToBaseRollupTabletIdMap
+                .computeIfAbsent(partitionId, k -> Maps.newHashMap());
         tabletIdMap.put(rollupTabletId, baseTabletId);
     }
 
@@ -181,7 +182,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         Preconditions.checkState(jobState == JobState.PENDING, jobState);
 
         LOG.info("begin to send create rollup replica tasks. job: {}", jobId);
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId,
+                s -> new AlterCancelException("Database " + s + " does not exist"));
         if (!checkTableStable(db)) {
             return;
         }
@@ -291,7 +293,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
             tbl.writeUnlock();
         }
 
-        this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
+        this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr()
+                .getTransactionIDGenerator().getNextTransactionId();
         this.jobState = JobState.WAITING_TXN;
 
         // write edit log
@@ -333,7 +336,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         }
 
         LOG.info("previous transactions are all finished, begin to send rollup tasks. job: {}", jobId);
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Databasee " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId,
+                s -> new AlterCancelException("Database " + s + " does not exist"));
 
         OlapTable tbl;
         try {
@@ -412,7 +416,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         // must check if db or table still exist first.
         // or if table is dropped, the tasks will never be finished,
         // and the job will be in RUNNING state forever.
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Databasee " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId,
+                s -> new AlterCancelException("Database " + s + " does not exist"));
 
         OlapTable tbl;
         try {
@@ -447,7 +452,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
                 }
 
                 long visiableVersion = partition.getVisibleVersion();
-                short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation(partitionId).getTotalReplicaNum();
+                short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation(
+                        partitionId).getTotalReplicaNum();
 
 
                 MaterializedIndex rollupIndex = entry.getValue();
@@ -464,7 +470,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
                     if (healthyReplicaNum < expectReplicationNum / 2 + 1) {
                         LOG.warn("rollup tablet {} has few healthy replicas: {}, rollup job: {}",
                                 rollupTablet.getId(), replicas, jobId);
-                        throw new AlterCancelException("rollup tablet " + rollupTablet.getId() + " has few healthy replicas");
+                        throw new AlterCancelException("rollup tablet " + rollupTablet.getId()
+                                + " has few healthy replicas");
                     }
                 } // end for tablets
             } // end for partitions
@@ -544,7 +551,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
 
     // Check whether transactions of the given database which txnId is less than 'watershedTxnId' are finished.
     protected boolean isPreviousLoadFinished() throws AnalysisException {
-        return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
+        return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(
+                watershedTxnId, dbId, Lists.newArrayList(tableId));
     }
 
     /**
@@ -745,7 +753,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
             return;
         }
         // parse the define stmt to schema
-        SqlParser parser = new SqlParser(new SqlScanner(new StringReader(origStmt.originStmt), SqlModeHelper.MODE_DEFAULT));
+        SqlParser parser = new SqlParser(new SqlScanner(
+                new StringReader(origStmt.originStmt), SqlModeHelper.MODE_DEFAULT));
         ConnectContext connectContext = new ConnectContext();
         Database db;
         try {
@@ -755,7 +764,8 @@ public class RollupJobV2 extends AlterJobV2 implements GsonPostProcessable {
         }
         String clusterName = db.getClusterName();
         // It's almost impossible that db's cluster name is null, just in case
-        // because before user want to create database, he must first enter a cluster which means that cluster is set to current ConnectContext
+        // because before a user can create a database, they must first enter a cluster,
+        // which means the cluster is set on the current ConnectContext
         // then when createDBStmt is executed, cluster name is set to Database
         if (clusterName == null || clusterName.length() == 0) {
             clusterName = SystemInfoService.DEFAULT_CLUSTER;
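
The getDbOrException(dbId, s -> new AlterCancelException(...)) calls wrapped in this file follow a
lookup-or-throw pattern: fetch an entity by id, or build and throw the caller-supplied exception when it is
absent. A generic sketch of the idea (not Doris's actual signature; names here are illustrative):

    import java.util.Map;
    import java.util.function.Function;

    public class OrExceptionDemo {
        // Return the value for id, or throw the exception produced by the factory.
        static <T, E extends Exception> T getOrException(
                Map<Long, T> store, long id, Function<Long, E> toThrow) throws E {
            T value = store.get(id);
            if (value == null) {
                throw toThrow.apply(id);
            }
            return value;
        }

        public static void main(String[] args) throws Exception {
            Map<Long, String> dbs = Map.of(1L, "test_db");
            System.out.println(getOrException(dbs, 1L,
                    id -> new IllegalStateException("Database " + id + " does not exist")));
        }
    }
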
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 1ffa5b8607..6329834d59 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -111,7 +111,8 @@ public class SchemaChangeHandler extends AlterHandler {
 
     public static final int CYCLE_COUNT_TO_CHECK_EXPIRE_SCHEMA_CHANGE_JOB = 20;
 
-    public final ThreadPoolExecutor schemaChangeThreadPool = ThreadPoolManager.newDaemonCacheThreadPool(MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE, "schema-change-pool", true);
+    public final ThreadPoolExecutor schemaChangeThreadPool = ThreadPoolManager.newDaemonCacheThreadPool(
+            MAX_ACTIVE_SCHEMA_CHANGE_JOB_V2_SIZE, "schema-change-pool", true);
 
     public final Map<Long, AlterJobV2> activeSchemaChangeJobsV2 = Maps.newConcurrentMap();
 
@@ -144,7 +145,8 @@ public class SchemaChangeHandler extends AlterHandler {
                 indexSchemaMap, newColNameSet);
     }
 
-    private void processAddColumn(AddColumnClause alterClause, Table externalTable, List<Column> newSchema) throws DdlException {
+    private void processAddColumn(AddColumnClause alterClause,
+            Table externalTable, List<Column> newSchema) throws DdlException {
         Column column = alterClause.getColumn();
         ColumnPosition columnPos = alterClause.getColPos();
         Set<String> newColNameSet = Sets.newHashSet(column.getName());
@@ -152,7 +154,8 @@ public class SchemaChangeHandler extends AlterHandler {
         addColumnInternal(column, columnPos, newSchema, newColNameSet);
     }
 
-    private void processAddColumns(AddColumnsClause alterClause, Table externalTable, List<Column> newSchema) throws DdlException {
+    private void processAddColumns(AddColumnsClause alterClause,
+            Table externalTable, List<Column> newSchema) throws DdlException {
         List<Column> columns = alterClause.getColumns();
         Set<String> newColNameSet = Sets.newHashSet();
         for (Column column : alterClause.getColumns()) {
@@ -190,7 +193,8 @@ public class SchemaChangeHandler extends AlterHandler {
         }
     }
 
-    private void processDropColumn(DropColumnClause alterClause, Table externalTable, List<Column> newSchema) throws DdlException {
+    private void processDropColumn(DropColumnClause alterClause,
+            Table externalTable, List<Column> newSchema) throws DdlException {
         String dropColName = alterClause.getColName();
 
         // find column in base index and remove it
@@ -216,7 +220,7 @@ public class SchemaChangeHandler extends AlterHandler {
     }
 
     private void processDropColumn(DropColumnClause alterClause, OlapTable olapTable,
-                                   Map<Long, LinkedList<Column>> indexSchemaMap, List<Index> indexes) throws DdlException {
+            Map<Long, LinkedList<Column>> indexSchemaMap, List<Index> indexes) throws DdlException {
         String dropColName = alterClause.getColName();
         String targetIndexName = alterClause.getRollupName();
         checkIndexExists(olapTable, targetIndexName);
@@ -261,7 +265,8 @@ public class SchemaChangeHandler extends AlterHandler {
                     }
                 }
                 if (isKey && hasReplaceColumn) {
-                    throw new DdlException("Can not drop key column when table has value column with REPLACE aggregation method");
+                    throw new DdlException(
+                            "Can not drop key column when table has value column with REPLACE aggregation method");
                 }
             } else {
                 // drop column in rollup and base index
@@ -279,7 +284,8 @@ public class SchemaChangeHandler extends AlterHandler {
                     }
                 }
                 if (isKey && hasReplaceColumn) {
-                    throw new DdlException("Can not drop key column when rollup has value column with REPLACE aggregation metho");
+                    throw new DdlException(
+                            "Can not drop key column when rollup has value column with REPLACE aggregation method");
                 }
             }
         }
@@ -352,7 +358,8 @@ public class SchemaChangeHandler extends AlterHandler {
     }
 
     // User can modify column type and column position
-    private void processModifyColumn(ModifyColumnClause alterClause, Table externalTable, List<Column> newSchema) throws DdlException {
+    private void processModifyColumn(ModifyColumnClause alterClause,
+            Table externalTable, List<Column> newSchema) throws DdlException {
         Column modColumn = alterClause.getColumn();
         ColumnPosition columnPos = alterClause.getColPos();
 
@@ -430,14 +437,16 @@ public class SchemaChangeHandler extends AlterHandler {
             }
         } else if (KeysType.UNIQUE_KEYS == olapTable.getKeysType()) {
             if (null != modColumn.getAggregationType()) {
-                throw new DdlException("Can not assign aggregation method on column in Unique data model table: " + modColumn.getName());
+                throw new DdlException("Can not assign aggregation method"
+                        + " on column in Unique data model table: " + modColumn.getName());
             }
             if (!modColumn.isKey()) {
                 modColumn.setAggregationType(AggregateType.REPLACE, true);
             }
         } else {
             if (null != modColumn.getAggregationType()) {
-                throw new DdlException("Can not assign aggregation method on column in Duplicate data model table: " + modColumn.getName());
+                throw new DdlException("Can not assign aggregation method"
+                        + " on column in Duplicate data model table: " + modColumn.getName());
             }
             if (!modColumn.isKey()) {
                 modColumn.setAggregationType(AggregateType.NONE, true);
@@ -604,7 +613,8 @@ public class SchemaChangeHandler extends AlterHandler {
         }
     }
 
-    private void processReorderColumn(ReorderColumnsClause alterClause, Table externalTable, List<Column> newSchema) throws DdlException {
+    private void processReorderColumn(ReorderColumnsClause alterClause,
+            Table externalTable, List<Column> newSchema) throws DdlException {
         List<String> orderedColNames = alterClause.getColumnsByPos();
 
         newSchema.clear();
@@ -763,18 +773,21 @@ public class SchemaChangeHandler extends AlterHandler {
                 newColumn.setIsKey(true);
             } else if (newColumn.getAggregationType() == AggregateType.SUM
                     && newColumn.getDefaultValue() != null && !newColumn.getDefaultValue().equals("0")) {
-                throw new DdlException("The default value of '" + newColName + "' with SUM aggregation function must be zero");
+                throw new DdlException("The default value of '"
+                        + newColName + "' with SUM aggregation function must be zero");
             }
         } else if (KeysType.UNIQUE_KEYS == olapTable.getKeysType()) {
             if (newColumn.getAggregationType() != null) {
-                throw new DdlException("Can not assign aggregation method on column in Unique data model table: " + newColName);
+                throw new DdlException("Can not assign aggregation method"
+                        + " on column in Unique data model table: " + newColName);
             }
             if (!newColumn.isKey()) {
                 newColumn.setAggregationType(AggregateType.REPLACE, true);
             }
         } else {
             if (newColumn.getAggregationType() != null) {
-                throw new DdlException("Can not assign aggregation method on column in Duplicate data model table: " + newColName);
+                throw new DdlException("Can not assign aggregation method"
+                        + " on column in Duplicate data model table: " + newColName);
             }
             if (!newColumn.isKey()) {
                 if (targetIndexId != -1L
@@ -790,7 +803,8 @@ public class SchemaChangeHandler extends AlterHandler {
             throw new DdlException("HLL type column can only be in Aggregation data model table: " + newColName);
         }
 
-        if (newColumn.getAggregationType() == AggregateType.BITMAP_UNION && KeysType.AGG_KEYS != olapTable.getKeysType()) {
+        if (newColumn.getAggregationType() == AggregateType.BITMAP_UNION
+                && KeysType.AGG_KEYS != olapTable.getKeysType()) {
             throw new DdlException("BITMAP_UNION must be used in AGG_KEYS");
         }
 
@@ -1095,7 +1109,8 @@ public class SchemaChangeHandler extends AlterHandler {
         // create job
         Catalog catalog = Catalog.getCurrentCatalog();
         long jobId = catalog.getNextId();
-        SchemaChangeJobV2 schemaChangeJob = new SchemaChangeJobV2(jobId, dbId, olapTable.getId(), olapTable.getName(), timeoutSecond * 1000);
+        SchemaChangeJobV2 schemaChangeJob = new SchemaChangeJobV2(jobId, dbId,
+                olapTable.getId(), olapTable.getName(), timeoutSecond * 1000);
         schemaChangeJob.setBloomFilterInfo(hasBfChange, bfColumns, bfFpp);
         schemaChangeJob.setAlterIndexInfo(hasIndexChange, indexes);
 
@@ -1288,7 +1303,8 @@ public class SchemaChangeHandler extends AlterHandler {
 
         /*
          * Create schema change job
-         * 1. For each index which has been changed, create a SHADOW index, and save the mapping of origin index to SHADOW index.
+         * 1. For each index which has been changed, create a SHADOW index,
+         *    and save the mapping of origin index to SHADOW index.
          * 2. Create all tablets and replicas of all SHADOW index, add them to tablet inverted index.
          * 3. Change table's state as SCHEMA_CHANGE
          */
@@ -1316,7 +1332,8 @@ public class SchemaChangeHandler extends AlterHandler {
                 // index state is SHADOW
                 MaterializedIndex shadowIndex = new MaterializedIndex(shadowIndexId, IndexState.SHADOW);
                 MaterializedIndex originIndex = partition.getIndex(originIndexId);
-                TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId, shadowIndexId, newSchemaHash, medium);
+                TabletMeta shadowTabletMeta = new TabletMeta(dbId, tableId, partitionId,
+                        shadowIndexId, newSchemaHash, medium);
                 ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partitionId);
                 Short totalReplicaNum = replicaAlloc.getTotalReplicaNum();
                 for (Tablet originTablet : originIndex.getTablets()) {
@@ -1338,11 +1355,14 @@ public class SchemaChangeHandler extends AlterHandler {
                         if (originReplica.getState() == Replica.ReplicaState.CLONE
                                 || originReplica.getState() == Replica.ReplicaState.DECOMMISSION
                                 || originReplica.getLastFailedVersion() > 0) {
-                            LOG.info("origin replica {} of tablet {} state is {}, and last failed version is {}, skip creating shadow replica",
-                                    originReplica.getId(), originReplica, originReplica.getState(), originReplica.getLastFailedVersion());
+                            LOG.info("origin replica {} of tablet {} state is {},"
+                                            + " and last failed version is {}, skip creating shadow replica",
+                                    originReplica.getId(), originReplica, originReplica.getState(),
+                                    originReplica.getLastFailedVersion());
                             continue;
                         }
-                        Preconditions.checkState(originReplica.getState() == ReplicaState.NORMAL, originReplica.getState());
+                        Preconditions.checkState(originReplica.getState() == ReplicaState.NORMAL,
+                                originReplica.getState());
                         // replica's init state is ALTER, so that tablet report process will ignore its report
                         Replica shadowReplica = new Replica(shadowReplicaId, backendId, ReplicaState.ALTER,
                                 Partition.PARTITION_INIT_VERSION, newSchemaHash);
@@ -1353,8 +1373,8 @@ public class SchemaChangeHandler extends AlterHandler {
                     if (healthyReplicaNum < totalReplicaNum / 2 + 1) {
                         /*
                          * TODO(cmy): This is a bad design.
-                         * Because in the schema change job, we will only send tasks to the shadow replicas that have been created,
-                         * without checking whether the quorum of replica number are satisfied.
+                         * Because in the schema change job, we will only send tasks to the shadow replicas
+                         * that have been created, without checking whether the quorum of replica number is satisfied.
                          * This will cause the job to fail until we find that the quorum of replica number
                          * is not satisfied until the entire job is done.
                          * So here we check the replica number strictly and do not allow to submit the job
@@ -1370,7 +1390,8 @@ public class SchemaChangeHandler extends AlterHandler {
 
                 schemaChangeJob.addPartitionShadowIndex(partitionId, shadowIndexId, shadowIndex);
             } // end for partition
-            schemaChangeJob.addIndexSchema(shadowIndexId, originIndexId, newIndexName, newSchemaVersion, newSchemaHash, newShortKeyColumnCount, entry.getValue());
+            schemaChangeJob.addIndexSchema(shadowIndexId, originIndexId, newIndexName,
+                    newSchemaVersion, newSchemaHash, newShortKeyColumnCount, entry.getValue());
         } // end for index
 
         // set table state
@@ -1428,14 +1449,16 @@ public class SchemaChangeHandler extends AlterHandler {
         return schemaChangeJobInfos;
     }
 
-    private void getAlterJobV2Infos(Database db, List<AlterJobV2> alterJobsV2, List<List<Comparable>> schemaChangeJobInfos) {
+    private void getAlterJobV2Infos(Database db, List<AlterJobV2> alterJobsV2,
+            List<List<Comparable>> schemaChangeJobInfos) {
         ConnectContext ctx = ConnectContext.get();
         for (AlterJobV2 alterJob : alterJobsV2) {
             if (alterJob.getDbId() != db.getId()) {
                 continue;
             }
             if (ctx != null) {
-                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) {
+                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                        ctx, db.getFullName(), alterJob.getTableName(), PrivPredicate.ALTER)) {
                     continue;
                 }
             }
@@ -1487,9 +1510,11 @@ public class SchemaChangeHandler extends AlterHandler {
                     } else if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(properties)) {
                         if (!olapTable.dynamicPartitionExists()) {
                             try {
-                                DynamicPartitionUtil.checkInputDynamicPartitionProperties(properties, olapTable.getPartitionInfo());
+                                DynamicPartitionUtil.checkInputDynamicPartitionProperties(
+                                        properties, olapTable.getPartitionInfo());
                             } catch (DdlException e) {
-                                // This table is not a dynamic partition table and didn't supply all dynamic partition properties
+                                // This table is not a dynamic partition table
+                                // and didn't supply all dynamic partition properties
                                 throw new DdlException("Table " + db.getFullName() + "."
                                         + olapTable.getName() + " is not a dynamic partition table."
                                         + " Use command `HELP ALTER TABLE` "
@@ -1498,8 +1523,10 @@ public class SchemaChangeHandler extends AlterHandler {
                         }
                         Catalog.getCurrentCatalog().modifyTableDynamicPartition(db, olapTable, properties);
                         return;
-                    } else if (properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) {
-                        Preconditions.checkNotNull(properties.get("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION));
+                    } else if (properties.containsKey(
+                            "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) {
+                        Preconditions.checkNotNull(properties.get("default."
+                                + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION));
                         Catalog.getCurrentCatalog().modifyTableDefaultReplicaAllocation(db, olapTable, properties);
                         return;
                     } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) {
@@ -1615,7 +1642,8 @@ public class SchemaChangeHandler extends AlterHandler {
     /**
      * Update all partitions' in-memory property of table
      */
-    public void updateTableInMemoryMeta(Database db, String tableName, Map<String, String> properties) throws UserException {
+    public void updateTableInMemoryMeta(Database db, String tableName, Map<String, String> properties)
+            throws UserException {
         List<Partition> partitions = Lists.newArrayList();
         OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP);
         olapTable.readLock();
@@ -1646,9 +1674,8 @@ public class SchemaChangeHandler extends AlterHandler {
      * Update some specified partitions' in-memory property of table
      */
     public void updatePartitionsInMemoryMeta(Database db,
-                                             String tableName,
-                                             List<String> partitionNames,
-                                             Map<String, String> properties) throws DdlException, MetaNotFoundException {
+            String tableName, List<String> partitionNames, Map<String, String> properties)
+            throws DdlException, MetaNotFoundException {
         OlapTable olapTable = (OlapTable) db.getTableOrMetaException(tableName, Table.TableType.OLAP);
         boolean isInMemory = Boolean.parseBoolean(properties.get(PropertyAnalyzer.PROPERTIES_INMEMORY));
         if (isInMemory == olapTable.isInMemory()) {
@@ -1735,7 +1762,8 @@ public class SchemaChangeHandler extends AlterHandler {
                 } else {
                     List<Map.Entry<Long, Set<Pair<Long, Integer>>>> unfinishedMarks = countDownLatch.getLeftMarks();
                     // only show at most 3 results
-                    List<Map.Entry<Long, Set<Pair<Long, Integer>>>> subList = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3));
+                    List<Map.Entry<Long, Set<Pair<Long, Integer>>>> subList
+                            = unfinishedMarks.subList(0, Math.min(unfinishedMarks.size(), 3));
                     if (!subList.isEmpty()) {
                         errMsg += " Unfinished mark: " + Joiner.on(", ").join(subList);
                     }
@@ -1770,9 +1798,11 @@ public class SchemaChangeHandler extends AlterHandler {
             // find from new alter jobs first
             List<AlterJobV2> schemaChangeJobV2List = getUnfinishedAlterJobV2ByTableId(olapTable.getId());
             // current schemaChangeJob job doesn't support batch operation,so just need to get one job
-            schemaChangeJobV2 = schemaChangeJobV2List.size() == 0 ? null : Iterables.getOnlyElement(schemaChangeJobV2List);
+            schemaChangeJobV2 = schemaChangeJobV2List.size() == 0
+                    ? null : Iterables.getOnlyElement(schemaChangeJobV2List);
             if (schemaChangeJobV2 == null) {
-                throw new DdlException("Table[" + tableName + "] is under schema change state but could not find related job");
+                throw new DdlException("Table[" + tableName + "] is under schema change state"
+                        + " but could not find related job");
             }
         } finally {
             olapTable.writeUnlock();
@@ -1804,7 +1834,8 @@ public class SchemaChangeHandler extends AlterHandler {
         for (Index existedIdx : existedIndexes) {
             if (existedIdx.getIndexName().equalsIgnoreCase(indexDef.getIndexName())) {
                 if (indexDef.isSetIfNotExists()) {
-                    LOG.info("create index[{}] which already exists on table[{}]", indexDef.getIndexName(), olapTable.getName());
+                    LOG.info("create index[{}] which already exists on table[{}]",
+                            indexDef.getIndexName(), olapTable.getName());
                     return true;
                 }
                 throw new DdlException("index `" + indexDef.getIndexName() + "` already exist.");
@@ -1834,7 +1865,8 @@ public class SchemaChangeHandler extends AlterHandler {
      * Returns true if the index does not exist, there is no need to create the job to drop the index.
      * Otherwise return false, there is need to create a job to drop the index.
      */
-    private boolean processDropIndex(DropIndexClause alterClause, OlapTable olapTable, List<Index> indexes) throws DdlException {
+    private boolean processDropIndex(DropIndexClause alterClause, OlapTable olapTable,
+            List<Index> indexes) throws DdlException {
         String indexName = alterClause.getIndexName();
         List<Index> existedIndexes = olapTable.getIndexes();
         Index found = null;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
index a95bdede1d..cf1dc977a9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
@@ -198,7 +198,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
     protected void runPendingJob() throws AlterCancelException {
         Preconditions.checkState(jobState == JobState.PENDING, jobState);
         LOG.info("begin to send create replica tasks. job: {}", jobId);
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog()
+                .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
 
         if (!checkTableStable(db)) {
             return;
@@ -261,7 +262,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
                                     tbl.isInMemory(),
                                     tbl.getPartitionInfo().getTabletType(partitionId),
                                     tbl.getCompressionType());
-                            createReplicaTask.setBaseTablet(partitionIndexTabletMap.get(partitionId, shadowIdxId).get(shadowTabletId), originSchemaHash);
+                            createReplicaTask.setBaseTablet(partitionIndexTabletMap.get(partitionId, shadowIdxId)
+                                    .get(shadowTabletId), originSchemaHash);
                             if (this.storageFormat != null) {
                                 createReplicaTask.setStorageFormat(this.storageFormat);
                             }
@@ -317,12 +319,14 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
             tbl.writeUnlock();
         }
 
-        this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
+        this.watershedTxnId = Catalog.getCurrentGlobalTransactionMgr()
+                .getTransactionIDGenerator().getNextTransactionId();
         this.jobState = JobState.WAITING_TXN;
 
         // write edit log
         Catalog.getCurrentCatalog().getEditLog().logAlterJob(this);
-        LOG.info("transfer schema change job {} state to {}, watershed txn id: {}", jobId, this.jobState, watershedTxnId);
+        LOG.info("transfer schema change job {} state to {}, watershed txn id: {}",
+                jobId, this.jobState, watershedTxnId);
     }
 
     private void addShadowIndexToCatalog(OlapTable tbl) {
@@ -369,7 +373,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
         }
 
         LOG.info("previous transactions are all finished, begin to send schema change tasks. job: {}", jobId);
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog()
+                .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
 
         OlapTable tbl;
         try {
@@ -473,7 +478,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
         // must check if db or table still exist first.
         // or if table is dropped, the tasks will never be finished,
         // and the job will be in RUNNING state forever.
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
+        Database db = Catalog.getCurrentCatalog()
+                .getDbOrException(dbId, s -> new AlterCancelException("Database " + s + " does not exist"));
 
         OlapTable tbl;
         try {
@@ -487,7 +493,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
             List<AgentTask> tasks = schemaChangeBatchTask.getUnfinishedTasks(2000);
             for (AgentTask task : tasks) {
                 if (task.getFailedTimes() >= 3) {
-                    throw new AlterCancelException("schema change task failed after try three times: " + task.getErrorMsg());
+                    throw new AlterCancelException("schema change task failed after try three times: "
+                            + task.getErrorMsg());
                 }
             }
             return;
@@ -507,7 +514,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
                 Preconditions.checkNotNull(partition, partitionId);
 
                 long visiableVersion = partition.getVisibleVersion();
-                short expectReplicationNum = tbl.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum();
+                short expectReplicationNum = tbl.getPartitionInfo()
+                        .getReplicaAllocation(partition.getId()).getTotalReplicaNum();
 
                 Map<Long, MaterializedIndex> shadowIndexMap = partitionIndexMap.row(partitionId);
                 for (Map.Entry<Long, MaterializedIndex> entry : shadowIndexMap.entrySet()) {
@@ -680,7 +688,8 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
 
     // Check whether transactions of the given database which txnId is less than 'watershedTxnId' are finished.
     protected boolean isPreviousLoadFinished() throws AnalysisException {
-        return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
+        return Catalog.getCurrentGlobalTransactionMgr().isPreviousTransactionsFinished(
+                watershedTxnId, dbId, Lists.newArrayList(tableId));
     }
 
     /**
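
The watershedTxnId recorded above when the job leaves the PENDING state acts as a barrier: isPreviousLoadFinished only returns true once every load transaction with a smaller id has finished, and only then are the schema change tasks sent. A rough sketch of that idea, using a hypothetical TxnManager interface in place of Doris's GlobalTransactionMgr:

    interface TxnManager {
        long nextTxnId();                      // monotonically increasing ids
        boolean allFinishedBefore(long txnId); // true if all txns < txnId are done
    }

    final class WatershedBarrier {
        private final TxnManager mgr;
        private long watershedTxnId = -1;

        WatershedBarrier(TxnManager mgr) {
            this.mgr = mgr;
        }

        void arm() {
            // every load that began before this point has a smaller txn id
            watershedTxnId = mgr.nextTxnId();
        }

        boolean previousLoadsFinished() {
            return watershedTxnId >= 0 && mgr.allFinishedBefore(watershedTxnId);
        }
    }
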
diff --git a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java
index 44cd210c78..b0aa25a6fe 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SystemHandler.java
@@ -117,7 +117,8 @@ public class SystemHandler extends AlterHandler {
             AddBackendClause addBackendClause = (AddBackendClause) alterClause;
             final String destClusterName = addBackendClause.getDestCluster();
 
-            if ((!Strings.isNullOrEmpty(destClusterName) || addBackendClause.isFree()) && Config.disable_cluster_feature) {
+            if ((!Strings.isNullOrEmpty(destClusterName) || addBackendClause.isFree())
+                    && Config.disable_cluster_feature) {
                 ErrorReport.reportAnalysisException(ErrorCode.ERR_INVALID_OPERATION, "ADD BACKEND TO CLUSTER");
             }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java
index 2bf8a48fd5..8a602921a4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AbstractBackupStmt.java
@@ -36,8 +36,8 @@ import java.util.Map;
 public class AbstractBackupStmt extends DdlStmt {
     private static final Logger LOG = LogManager.getLogger(AbstractBackupStmt.class);
 
-    private final static String PROP_TIMEOUT = "timeout";
-    private final static long MIN_TIMEOUT_MS = 600 * 1000L; // 10 min
+    private static final String PROP_TIMEOUT = "timeout";
+    private static final long MIN_TIMEOUT_MS = 600 * 1000L; // 10 min
 
     protected LabelName labelName;
     protected String repoName;
@@ -46,8 +46,9 @@ public class AbstractBackupStmt extends DdlStmt {
 
     protected long timeoutMs;
 
-    public AbstractBackupStmt(LabelName labelName, String repoName, AbstractBackupTableRefClause abstractBackupTableRefClause,
-                              Map<String, String> properties) {
+    public AbstractBackupStmt(LabelName labelName, String repoName,
+            AbstractBackupTableRefClause abstractBackupTableRefClause,
+            Map<String, String> properties) {
         this.labelName = labelName;
         this.repoName = repoName;
         this.abstractBackupTableRefClause = abstractBackupTableRefClause;
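
Several of the one-line hunks in this commit, like PROP_TIMEOUT above, only reorder modifiers: `private final static` becomes `private static final`. Checkstyle's ModifierOrder check enforces the order suggested by the Java Language Specification, for example:

    public class ModifierOrderExample {
        // flagged by Checkstyle ModifierOrder ('static' must precede 'final'):
        //     private final static long BAD = 1L;

        // JLS-suggested order: access modifier, then static, then final
        private static final long GOOD = 1L;
    }
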
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
index f9006f4c33..818032ccfd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCancelRebalanceDiskStmt.java
@@ -39,7 +39,7 @@ public class AdminCancelRebalanceDiskStmt extends DdlStmt {
         ImmutableMap<Long, Backend> backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map<String, Long> backendsID = new HashMap<String, Long>();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
             for (Backend backend : backendsInfo.values()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
index 6a1e716102..b0a0b8caa4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCleanTrashStmt.java
@@ -39,7 +39,7 @@ public class AdminCleanTrashStmt extends DdlStmt {
         ImmutableMap<Long, Backend> backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map<String, Long> backendsID = new HashMap<String, Long>();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
             for (Backend backend : backendsInfo.values()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
index 1cd448a0fd..d65ad0acff 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminCompactTableStmt.java
@@ -83,7 +83,8 @@ public class AdminCompactTableStmt extends DdlStmt {
 
         // analyze where clause if not null
         if (where == null) {
-            throw new AnalysisException("Compaction type must be specified in Where clause like: type = 'BASE/CUMULATIVE'");
+            throw new AnalysisException("Compaction type must be specified in"
+                    + " Where clause like: type = 'BASE/CUMULATIVE'");
         }
 
         if (!analyzeWhere()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
index cbda427a66..f99c0126bb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AdminRebalanceDiskStmt.java
@@ -40,12 +40,10 @@ public class AdminRebalanceDiskStmt extends DdlStmt {
         ImmutableMap<Long, Backend> backendsInfo = Catalog.getCurrentSystemInfo().getIdToBackend();
         Map<String, Long> backendsID = new HashMap<String, Long>();
         for (Backend backend : backendsInfo.values()) {
-            backendsID.put(String.valueOf(backend.getHost()) + ":" + String.valueOf(backend.getHeartbeatPort()), backend.getId());
+            backendsID.put(backend.getHost() + ":" + backend.getHeartbeatPort(), backend.getId());
         }
         if (backends == null) {
-            for (Backend backend : backendsInfo.values()) {
-                this.backends.add(backend);
-            }
+            this.backends.addAll(backendsInfo.values());
         } else {
             for (String backend : backends) {
                 if (backendsID.get(backend) != null) {
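
The hunk above also replaces a manual copy loop with Collection.addAll, which says the same thing in one line. An equivalent standalone example:

    import java.util.ArrayList;
    import java.util.List;

    final class AddAllExample {
        static List<String> copyOf(List<String> source) {
            List<String> target = new ArrayList<>();
            // equivalent to: for (String s : source) { target.add(s); }
            target.addAll(source);
            return target;
        }
    }
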
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
index 298837d11e..28084d44ba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfo.java
@@ -70,7 +70,7 @@ import java.util.List;
  * TODO: Add query tests for aggregation with intermediate tuples with num_nodes=1.
  */
 public final class AggregateInfo extends AggregateInfoBase {
-    private final static Logger LOG = LogManager.getLogger(AggregateInfo.class);
+    private static final Logger LOG = LogManager.getLogger(AggregateInfo.class);
 
     public enum AggPhase {
         FIRST,
@@ -81,7 +81,7 @@ public final class AggregateInfo extends AggregateInfoBase {
         public boolean isMerge() {
             return this == FIRST_MERGE || this == SECOND_MERGE;
         }
-    };
+    }
 
     // created by createMergeAggInfo()
     private AggregateInfo mergeAggInfo;
@@ -173,7 +173,7 @@ public final class AggregateInfo extends AggregateInfoBase {
      * If an aggTupleDesc is created, also registers eq predicates between the
      * grouping exprs and their respective slots with 'analyzer'.
      */
-    static public AggregateInfo create(
+    public static AggregateInfo create(
             ArrayList<Expr> groupingExprs, ArrayList<FunctionCallExpr> aggExprs,
             TupleDescriptor tupleDesc, Analyzer analyzer)
             throws AnalysisException {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
index b44dc45589..f0298afff6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateInfoBase.java
@@ -38,7 +38,7 @@ import java.util.List;
  * tuple descriptors as well as their smaps for evaluating aggregate functions.
  */
 public abstract class AggregateInfoBase {
-    private final static Logger LOG =
+    private static final Logger LOG =
             LoggerFactory.getLogger(AggregateInfoBase.class);
 
     // For aggregations: All unique grouping expressions from a select block.
@@ -248,7 +248,7 @@ public abstract class AggregateInfoBase {
             if (intermediateType != null) {
                 return true;
             }
-            if (noGrouping && ((AggregateFunction) aggExpr.fn).getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT)) {
+            if (noGrouping && aggExpr.fn.getNullableMode().equals(Function.NullableMode.DEPEND_ON_ARGUMENT)) {
                 return true;
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java
index 6afb62abff..12ff07d99a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AggregateParamsList.java
@@ -43,7 +43,7 @@ class AggregateParamsList {
         isDistinct = false;
     }
 
-    static public AggregateParamsList createStarParam() {
+    public static AggregateParamsList createStarParam() {
         return new AggregateParamsList();
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
index b5627c2565..8a21d18fcd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterColumnStatsStmt.java
@@ -67,8 +67,8 @@ public class AlterColumnStatsStmt extends DdlStmt {
             throw new AnalysisException(optional.get() + " is invalid statistic");
         }
         // check auth
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(),
-                PrivPredicate.ALTER)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER COLUMN STATS",
                     ConnectContext.get().getQualifiedUser(),
                     ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java
index daacdadb91..d9ce04fdab 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseQuotaStmt.java
@@ -63,7 +63,8 @@ public class AlterDatabaseQuotaStmt extends DdlStmt {
         super.analyze(analyzer);
 
         if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR,
+                    analyzer.getQualifiedUser(), dbName);
         }
 
         if (Strings.isNullOrEmpty(dbName)) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java
index b7606a3e1a..ff9ed52e8f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterDatabaseRename.java
@@ -57,10 +57,9 @@ public class AlterDatabaseRename extends DdlStmt {
         }
 
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName,
-                                                               PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV,
-                                                                                              PaloPrivilege.ALTER_PRIV),
-                                                                                Operator.OR))) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+                PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, PaloPrivilege.ALTER_PRIV), Operator.OR))) {
+            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR,
+                    analyzer.getQualifiedUser(), dbName);
         }
 
         if (Strings.isNullOrEmpty(newDbName)) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java
index 0bd3f6f438..4656d81f2d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterRoutineLoadStmt.java
@@ -174,32 +174,35 @@ public class AlterRoutineLoadStmt extends DdlStmt {
         }
 
         if (jobProperties.containsKey(CreateRoutineLoadStmt.JSONPATHS)) {
-            analyzedJobProperties.put(CreateRoutineLoadStmt.JSONPATHS, jobProperties.get(CreateRoutineLoadStmt.JSONPATHS));
+            analyzedJobProperties.put(CreateRoutineLoadStmt.JSONPATHS,
+                    jobProperties.get(CreateRoutineLoadStmt.JSONPATHS));
         }
 
         if (jobProperties.containsKey(CreateRoutineLoadStmt.JSONROOT)) {
-            analyzedJobProperties.put(CreateRoutineLoadStmt.JSONROOT, jobProperties.get(CreateRoutineLoadStmt.JSONROOT));
+            analyzedJobProperties.put(CreateRoutineLoadStmt.JSONROOT,
+                    jobProperties.get(CreateRoutineLoadStmt.JSONROOT));
         }
 
         if (jobProperties.containsKey(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY)) {
-            boolean stripOuterArray = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY));
+            boolean stripOuterArray = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY));
             analyzedJobProperties.put(CreateRoutineLoadStmt.STRIP_OUTER_ARRAY, String.valueOf(stripOuterArray));
         }
 
         if (jobProperties.containsKey(CreateRoutineLoadStmt.NUM_AS_STRING)) {
-            boolean numAsString = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.NUM_AS_STRING));
+            boolean numAsString = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.NUM_AS_STRING));
             analyzedJobProperties.put(CreateRoutineLoadStmt.NUM_AS_STRING, String.valueOf(numAsString));
         }
 
         if (jobProperties.containsKey(CreateRoutineLoadStmt.FUZZY_PARSE)) {
-            boolean fuzzyParse = Boolean.valueOf(jobProperties.get(CreateRoutineLoadStmt.FUZZY_PARSE));
+            boolean fuzzyParse = Boolean.parseBoolean(jobProperties.get(CreateRoutineLoadStmt.FUZZY_PARSE));
             analyzedJobProperties.put(CreateRoutineLoadStmt.FUZZY_PARSE, String.valueOf(fuzzyParse));
         }
     }
 
     private void checkDataSourceProperties() throws UserException {
         if (!FeConstants.runningUnitTest) {
-            RoutineLoadJob job = Catalog.getCurrentCatalog().getRoutineLoadManager().checkPrivAndGetJob(getDbName(), getLabel());
+            RoutineLoadJob job = Catalog.getCurrentCatalog().getRoutineLoadManager()
+                    .checkPrivAndGetJob(getDbName(), getLabel());
             dataSourceProperties.setTimezone(job.getTimezone());
         } else {
             dataSourceProperties.setTimezone(TimeUtils.DEFAULT_TIME_ZONE);
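
The Boolean.valueOf -> Boolean.parseBoolean changes above avoid creating a boxed Boolean only to auto-unbox it into a primitive. Both methods parse case-insensitively and yield false for anything other than "true":

    final class ParseBooleanExample {
        public static void main(String[] args) {
            boolean viaParse = Boolean.parseBoolean("TRUE"); // primitive, no boxing
            Boolean viaValueOf = Boolean.valueOf("TRUE");    // boxed object
            System.out.println(viaParse + " " + viaValueOf); // true true
            System.out.println(Boolean.parseBoolean("yes")); // false: only "true" maps to true
        }
    }
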
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java
index cc2a13c228..a51e20d98a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterSqlBlockRuleStmt.java
@@ -73,16 +73,21 @@ public class AlterSqlBlockRuleStmt extends DdlStmt {
 
     private void setProperties(Map<String, String> properties) throws AnalysisException {
         this.sql = properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_PROPERTY, CreateSqlBlockRuleStmt.STRING_NOT_SET);
-        this.sqlHash = properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_HASH_PROPERTY, CreateSqlBlockRuleStmt.STRING_NOT_SET);
+        this.sqlHash = properties.getOrDefault(CreateSqlBlockRuleStmt.SQL_HASH_PROPERTY,
+                CreateSqlBlockRuleStmt.STRING_NOT_SET);
         String partitionNumString = properties.get(CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM);
         String tabletNumString = properties.get(CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM);
         String cardinalityString = properties.get(CreateSqlBlockRuleStmt.SCANNED_CARDINALITY);
 
         SqlBlockUtil.checkSqlAndSqlHashSetBoth(sql, sqlHash);
-        SqlBlockUtil.checkSqlAndLimitationsSetBoth(sql, sqlHash, partitionNumString, tabletNumString, cardinalityString);
-        this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM + " should be a long");
-        this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM + " should be a long");
-        this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, LONG_NOT_SET, null, CreateSqlBlockRuleStmt.SCANNED_CARDINALITY + " should be a long");
+        SqlBlockUtil.checkSqlAndLimitationsSetBoth(sql, sqlHash,
+                partitionNumString, tabletNumString, cardinalityString);
+        this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, LONG_NOT_SET, null,
+                CreateSqlBlockRuleStmt.SCANNED_PARTITION_NUM + " should be a long");
+        this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, LONG_NOT_SET, null,
+                CreateSqlBlockRuleStmt.SCANNED_TABLET_NUM + " should be a long");
+        this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, LONG_NOT_SET, null,
+                CreateSqlBlockRuleStmt.SCANNED_CARDINALITY + " should be a long");
         // allow null, represents no modification
         String globalStr = properties.get(CreateSqlBlockRuleStmt.GLOBAL_PROPERTY);
         this.global = StringUtils.isNotEmpty(globalStr) ? Boolean.parseBoolean(globalStr) : null;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java
index 42661b7bdd..b25f7c2897 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStatsStmt.java
@@ -61,8 +61,8 @@ public class AlterTableStatsStmt extends DdlStmt {
             throw new AnalysisException(optional.get() + " is invalid statistic");
         }
         // check auth
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(),
-                PrivPredicate.ALTER)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER TABLE STATS",
                     ConnectContext.get().getQualifiedUser(),
                     ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java
index 1085197b12..1734dcc0c7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterTableStmt.java
@@ -87,10 +87,12 @@ public class AlterTableStmt extends DdlStmt {
                 if (alterFeature == null || alterFeature == EnableFeatureClause.Features.UNKNOWN) {
                     throw new AnalysisException("unknown feature for alter clause");
                 }
-                if (table.getKeysType() != KeysType.UNIQUE_KEYS && alterFeature == EnableFeatureClause.Features.BATCH_DELETE) {
+                if (table.getKeysType() != KeysType.UNIQUE_KEYS
+                        && alterFeature == EnableFeatureClause.Features.BATCH_DELETE) {
                     throw new AnalysisException("Batch delete only supported in unique tables.");
                 }
-                if (table.getKeysType() != KeysType.UNIQUE_KEYS && alterFeature == EnableFeatureClause.Features.SEQUENCE_LOAD) {
+                if (table.getKeysType() != KeysType.UNIQUE_KEYS
+                        && alterFeature == EnableFeatureClause.Features.SEQUENCE_LOAD) {
                     throw new AnalysisException("Sequence load only supported in unique tables.");
                 }
                 // analyse sequence column
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java
index 124b7cc850..93a55b9908 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AlterViewStmt.java
@@ -51,11 +51,12 @@ public class AlterViewStmt extends BaseViewStmt {
 
         Table table = analyzer.getTableOrAnalysisException(tableName);
         if (!(table instanceof View)) {
-            throw new AnalysisException(String.format("ALTER VIEW not allowed on a table:%s.%s", getDbName(), getTable()));
+            throw new AnalysisException(String.format("ALTER VIEW not allowed on a table:%s.%s",
+                    getDbName(), getTable()));
         }
 
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(),
-                PrivPredicate.ALTER)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.ALTER)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "ALTER VIEW",
                     ConnectContext.get().getQualifiedUser(),
                     ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java
index b0147dff55..a241da5db3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticExpr.java
@@ -62,7 +62,7 @@ import java.util.Objects;
  * and need to be substituted as such; example: COUNT(COUNT(..)) OVER (..)
  */
 public class AnalyticExpr extends Expr {
-    private final static Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class);
+    private static final Logger LOG = LoggerFactory.getLogger(AnalyticExpr.class);
     private static String NTILE = "NTILE";
 
     private FunctionCallExpr fnCall;
@@ -134,12 +134,15 @@ public class AnalyticExpr extends Expr {
     public FunctionCallExpr getFnCall() {
         return fnCall;
     }
+
     public List<Expr> getPartitionExprs() {
         return partitionExprs;
     }
+
     public List<OrderByElement> getOrderByElements() {
         return orderByElements;
     }
+
     public AnalyticWindow getWindow() {
         return window;
     }
@@ -210,11 +213,10 @@ public class AnalyticExpr extends Expr {
                 || fn.functionName().equalsIgnoreCase(MAX) || fn.functionName().equalsIgnoreCase(COUNT)) {
             return true;
         }
-
         return false;
     }
 
-    static private boolean isOffsetFn(Function fn) {
+    private static boolean isOffsetFn(Function fn) {
         if (!isAnalyticFn(fn)) {
             return false;
         }
@@ -222,7 +224,7 @@ public class AnalyticExpr extends Expr {
         return fn.functionName().equalsIgnoreCase(LEAD) || fn.functionName().equalsIgnoreCase(LAG);
     }
 
-    static private boolean isMinMax(Function fn) {
+    private static boolean isMinMax(Function fn) {
         if (!isAnalyticFn(fn)) {
             return false;
         }
@@ -230,7 +232,7 @@ public class AnalyticExpr extends Expr {
         return fn.functionName().equalsIgnoreCase(MIN) || fn.functionName().equalsIgnoreCase(MAX);
     }
 
-    static private boolean isRankingFn(Function fn) {
+    private static boolean isRankingFn(Function fn) {
         if (!isAnalyticFn(fn)) {
             return false;
         }
@@ -241,7 +243,7 @@ public class AnalyticExpr extends Expr {
                || fn.functionName().equalsIgnoreCase(NTILE);
     }
 
-    static private boolean isHllAggFn(Function fn) {
+    private static boolean isHllAggFn(Function fn) {
         if (!isAnalyticFn(fn)) {
             return false;
         }
@@ -376,6 +378,7 @@ public class AnalyticExpr extends Expr {
                 + orderByElements.get(0).getExpr().toSql());
         }
     }
+
     /**
      * check the value out of range in lag/lead() function
      */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java
index 8b88f4c4fc..aa993e34bf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticInfo.java
@@ -36,7 +36,7 @@ import java.util.List;
  * the corresponding analytic result tuple and its substitution map.
  */
 public final class AnalyticInfo extends AggregateInfoBase {
-    private final static Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class);
+    private static final Logger LOG = LoggerFactory.getLogger(AnalyticInfo.class);
 
     // All unique analytic exprs of a select block. Used to populate
     // super.aggregateExprs_ based on AnalyticExpr.getFnCall() for each analytic expr
@@ -87,7 +87,7 @@ public final class AnalyticInfo extends AggregateInfoBase {
      * Creates complete AnalyticInfo for analyticExprs, including tuple descriptors and
      * smaps.
      */
-    static public AnalyticInfo create(ArrayList<Expr> analyticExprs, Analyzer analyzer) {
+    public static AnalyticInfo create(ArrayList<Expr> analyticExprs, Analyzer analyzer) {
         Preconditions.checkState(analyticExprs != null && !analyticExprs.isEmpty());
         Expr.removeDuplicates(analyticExprs);
         AnalyticInfo result = new AnalyticInfo(analyticExprs);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java
index b6bc06d3bb..4266d02888 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/AnalyticWindow.java
@@ -55,6 +55,7 @@ public class AnalyticWindow {
         public String toString() {
             return description;
         }
+
         public TAnalyticWindowType toThrift() {
             return this == ROWS ? TAnalyticWindowType.ROWS : TAnalyticWindowType.RANGE;
         }
@@ -77,6 +78,7 @@ public class AnalyticWindow {
         public String toString() {
             return description;
         }
+
         public TAnalyticWindowBoundaryType toThrift() {
             Preconditions.checkState(!isAbsolutePos());
 
@@ -140,6 +142,7 @@ public class AnalyticWindow {
         public BoundaryType getType() {
             return type;
         }
+
         public Expr getExpr() {
             return expr;
         }
@@ -243,12 +246,15 @@ public class AnalyticWindow {
     public Type getType() {
         return type;
     }
+
     public Boundary getLeftBoundary() {
         return leftBoundary;
     }
+
     public Boundary getRightBoundary() {
         return rightBoundary;
     }
+
     public Boundary setRightBoundary(Boundary b) {
         return rightBoundary = b;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java
index 2a598b77c1..997891253f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Analyzer.java
@@ -98,7 +98,7 @@ import java.util.stream.Collectors;
  * simple.
  */
 public class Analyzer {
-    private final static Logger LOG = LogManager.getLogger(Analyzer.class);
+    private static final Logger LOG = LogManager.getLogger(Analyzer.class);
     // used when an inline view contains analytic functions whose result tuple has changed
     private ExprSubstitutionMap changeResSmap = new ExprSubstitutionMap();
 
@@ -969,7 +969,8 @@ public class Analyzer {
      *     At this time, vectorization cannot support this situation,
      *     so it is necessary to fall back to non-vectorization for processing.
      *     For example:
-     *       Query: select * from t1 left join (select k1, count(k2) as count_k2 from t2 group by k1) tmp on t1.k1=tmp.k1
+     *       Query: select * from t1 left join
+     *              (select k1, count(k2) as count_k2 from t2 group by k1) tmp on t1.k1=tmp.k1
      *       Origin: tmp.k1 not null, tmp.count_k2 not null
      *       Result: throw VecNotImplException
      */
@@ -1528,6 +1529,7 @@ public class Analyzer {
     public Set<Expr> getGlobalInDeDuplication() {
         return Sets.newHashSet(globalState.globalInDeDuplication);
     }
+
     /**
      * Makes the given semi-joined tuple visible such that its slots can be referenced.
      * If tid is null, makes the currently visible semi-joined tuple invisible again.
@@ -2000,7 +2002,8 @@ public class Analyzer {
         if (globalState.context == null) {
             return false;
         }
-        return !globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() && !globalState.context.getSessionVariable().isDisableJoinReorder();
+        return !globalState.context.getSessionVariable().isEnableJoinReorderBasedCost()
+                && !globalState.context.getSessionVariable().isDisableJoinReorder();
     }
 
     public boolean enableInferPredicate() {
@@ -2028,7 +2031,8 @@ public class Analyzer {
         if (globalState.context == null) {
             return false;
         }
-        return globalState.context.getSessionVariable().isEnableJoinReorderBasedCost() && !globalState.context.getSessionVariable().isDisableJoinReorder();
+        return globalState.context.getSessionVariable().isEnableJoinReorderBasedCost()
+                && !globalState.context.getSessionVariable().isDisableJoinReorder();
     }
 
     public boolean safeIsEnableFoldConstantByBe() {
@@ -2176,6 +2180,7 @@ public class Analyzer {
     public List<Expr> getUnassignedConjuncts(PlanNode node) {
         return getUnassignedConjuncts(node.getTblRefIds());
     }
+
     /**
      * Returns true if e must be evaluated by a join node. Note that it may still be
      * safe to evaluate e elsewhere as well, but in any case the join must evaluate e.
@@ -2196,6 +2201,7 @@ public class Analyzer {
 
         return false;
     }
+
     /**
      * Mark all slots that are referenced in exprs as materialized.
      */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java
index bd1ac2d0b7..8629d771ba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ArithmeticExpr.java
@@ -77,15 +77,19 @@ public class ArithmeticExpr extends Expr {
         public String toString() {
             return description;
         }
+
         public String getName() {
             return name;
         }
+
         public OperatorPosition getPos() {
             return pos;
         }
+
         public TExprOpcode getOpcode() {
             return opcode;
         }
+
         public boolean isUnary() {
             return pos == OperatorPosition.UNARY_PREFIX
                     || pos == OperatorPosition.UNARY_POSTFIX;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java
index 43dee100be..a1eef45ec2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BackupStmt.java
@@ -28,8 +28,8 @@ import com.google.common.collect.Maps;
 import java.util.Map;
 
 public class BackupStmt extends AbstractBackupStmt {
-    private final static String PROP_TYPE = "type";
-    public final static String PROP_CONTENT = "content";
+    private static final String PROP_TYPE = "type";
+    public static final String PROP_CONTENT = "content";
 
     public enum BackupType {
         INCREMENTAL, FULL
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java
index 12d6aa7b24..7da7356b36 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BinaryPredicate.java
@@ -49,7 +49,7 @@ import java.util.Objects;
  * Most predicates with two operands..
  */
 public class BinaryPredicate extends Predicate implements Writable {
-    private final static Logger LOG = LogManager.getLogger(BinaryPredicate.class);
+    private static final Logger LOG = LogManager.getLogger(BinaryPredicate.class);
 
     // true if this BinaryPredicate is inferred from slot equivalences, false otherwise.
     private boolean isInferred = false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
index eb79f65267..f1155086fe 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BrokerDesc.java
@@ -41,11 +41,11 @@ import java.util.Map;
 //   "password" = "password0"
 // )
 public class BrokerDesc extends StorageDesc implements Writable {
-    private final static Logger LOG = LogManager.getLogger(BrokerDesc.class);
+    private static final Logger LOG = LogManager.getLogger(BrokerDesc.class);
 
     // just for multi load
-    public final static String MULTI_LOAD_BROKER = "__DORIS_MULTI_LOAD_BROKER__";
-    public final static String MULTI_LOAD_BROKER_BACKEND_KEY = "__DORIS_MULTI_LOAD_BROKER_BACKEND__";
+    public static final String MULTI_LOAD_BROKER = "__DORIS_MULTI_LOAD_BROKER__";
+    public static final String MULTI_LOAD_BROKER_BACKEND_KEY = "__DORIS_MULTI_LOAD_BROKER_BACKEND__";
 
     // Only used for recovery
     private BrokerDesc() {
@@ -108,6 +108,7 @@ public class BrokerDesc extends StorageDesc implements Writable {
         }
         return TFileType.FILE_BROKER;
     }
+
     public StorageBackend.StorageType storageType() {
         return storageType;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java
index 00b99e87de..d14f3400d0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/BuiltinAggregateFunction.java
@@ -40,6 +40,7 @@ public class BuiltinAggregateFunction extends Function {
     public boolean isAnalyticFn() {
         return isAnalyticFn;
     }
+
     // TODO: this is not used yet until the planner understands this.
     private org.apache.doris.catalog.Type intermediateType;
     private boolean reqIntermediateTuple = false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java
index 787d658552..40f0016453 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CaseExpr.java
@@ -300,7 +300,8 @@ public class CaseExpr extends Expr {
 
     // this method only compares literal values, and it is not completely consistent with BE, for two cases
     // 1 it does not deal with floats
-    // 2 just compare literal value with same type. for a example sql 'select case when 123 then '1' else '2' end as col'
+    // 2 it only compares literal values of the same type.
+    //      for example, for the sql 'select case when 123 then '1' else '2' end as col'
     //      BE will return '1', because BE only regards 0 as false,
     //      but the current LiteralExpr.compareLiteral won't regard `123` as true
     //  the case where the two values have different types is left to BE
@@ -349,7 +350,8 @@ public class CaseExpr extends Expr {
         // early return when the `when expr` can't be converted to constants
         Expr startExpr = expr.getChild(startIndex);
         if ((!startExpr.isLiteral() || startExpr instanceof DecimalLiteral || startExpr instanceof FloatLiteral)
-                || (!(startExpr instanceof NullLiteral) && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) {
+                || (!(startExpr instanceof NullLiteral)
+                && !startExpr.getClass().toString().equals(caseExpr.getClass().toString()))) {
             return expr;
         }
 
@@ -363,7 +365,9 @@ public class CaseExpr extends Expr {
             // 1 not literal
             // 2 float
             // 3 `case expr` and `when expr` don't have same type
-            if ((!currentWhenExpr.isLiteral() || currentWhenExpr instanceof DecimalLiteral || currentWhenExpr instanceof FloatLiteral)
+            if ((!currentWhenExpr.isLiteral()
+                    || currentWhenExpr instanceof DecimalLiteral
+                    || currentWhenExpr instanceof FloatLiteral)
                     || !currentWhenExpr.getClass().toString().equals(caseExpr.getClass().toString())) {
                 // remove the expr which has been evaluated
                 List<Expr> exprLeft = new ArrayList<>();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java
index f5dd01c112..5b4ed33e1c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CastExpr.java
@@ -160,7 +160,8 @@ public class CastExpr extends Expr {
                 if (toType.isNull() || disableRegisterCastingFunction(fromType, toType)) {
                     continue;
                 }
-                String beClass = toType.isDecimalV2() || fromType.isDecimalV2() ? "DecimalV2Operators" : "CastFunctions";
+                String beClass = toType.isDecimalV2()
+                        || fromType.isDecimalV2() ? "DecimalV2Operators" : "CastFunctions";
                 if (fromType.isTime()) {
                     beClass = "TimeOperators";
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java
index 13ff1438e6..86f7482f02 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ChannelDescription.java
@@ -62,7 +62,8 @@ public class ChannelDescription implements Writable {
     @SerializedName(value = "channelId")
     private long channelId;
 
-    public ChannelDescription(String srcDatabase, String srcTableName, String targetTable, PartitionNames partitionNames, List<String> colNames) {
+    public ChannelDescription(String srcDatabase, String srcTableName, String targetTable,
+            PartitionNames partitionNames, List<String> colNames) {
         this.srcDatabase = srcDatabase;
         this.srcTableName = srcTableName;
         this.targetTable = targetTable;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java
index cffa8df492..e98f4c2dfc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ColumnDef.java
@@ -50,8 +50,8 @@ public class ColumnDef {
      *     k1 INT NULL DEFAULT NULL
      *
      * ColumnnDef will be transformed to Column in Analysis phase, and in Column, default value is a String.
-     * No matter does the user set the default value as NULL explicitly, or not set default value,
-     * the default value in Column will be "null", so that Doris can not distinguish between "not set" and "set as null".
+     * Whether the user sets the default value to NULL explicitly or does not set a default value at all, the default
+     * value in Column will be "null", so Doris cannot distinguish between "not set" and "set as null".
      *
      * But this is OK because Column has another attribute "isAllowNull".
      * If the column is not allowed to be null, and user does not set the default value,
@@ -113,6 +113,7 @@ public class ColumnDef {
         this.comment = "";
         this.defaultValue = DefaultValue.NOT_SET;
     }
+
     public ColumnDef(String name, TypeDef typeDef, boolean isKey, AggregateType aggregateType,
                      boolean isAllowNull, DefaultValue defaultValue, String comment) {
         this(name, typeDef, isKey, aggregateType, isAllowNull, defaultValue, comment, true);
@@ -146,7 +147,8 @@ public class ColumnDef {
     }
 
     public static ColumnDef newSequenceColumnDef(Type type, AggregateType aggregateType) {
-        return new ColumnDef(Column.SEQUENCE_COL, new TypeDef(type), false, aggregateType, true, DefaultValue.NULL_DEFAULT_VALUE,
+        return new ColumnDef(Column.SEQUENCE_COL, new TypeDef(type), false,
+                aggregateType, true, DefaultValue.NULL_DEFAULT_VALUE,
                 "sequence column hidden column", false);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java
index d2175117a0..2672d5564c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CompoundPredicate.java
@@ -40,7 +40,7 @@ import java.util.Objects;
  * &&, ||, ! predicates.
  */
 public class CompoundPredicate extends Predicate {
-    private final static Logger LOG = LogManager.getLogger(CompoundPredicate.class);
+    private static final Logger LOG = LogManager.getLogger(CompoundPredicate.class);
     private final Operator op;
 
     public static void initBuiltins(FunctionSet functionSet) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java
index cbf0bdbf13..67159630c3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDataSyncJobStmt.java
@@ -96,10 +96,12 @@ public class CreateDataSyncJobStmt extends DdlStmt {
             Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(dbName);
             OlapTable olapTable = db.getOlapTableOrAnalysisException(tableName);
             if (olapTable.getKeysType() != KeysType.UNIQUE_KEYS) {
-                throw new AnalysisException("Table: " + tableName + " is not a unique table, key type: " + olapTable.getKeysType());
+                throw new AnalysisException("Table: " + tableName
+                        + " is not a unique table, key type: " + olapTable.getKeysType());
             }
             if (!olapTable.hasDeleteSign()) {
-                throw new AnalysisException("Table: " + tableName + " don't support batch delete. Please upgrade it to support, see `help alter table`.");
+                throw new AnalysisException("Table: " + tableName
+                        + " don't support batch delete. Please upgrade it to support, see `help alter table`.");
             }
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java
index 3ee94b70ae..90b4354844 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateDbStmt.java
@@ -65,7 +65,8 @@ public class CreateDbStmt extends DdlStmt {
         dbName = ClusterNamespace.getFullName(getClusterName(), dbName);
 
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.CREATE)) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+            ErrorReport.reportAnalysisException(
+                    ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java
index 5cbbc5102a..9115a25391 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateFunctionStmt.java
@@ -64,9 +64,9 @@ import java.net.URL;
 import java.net.URLClassLoader;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
-import java.util.HashMap;
 import java.time.LocalDate;
 import java.time.LocalDateTime;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -246,10 +246,12 @@ public class CreateFunctionStmt extends DdlStmt {
     }
 
     private void analyzeUda() throws AnalysisException {
-        AggregateFunction.AggregateFunctionBuilder builder = AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
+        AggregateFunction.AggregateFunctionBuilder builder
+                = AggregateFunction.AggregateFunctionBuilder.createUdfBuilder();
 
-        builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType()).
-                hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType()).location(URI.create(userFile));
+        builder.name(functionName).argsType(argsDef.getArgTypes()).retType(returnType.getType())
+                .hasVarArgs(argsDef.isVariadic()).intermediateType(intermediateType.getType())
+                .location(URI.create(userFile));
         String initFnSymbol = properties.get(INIT_KEY);
         if (initFnSymbol == null && !(binaryType == TFunctionBinaryType.JAVA_UDF)) {
             throw new AnalysisException("No 'init_fn' in properties");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
index b8125417ee..61e33892a4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
@@ -87,7 +87,8 @@ public class CreateMaterializedViewStmt extends DdlStmt {
     private String baseIndexName;
     private String dbName;
     private KeysType mvKeysType = KeysType.DUP_KEYS;
-    //if process is replaying log, isReplay is true, otherwise is false, avoid replay process error report, only in Rollup or MaterializedIndexMeta is true
+    // if the process is replaying the edit log, isReplay is true, otherwise false; this avoids error reports
+    // during replay, and it is true only in Rollup or MaterializedIndexMeta
     private boolean isReplay = false;
 
     public CreateMaterializedViewStmt(String mvName, SelectStmt selectStmt, Map<String, String> properties) {
@@ -317,7 +318,8 @@ public class CreateMaterializedViewStmt extends DdlStmt {
             for (; theBeginIndexOfValue < mvColumnItemList.size(); theBeginIndexOfValue++) {
                 MVColumnItem column = mvColumnItemList.get(theBeginIndexOfValue);
                 keySizeByte += column.getType().getIndexSize();
-                if (theBeginIndexOfValue + 1 > FeConstants.shortkey_max_column_count || keySizeByte > FeConstants.shortkey_maxsize_bytes) {
+                if (theBeginIndexOfValue + 1 > FeConstants.shortkey_max_column_count
+                        || keySizeByte > FeConstants.shortkey_maxsize_bytes) {
                     if (theBeginIndexOfValue == 0 && column.getType().getPrimitiveType().isCharFamily()) {
                         column.setIsKey(true);
                         theBeginIndexOfValue++;
@@ -413,8 +415,7 @@ public class CreateMaterializedViewStmt extends DdlStmt {
             default:
                 throw new AnalysisException("Unsupported function:" + functionName);
         }
-        MVColumnItem mvColumnItem = new MVColumnItem(mvColumnName, type, mvAggregateType, false, defineExpr, baseColumnName);
-        return mvColumnItem;
+        return new MVColumnItem(mvColumnName, type, mvAggregateType, false, defineExpr, baseColumnName);
     }
 
     public Map<String, Expr> parseDefineExprWithoutAnalyze() throws AnalysisException {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java
index eff6a8bb7c..a55d61d945 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateRoutineLoadStmt.java
@@ -417,7 +417,8 @@ public class CreateRoutineLoadStmt extends DdlStmt {
             throw new AnalysisException(optional.get() + " is invalid property");
         }
 
-        desiredConcurrentNum = ((Long) Util.getLongPropertyOrDefault(jobProperties.get(DESIRED_CONCURRENT_NUMBER_PROPERTY),
+        desiredConcurrentNum = ((Long) Util.getLongPropertyOrDefault(
+                jobProperties.get(DESIRED_CONCURRENT_NUMBER_PROPERTY),
                 Config.max_routine_load_task_concurrent_num, DESIRED_CONCURRENT_NUMBER_PRED,
                 DESIRED_CONCURRENT_NUMBER_PROPERTY + " should > 0")).intValue();
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java
index a458a468ea..4d2018cf3d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateSqlBlockRuleStmt.java
@@ -123,12 +123,17 @@ public class CreateSqlBlockRuleStmt extends DdlStmt {
         SqlBlockUtil.checkSqlAndSqlHashSetBoth(sql, sqlHash);
         SqlBlockUtil.checkPropertiesValidate(sql, sqlHash, partitionNumString, tabletNumString, cardinalityString);
 
-        this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, 0L, null, SCANNED_PARTITION_NUM + " should be a long");
-        this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, 0L, null, SCANNED_TABLET_NUM + " should be a long");
-        this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, 0L, null, SCANNED_CARDINALITY + " should be a long");
-
-        this.global = Util.getBooleanPropertyOrDefault(properties.get(GLOBAL_PROPERTY), false, GLOBAL_PROPERTY + " should be a boolean");
-        this.enable = Util.getBooleanPropertyOrDefault(properties.get(ENABLE_PROPERTY), true, ENABLE_PROPERTY + " should be a boolean");
+        this.partitionNum = Util.getLongPropertyOrDefault(partitionNumString, 0L, null,
+                SCANNED_PARTITION_NUM + " should be a long");
+        this.tabletNum = Util.getLongPropertyOrDefault(tabletNumString, 0L, null,
+                SCANNED_TABLET_NUM + " should be a long");
+        this.cardinality = Util.getLongPropertyOrDefault(cardinalityString, 0L, null,
+                SCANNED_CARDINALITY + " should be a long");
+
+        this.global = Util.getBooleanPropertyOrDefault(properties.get(GLOBAL_PROPERTY),
+                false, GLOBAL_PROPERTY + " should be a boolean");
+        this.enable = Util.getBooleanPropertyOrDefault(properties.get(ENABLE_PROPERTY),
+                true, ENABLE_PROPERTY + " should be a boolean");
     }
 
     public static void checkCommonProperties(Map<String, String> properties) throws UserException {
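
When the overlong line is a call with many arguments, as in the CreateSqlBlockRuleStmt and CreateRoutineLoadStmt hunks above, the fix is instead to break between arguments (or right after the opening parenthesis) at the continuation indent. A sketch under the same convention; the helper and property names here are made up:

    import java.util.Map;

    public class ArgumentWrapExample {
        static long parseLongOrDefault(String raw, long dflt, String errMsg) {
            try {
                return raw == null ? dflt : Long.parseLong(raw);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException(errMsg, e);
            }
        }

        long readPartitionLimit(Map<String, String> properties) {
            // Arguments spill onto continuation lines instead of letting
            // one call exceed the line-length limit.
            return parseLongOrDefault(properties.get("scanned_partition_num"), 0L,
                    "scanned_partition_num should be a long");
        }
    }
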
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java
index b65aaa4d28..1b1bf6dac6 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableLikeStmt.java
@@ -47,7 +47,8 @@ public class CreateTableLikeStmt extends DdlStmt {
     private final ArrayList<String> rollupNames;
     private final boolean withAllRollup;
 
-    public CreateTableLikeStmt(boolean ifNotExists, TableName tableName, TableName existedTableName, ArrayList<String> rollupNames, boolean withAllRollup) throws DdlException {
+    public CreateTableLikeStmt(boolean ifNotExists, TableName tableName, TableName existedTableName,
+            ArrayList<String> rollupNames, boolean withAllRollup) throws DdlException {
         this.ifNotExists = ifNotExists;
         this.tableName = tableName;
         this.existedTableName = existedTableName;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
index 44de0d27f9..56f20d2113 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
@@ -381,7 +381,8 @@ public class CreateTableStmt extends DdlStmt {
 
             if (columnDef.getType().isArrayType()) {
                 if (columnDef.getAggregateType() != null && columnDef.getAggregateType() != AggregateType.NONE) {
-                    throw new AnalysisException("Array column can't support aggregation " + columnDef.getAggregateType());
+                    throw new AnalysisException("Array column can't support aggregation "
+                            + columnDef.getAggregateType());
                 }
                 if (columnDef.isKey()) {
                     throw new AnalysisException("Array can only be used in the non-key column of"
@@ -409,7 +410,8 @@ public class CreateTableStmt extends DdlStmt {
                 if (partitionDesc instanceof ListPartitionDesc || partitionDesc instanceof RangePartitionDesc) {
                     partitionDesc.analyze(columnDefs, properties);
                 } else {
-                    throw new AnalysisException("Currently only support range and list partition with engine type olap");
+                    throw new AnalysisException("Currently only support range"
+                            + " and list partition with engine type olap");
                 }
 
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java
index 25bdf87b93..180d3f532f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateUserStmt.java
@@ -134,7 +134,8 @@ public class CreateUserStmt extends DdlStmt {
         }
 
         // check if current user has GRANT priv on GLOBAL or DATABASE level.
-        if (!Catalog.getCurrentCatalog().getAuth().checkHasPriv(ConnectContext.get(), PrivPredicate.GRANT, PrivLevel.GLOBAL, PrivLevel.DATABASE)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkHasPriv(ConnectContext.get(),
+                PrivPredicate.GRANT, PrivLevel.GLOBAL, PrivLevel.DATABASE)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java
index bda93a8651..86d7722305 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DataDescription.java
@@ -130,13 +130,14 @@ public class DataDescription {
      * For hadoop load, this param is also used for persistence.
      * The function in this param is copied from 'parsedColumnExprList'
      */
-    private final Map<String, Pair<String, List<String>>> columnToHadoopFunction = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
+    private final Map<String, Pair<String, List<String>>> columnToHadoopFunction
+            = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
 
     private boolean isHadoopLoad = false;
 
     private LoadTask.MergeType mergeType = LoadTask.MergeType.APPEND;
     private final Expr deleteCondition;
-    private Map<String, String> properties;
+    private final Map<String, String> properties;
 
     public DataDescription(String tableName,
                            PartitionNames partitionNames,
@@ -572,7 +573,8 @@ public class DataDescription {
      *      columnToHadoopFunction = {"col3": "strftime("%Y-%m-%d %H:%M:%S", tmp_col3)"}
      */
     private void analyzeColumns() throws AnalysisException {
-        if ((fileFieldNames == null || fileFieldNames.isEmpty()) && (columnsFromPath != null && !columnsFromPath.isEmpty())) {
+        if ((fileFieldNames == null || fileFieldNames.isEmpty())
+                && (columnsFromPath != null && !columnsFromPath.isEmpty())) {
             throw new AnalysisException("Can not specify columns_from_path without column_list");
         }
 
@@ -719,7 +721,8 @@ public class DataDescription {
         }
         // check olapTable schema and sequenceCol
         if (olapTable.hasSequenceCol() && !hasSequenceCol()) {
-            throw new AnalysisException("Table " + olapTable.getName() + " has sequence column, need to specify the sequence column");
+            throw new AnalysisException("Table " + olapTable.getName()
+                    + " has sequence column, need to specify the sequence column");
         }
         if (hasSequenceCol() && !olapTable.hasSequenceCol()) {
             throw new AnalysisException("There is no sequence column in the table " + olapTable.getName());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java
index a59952f55e..24f552e100 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DateLiteral.java
@@ -84,10 +84,10 @@ public class DateLiteral extends LiteralExpr {
     private static Map<String, Integer> MONTH_NAME_DICT = Maps.newHashMap();
     private static Map<String, Integer> MONTH_ABBR_NAME_DICT = Maps.newHashMap();
     private static Map<String, Integer> WEEK_DAY_NAME_DICT = Maps.newHashMap();
-    private final static int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-    private final static int ALLOW_SPACE_MASK = 4 | 64;
-    private final static int MAX_DATE_PARTS = 8;
-    private final static int YY_PART_YEAR = 70;
+    private static final int[] DAYS_IN_MONTH = new int[] {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+    private static final int ALLOW_SPACE_MASK = 4 | 64;
+    private static final int MAX_DATE_PARTS = 8;
+    private static final int YY_PART_YEAR = 70;
 
     static {
         try {
@@ -148,6 +148,7 @@ public class DateLiteral extends LiteralExpr {
 
     //Regex used to determine if the TIME field exists in date_format
     private static final Pattern HAS_TIME_PART = Pattern.compile("^.*[HhIiklrSsTp]+.*$");
+
     //Date Literal persist type in meta
     private enum DateLiteralType {
         DATETIME(0),
@@ -597,7 +598,9 @@ public class DateLiteral extends LiteralExpr {
                     case 'v': // %v Week (01..53), where Monday is the first day of the week; used with %x
                         builder.appendWeekOfWeekyear(2);
                         break;
-                    case 'x': // %x Year for the week, where Monday is the first day of the week, numeric, four digits; used with %v
+                    case 'x':
+                        // %x Year for the week, where Monday is the first day of the week,
+                        // numeric, four digits; used with %v
                         builder.appendWeekyear(4, 4);
                         break;
                     case 'W': // %W Weekday name (Sunday..Saturday)
@@ -614,9 +617,12 @@ public class DateLiteral extends LiteralExpr {
                     case 'U': // %U Week (00..53), where Sunday is the first day of the week
                     case 'u': // %u Week (00..53), where Monday is the first day of the week
                     case 'V': // %V Week (01..53), where Sunday is the first day of the week; used with %X
-                    case 'X': // %X Year for the week where Sunday is the first day of the week, numeric, four digits; used with %V
+                    case 'X':
+                        // %X Year for the week where Sunday is the first day of the week,
+                        // numeric, four digits; used with %V
                     case 'D': // %D Day of the month with English suffix (0th, 1st, 2nd, 3rd, …)
-                        throw new AnalysisException(String.format("%%%s not supported in date format string", character));
+                        throw new AnalysisException(
+                                String.format("%%%s not supported in date format string", character));
                     case '%': // %% A literal "%" character
                         builder.appendLiteral('%');
                         break;
@@ -957,7 +963,8 @@ public class DateLiteral extends LiteralExpr {
                 }
             } else if (format.charAt(fp) != ' ') {
                 if (format.charAt(fp) != value.charAt(vp)) {
-                    throw new InvalidFormatException("Invalid char: " + value.charAt(vp) + ", expected: " + format.charAt(fp));
+                    throw new InvalidFormatException("Invalid char: " + value.charAt(vp)
+                            + ", expected: " + format.charAt(fp));
                 }
                 fp++;
                 vp++;
@@ -1053,6 +1060,7 @@ public class DateLiteral extends LiteralExpr {
                 || hour > MAX_DATETIME.hour || minute > MAX_DATETIME.minute || second > MAX_DATETIME.second
                 || microsecond > MAX_MICROSECOND;
     }
+
     private boolean checkDate() {
         if (month != 0 && day > DAYS_IN_MONTH[((int) month)]) {
             if (month == 2 && day == 29 && Year.isLeap(year)) {
@@ -1195,7 +1203,8 @@ public class DateLiteral extends LiteralExpr {
             int start = pre;
             int tempVal = 0;
             boolean scanToDelim = (!isIntervalFormat) && (fieldIdx != 6);
-            while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre)) && (scanToDelim || fieldLen-- != 0)) {
+            while (pre < dateStr.length() && Character.isDigit(dateStr.charAt(pre))
+                    && (scanToDelim || fieldLen-- != 0)) {
                 tempVal = tempVal * 10 + (dateStr.charAt(pre++) - '0');
             }
             dateVal[fieldIdx] = tempVal;
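
Two smaller conventions recur in the DateLiteral hunks: an end-of-line comment that would overflow the limit moves onto its own lines above the statement it describes, and a blank line now separates adjacent members. A compilable sketch of both (contents are illustrative):

    public class CommentAndSpacingExample {
        // %x Year for the week, where Monday is the first day of the
        // week; a comment this long sits above the case label rather
        // than trailing it.
        static int fieldWidth(char ch) {
            switch (ch) {
                case 'x':
                    return 4;
                default:
                    return 2;
            }
        }

        // A blank line separates this member from the one above.
        static boolean hasTimePart(String format) {
            return format.matches("^.*[HhIiklrSsTp]+.*$");
        }
    }
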
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java
index 87c22b5242..e938a46361 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DecimalLiteral.java
@@ -107,7 +107,8 @@ public class DecimalLiteral extends LiteralExpr {
     @Override
     public ByteBuffer getHashValue(PrimitiveType type) {
         ByteBuffer buffer;
-        // no need to consider the overflow when cast decimal to other type, because this func only be used when querying, not storing.
+        // no need to consider overflow when casting decimal to another type,
+        // because this function is only used when querying, not storing.
         // e.g. For column A with type INT, the data stored certainly no overflow.
         switch (type) {
             case TINYINT:
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
index 9cfd26eece..465813177b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DeleteStmt.java
@@ -132,7 +132,8 @@ public class DeleteStmt extends DdlStmt {
             int inElementNum = inPredicate.getInElementNum();
             int maxAllowedInElementNumOfDelete = Config.max_allowed_in_element_num_of_delete;
             if (inElementNum > maxAllowedInElementNumOfDelete) {
-                throw new AnalysisException("Element num of in predicate should not be more than " + maxAllowedInElementNumOfDelete);
+                throw new AnalysisException("Element num of in predicate should not be more than "
+                        + maxAllowedInElementNumOfDelete);
             }
             for (int i = 1; i <= inPredicate.getInElementNum(); i++) {
                 Expr expr = inPredicate.getChild(i);
@@ -142,7 +143,8 @@ public class DeleteStmt extends DdlStmt {
             }
             deleteConditions.add(inPredicate);
         } else {
-            throw new AnalysisException("Where clause only supports compound predicate, binary predicate, is_null predicate or in predicate");
+            throw new AnalysisException("Where clause only supports compound predicate,"
+                    + " binary predicate, is_null predicate or in predicate");
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java
index a2c5139adb..257ea98c83 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescribeStmt.java
@@ -169,7 +169,8 @@ public class DescribeStmt extends ShowStmt {
                                     column.getOriginType().toString(),
                                     column.isAllowNull() ? "Yes" : "No",
                                     ((Boolean) column.isKey()).toString(),
-                                    column.getDefaultValue() == null ? FeConstants.null_string : column.getDefaultValue(),
+                                    column.getDefaultValue() == null
+                                            ? FeConstants.null_string : column.getDefaultValue(),
                                     extraStr,
                                     ((Boolean) column.isVisible()).toString()
                             );
@@ -221,6 +222,7 @@ public class DescribeStmt extends ShowStmt {
     public String getTableName() {
         return dbTableName.getTbl();
     }
+
     public String getDb() {
         return dbTableName.getDb();
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java
index f733e3a9a6..00139913d9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DescriptorTable.java
@@ -41,7 +41,7 @@ import java.util.List;
  * them unique ids.
  */
 public class DescriptorTable {
-    private final static Logger LOG = LogManager.getLogger(DescriptorTable.class);
+    private static final Logger LOG = LogManager.getLogger(DescriptorTable.class);
 
     private final HashMap<TupleId, TupleDescriptor> tupleDescs = new HashMap<TupleId, TupleDescriptor>();
     // List of referenced tables with no associated TupleDescriptor to ship to the BE.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java
index 2c01a967c4..43cc9f5b47 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/DropDbStmt.java
@@ -61,7 +61,8 @@ public class DropDbStmt extends DdlStmt {
         dbName = ClusterNamespace.getFullName(getClusterName(), dbName);
         // Don't allowed to drop 'information_schema'
         if (dbName.equalsIgnoreCase(ClusterNamespace.getFullName(getClusterName(), InfoSchemaDb.DATABASE_NAME))) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR,
+                    analyzer.getQualifiedUser(), dbName);
         }
 
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.DROP)) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java
index 6df62c42f4..b8f2c2c9be 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExportStmt.java
@@ -54,7 +54,7 @@ import java.util.UUID;
 //          [PROPERTIES("key"="value")]
 //          BY BROKER 'broker_name' [( $broker_attrs)]
 public class ExportStmt extends StatementBase {
-    private final static Logger LOG = LogManager.getLogger(ExportStmt.class);
+    private static final Logger LOG = LogManager.getLogger(ExportStmt.class);
 
     public static final String TABLET_NUMBER_PER_TASK_PROP = "tablet_num_per_task";
     public static final String LABEL = "label";
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java
index 5387539254..5c5a6d8c55 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Expr.java
@@ -60,7 +60,7 @@ import java.util.Set;
 /**
  * Root of the expr node hierarchy.
  */
-abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneable, Writable {
+public abstract class Expr extends TreeNode<Expr> implements ParseNode, Cloneable, Writable {
     private static final Logger LOG = LogManager.getLogger(Expr.class);
 
     // Name of the function that needs to be implemented by every Expr that
@@ -70,10 +70,10 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     // to be used where we can't come up with a better estimate
     public static final double DEFAULT_SELECTIVITY = 0.1;
 
-    public final static float FUNCTION_CALL_COST = 10;
+    public static final float FUNCTION_CALL_COST = 10;
 
     // returns true if an Expr is a non-analytic aggregate.
-    private final static com.google.common.base.Predicate<Expr> IS_AGGREGATE_PREDICATE =
+    private static final com.google.common.base.Predicate<Expr> IS_AGGREGATE_PREDICATE =
             new com.google.common.base.Predicate<Expr>() {
                 public boolean apply(Expr arg) {
                     return arg instanceof FunctionCallExpr
@@ -82,7 +82,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             };
 
     // Returns true if an Expr is a NOT CompoundPredicate.
-    public final static com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
+    public static final com.google.common.base.Predicate<Expr> IS_NOT_PREDICATE =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -92,7 +92,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             };
 
     // Returns true if an Expr is an OR CompoundPredicate.
-    public final static com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
+    public static final com.google.common.base.Predicate<Expr> IS_OR_PREDICATE =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -102,7 +102,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             };
 
     // Returns true if an Expr is a scalar subquery
-    public final static com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
+    public static final com.google.common.base.Predicate<Expr> IS_SCALAR_SUBQUERY =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -112,7 +112,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
 
     // Returns true if an Expr is an aggregate function that returns non-null on
     // an empty set (e.g. count).
-    public final static com.google.common.base.Predicate<Expr> NON_NULL_EMPTY_AGG =
+    public static final com.google.common.base.Predicate<Expr> NON_NULL_EMPTY_AGG =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -121,7 +121,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             };
 
     // Returns true if an Expr is a builtin aggregate function.
-    public final static com.google.common.base.Predicate<Expr> CORRELATED_SUBQUERY_SUPPORT_AGG_FN =
+    public static final com.google.common.base.Predicate<Expr> CORRELATED_SUBQUERY_SUPPORT_AGG_FN =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -139,7 +139,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             };
 
 
-    public final static com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
+    public static final com.google.common.base.Predicate<Expr> IS_TRUE_LITERAL =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -147,7 +147,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
                 }
             };
 
-    public final static com.google.common.base.Predicate<Expr> IS_FALSE_LITERAL =
+    public static final com.google.common.base.Predicate<Expr> IS_FALSE_LITERAL =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -155,7 +155,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
                 }
             };
 
-    public final static com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
+    public static final com.google.common.base.Predicate<Expr> IS_EQ_BINARY_PREDICATE =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -163,7 +163,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
                 }
             };
 
-    public final static com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
+    public static final com.google.common.base.Predicate<Expr> IS_BINARY_PREDICATE =
             new com.google.common.base.Predicate<Expr>() {
                 @Override
                 public boolean apply(Expr arg) {
@@ -423,7 +423,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     /**
      * Does subclass-specific analysis. Subclasses should override analyzeImpl().
      */
-    abstract protected void analyzeImpl(Analyzer analyzer) throws AnalysisException;
+    protected abstract void analyzeImpl(Analyzer analyzer) throws AnalysisException;
 
     /**
      * Set the expr to be analyzed and computes isConstant_.
@@ -665,6 +665,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             }
         }
     }
+
     /**
      * Returns true if the list contains an aggregate expr.
      */
@@ -828,6 +829,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
             child.markAgg();
         }
     }
+
     /**
      * Returns the product of the given exprs' number of distinct values or -1 if any of
      * the exprs have an invalid number of distinct values.
@@ -1354,7 +1356,8 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
                 && (this.type.isStringType() || this.type.isHllType())) {
             return this;
         }
-        // Preconditions.checkState(PrimitiveType.isImplicitCast(type, targetType), "cast %s to %s", this.type, targetType);
+        // Preconditions.checkState(PrimitiveType.isImplicitCast(type, targetType),
+        // "cast %s to %s", this.type, targetType);
         // TODO(zc): use implicit cast
         if (!Type.canCastTo(this.type, targetType)) {
             throw new AnalysisException("type not match, originType=" + this.type
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java
index 79b2fc721e..2045b71367 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprId.java
@@ -27,7 +27,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
 public class ExprId extends Id<ExprId> {
-    private final static Logger LOG = LogManager.getLogger(ExprId.class);
+    private static final Logger LOG = LogManager.getLogger(ExprId.class);
 
     // Construction only allowed via an IdGenerator.
     public ExprId(int id) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java
index 46b9caa0fa..966cfa7e0a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExprSubstitutionMap.java
@@ -37,7 +37,7 @@ import java.util.List;
  * See Expr.substitute() and related functions for details on the actual substitution.
  */
 public final class ExprSubstitutionMap {
-    private final static Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class);
+    private static final Logger LOG = LoggerFactory.getLogger(ExprSubstitutionMap.class);
 
     private boolean checkAnalyzed = true;
     private List<Expr> lhs; // left-hand side
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java
index 220fe55299..035cd8a67b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ExpressionFunctions.java
@@ -75,7 +75,8 @@ public enum ExpressionFunctions {
             // 2. Not in NonNullResultWithNullParamFunctions
             // 3. Has null parameter
             if ((fn.getNullableMode() == Function.NullableMode.DEPEND_ON_ARGUMENT
-                    || Catalog.getCurrentCatalog().isNullResultWithOneNullParamFunction(fn.getFunctionName().getFunction()))
+                    || Catalog.getCurrentCatalog().isNullResultWithOneNullParamFunction(
+                            fn.getFunctionName().getFunction()))
                     && !fn.isUdf()) {
                 for (Expr e : constExpr.getChildren()) {
                     if (e instanceof NullLiteral) {
@@ -205,7 +206,8 @@ public enum ExpressionFunctions {
                 if (argType.isArray()) {
                     Preconditions.checkArgument(method.getParameterTypes().length == typeIndex + 1);
                     final List<Expr> variableLengthExprs = Lists.newArrayList();
-                    for (int variableLengthArgIndex = typeIndex; variableLengthArgIndex < args.size(); variableLengthArgIndex++) {
+                    for (int variableLengthArgIndex = typeIndex;
+                            variableLengthArgIndex < args.size(); variableLengthArgIndex++) {
                         variableLengthExprs.add(args.get(variableLengthArgIndex));
                     }
                     LiteralExpr[] variableLengthArgs = createVariableLengthArgs(variableLengthExprs, typeIndex);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java
index a9ca2504a1..26f55a321f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionCallExpr.java
@@ -466,7 +466,8 @@ public class FunctionCallExpr extends Expr {
 
         if (fnName.getFunction().equalsIgnoreCase("json_object")) {
             if ((children.size() & 1) == 1 && (originChildSize == children.size())) {
-                throw new AnalysisException("json_object can't be odd parameters, need even parameters: " + this.toSql());
+                throw new AnalysisException("json_object can't be odd parameters, need even parameters: "
+                        + this.toSql());
             }
             String res = parseJsonDataType(true);
             if (children.size() == originChildSize) {
@@ -505,7 +506,8 @@ public class FunctionCallExpr extends Expr {
                 if (children.size() > 2) {
                     if (!getChild(1).isConstant() || !getChild(2).isConstant()) {
                         throw new AnalysisException(
-                                "The default parameter (parameter 2 or parameter 3) of LEAD/LAG must be a constant: " + this.toSql());
+                                "The default parameter (parameter 2 or parameter 3) of LEAD/LAG must be a constant: "
+                                        + this.toSql());
                     }
                     uncheckedCastChild(Type.BIGINT, 1);
                     if (!getChild(2).type.matchesType(getChild(0).type) && !getChild(2).type.matchesType(Type.NULL)) {
@@ -590,7 +592,8 @@ public class FunctionCallExpr extends Expr {
             }
             Type inputType = getChild(0).getType();
             if (!inputType.isBitmapType()) {
-                throw new AnalysisException(fnName + " function's argument should be of BITMAP type, but was " + inputType);
+                throw new AnalysisException(fnName
+                        + " function's argument should be of BITMAP type, but was " + inputType);
             }
             return;
         }
@@ -601,7 +604,8 @@ public class FunctionCallExpr extends Expr {
             }
             Type inputType = getChild(0).getType();
             if (!inputType.isQuantileStateType()) {
-                throw new AnalysisException(fnName + " function's argument should be of QUANTILE_STATE type, but was" + inputType);
+                throw new AnalysisException(fnName
+                        + " function's argument should be of QUANTILE_STATE type, but was" + inputType);
             }
         }
 
@@ -885,7 +889,8 @@ public class FunctionCallExpr extends Expr {
             }
             for (int i = 3; i < children.size(); i++) {
                 if (children.get(i).type != Type.BOOLEAN) {
-                    throw new AnalysisException("The 4th and subsequent params of " + fnName + " function must be boolean");
+                    throw new AnalysisException("The 4th and subsequent params of "
+                            + fnName + " function must be boolean");
                 }
                 childTypes[i] = children.get(i).type;
             }
@@ -919,7 +924,8 @@ public class FunctionCallExpr extends Expr {
                 if (fn == null) {
                     if (!analyzer.isUDFAllowed()) {
                         throw new AnalysisException(
-                                "Does not support non-builtin functions, or function does not exist: " + this.toSqlImpl());
+                                "Does not support non-builtin functions, or function does not exist: "
+                                        + this.toSqlImpl());
                     }
 
                     String dbName = fnName.analyzeDb(analyzer);
@@ -1081,7 +1087,8 @@ public class FunctionCallExpr extends Expr {
         List<Expr> inputParamsExprs = retExpr.fnParams.exprs();
         List<String> parameters = ((AliasFunction) retExpr.fn).getParameters();
         Preconditions.checkArgument(inputParamsExprs.size() == parameters.size(),
-                "Alias function [" + retExpr.fn.getFunctionName().getFunction() + "] args number is not equal to it's definition");
+                "Alias function [" + retExpr.fn.getFunctionName().getFunction()
+                        + "] args number is not equal to it's definition");
         List<Expr> oriParamsExprs = oriExpr.fnParams.exprs();
 
         // replace origin function params exprs' with input params expr depending on parameter name
@@ -1108,7 +1115,8 @@ public class FunctionCallExpr extends Expr {
      * @return
      * @throws AnalysisException
      */
-    private Expr replaceParams(List<String> parameters, List<Expr> inputParamsExprs, Expr oriExpr) throws AnalysisException {
+    private Expr replaceParams(List<String> parameters, List<Expr> inputParamsExprs, Expr oriExpr)
+            throws AnalysisException {
         for (int i = 0; i < oriExpr.getChildren().size(); i++) {
             Expr retExpr = replaceParams(parameters, inputParamsExprs, oriExpr.getChild(i));
             oriExpr.setChild(i, retExpr);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java
index 8234e25201..32cfba0351 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/FunctionParams.java
@@ -58,7 +58,7 @@ public class FunctionParams implements Writable {
         isDistinct = false;
     }
 
-    static public FunctionParams createStarParam() {
+    public static FunctionParams createStarParam() {
         return new FunctionParams();
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java
index 18621f19b2..4e849e7c61 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GrantStmt.java
@@ -59,7 +59,8 @@ public class GrantStmt extends DdlStmt {
         this.privileges = privs.toPrivilegeList();
     }
 
-    public GrantStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List<AccessPrivilege> privileges) {
+    public GrantStmt(UserIdentity userIdent, String role,
+            ResourcePattern resourcePattern, List<AccessPrivilege> privileges) {
         this.userIdent = userIdent;
         this.role = role;
         this.tblPattern = null;
@@ -168,12 +169,14 @@ public class GrantStmt extends DdlStmt {
                     ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
                 }
             } else if (tblPattern.getPrivLevel() == PrivLevel.DATABASE) {
-                if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), tblPattern.getQualifiedDb(), PrivPredicate.GRANT)) {
+                if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(),
+                        tblPattern.getQualifiedDb(), PrivPredicate.GRANT)) {
                     ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
                 }
             } else {
                 // table level
-                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tblPattern.getQualifiedDb(), tblPattern.getTbl(), PrivPredicate.GRANT)) {
+                if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(),
+                        tblPattern.getQualifiedDb(), tblPattern.getTbl(), PrivPredicate.GRANT)) {
                     ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
                 }
             }
@@ -204,7 +207,8 @@ public class GrantStmt extends DdlStmt {
                     ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
                 }
             } else {
-                if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(), resourcePattern.getResourceName(), PrivPredicate.GRANT)) {
+                if (!Catalog.getCurrentCatalog().getAuth().checkResourcePriv(ConnectContext.get(),
+                        resourcePattern.getResourceName(), PrivPredicate.GRANT)) {
                     ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT");
                 }
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java
index d74218c69e..f8d490977e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/GroupByClause.java
@@ -45,10 +45,10 @@ import java.util.stream.Collectors;
  * In this class we produce the rule of generating rows base on the group by clause.
  */
 public class GroupByClause implements ParseNode {
-    private final static Logger LOG = LogManager.getLogger(GroupByClause.class);
+    private static final Logger LOG = LogManager.getLogger(GroupByClause.class);
 
     // max num of distinct sets in grouping sets clause
-    private final static int MAX_GROUPING_SETS_NUM = 64;
+    private static final int MAX_GROUPING_SETS_NUM = 64;
     // max num of distinct expressions
     private boolean analyzed = false;
     private boolean exprGenerated = false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java
index 666b143305..f0f9c421b8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InlineViewRef.java
@@ -288,7 +288,8 @@ public class InlineViewRef extends TableRef {
                     false, null, selectItemExpr.isNullable(),
                     null, ""));
         }
-        InlineView inlineView = (view != null) ? new InlineView(view, columnList) : new InlineView(getExplicitAlias(), columnList);
+        InlineView inlineView = (view != null) ? new InlineView(view, columnList)
+                : new InlineView(getExplicitAlias(), columnList);
 
         // Create the non-materialized tuple and set the fake table in it.
         TupleDescriptor result = analyzer.getDescTbl().createTupleDescriptor();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java
index 579d9d1512..62a8a9887e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/InsertStmt.java
@@ -143,7 +143,8 @@ public class InsertStmt extends DdlStmt {
             isUserSpecifiedLabel = true;
         }
 
-        this.isValuesOrConstantSelect = (queryStmt instanceof SelectStmt && ((SelectStmt) queryStmt).getTableRefs().isEmpty());
+        this.isValuesOrConstantSelect = (queryStmt instanceof SelectStmt
+                && ((SelectStmt) queryStmt).getTableRefs().isEmpty());
     }
 
     // Ctor for CreateTableAsSelectStmt
@@ -187,7 +188,8 @@ public class InsertStmt extends DdlStmt {
         return tblName.getTbl();
     }
 
-    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet) throws AnalysisException {
+    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet)
+            throws AnalysisException {
         // get dbs of statement
         queryStmt.getTables(analyzer, tableMap, parentViewNameSet);
         tblName.analyze(analyzer);
@@ -269,8 +271,8 @@ public class InsertStmt extends DdlStmt {
         if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tblName.getDb(),
                                                                 tblName.getTbl(), PrivPredicate.LOAD)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "LOAD",
-                                                ConnectContext.get().getQualifiedUser(),
-                                                ConnectContext.get().getRemoteIP(), tblName.getDb() + ": " + tblName.getTbl());
+                    ConnectContext.get().getQualifiedUser(),
+                    ConnectContext.get().getRemoteIP(), tblName.getDb() + ": " + tblName.getTbl());
         }
 
         // check partition
@@ -457,7 +459,8 @@ public class InsertStmt extends DdlStmt {
             if (column.isNameWithPrefix(CreateMaterializedViewStmt.MATERIALIZED_VIEW_NAME_PREFIX)) {
                 SlotRef refColumn = column.getRefColumn();
                 if (refColumn == null) {
-                    ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_FIELD_ERROR, column.getName(), targetTable.getName());
+                    ErrorReport.reportAnalysisException(ErrorCode.ERR_BAD_FIELD_ERROR,
+                            column.getName(), targetTable.getName());
                 }
                 String origName = refColumn.getColumnName();
                 for (int originColumnIdx = 0; originColumnIdx < targetColumns.size(); originColumnIdx++) {
@@ -526,7 +529,8 @@ public class InsertStmt extends DdlStmt {
                         ExprSubstitutionMap smap = new ExprSubstitutionMap();
                         smap.getLhs().add(entry.second.getRefColumn());
                         smap.getRhs().add(queryStmt.getResultExprs().get(entry.first));
-                        Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0);
+                        Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()),
+                                smap, analyzer, false).get(0);
                         queryStmt.getResultExprs().add(e);
                     }
                 }
@@ -551,7 +555,8 @@ public class InsertStmt extends DdlStmt {
                         ExprSubstitutionMap smap = new ExprSubstitutionMap();
                         smap.getLhs().add(entry.second.getRefColumn());
                         smap.getRhs().add(queryStmt.getResultExprs().get(entry.first));
-                        Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0);
+                        Expr e = Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()),
+                                smap, analyzer, false).get(0);
                         queryStmt.getBaseTblResultExprs().add(e);
                     }
                 }
@@ -605,7 +610,8 @@ public class InsertStmt extends DdlStmt {
                         ExprSubstitutionMap smap = new ExprSubstitutionMap();
                         smap.getLhs().add(entry.second.getRefColumn());
                         smap.getRhs().add(extentedRow.get(entry.first));
-                        extentedRow.add(Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()), smap, analyzer, false).get(0));
+                        extentedRow.add(Expr.substituteList(Lists.newArrayList(entry.second.getDefineExpr()),
+                                smap, analyzer, false).get(0));
                     }
                 }
             }
@@ -620,7 +626,8 @@ public class InsertStmt extends DdlStmt {
 
             if (expr instanceof DefaultValueExpr) {
                 if (targetColumns.get(i).getDefaultValue() == null) {
-                    throw new AnalysisException("Column has no default value, column=" + targetColumns.get(i).getName());
+                    throw new AnalysisException("Column has no default value, column="
+                            + targetColumns.get(i).getName());
                 }
                 expr = new StringLiteral(targetColumns.get(i).getDefaultValue());
             }
@@ -727,7 +734,8 @@ public class InsertStmt extends DdlStmt {
         if (!isExplain() && targetTable instanceof OlapTable) {
             ((OlapTableSink) dataSink).complete();
             // add table indexes to transaction state
-            TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr().getTransactionState(db.getId(), transactionId);
+            TransactionState txnState = Catalog.getCurrentGlobalTransactionMgr()
+                    .getTransactionState(db.getId(), transactionId);
             if (txnState == null) {
                 throw new DdlException("txn does not exist: " + transactionId);
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java
index a196c89f55..8c17ff9af7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/IsNullPredicate.java
@@ -143,6 +143,7 @@ public class IsNullPredicate extends Predicate {
     public boolean isNullable() {
         return false;
     }
+
     /**
      * fix issue 6390
      */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java
index 0e3fd0c7a8..33f13c74e1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LargeIntLiteral.java
@@ -39,7 +39,7 @@ import java.util.Objects;
 
 // large int for the num that native types can not
 public class LargeIntLiteral extends LiteralExpr {
-    private final static Logger LOG = LogManager.getLogger(LargeIntLiteral.class);
+    private static final Logger LOG = LogManager.getLogger(LargeIntLiteral.class);
 
     // -2^127
     public static final BigInteger LARGE_INT_MIN = new BigInteger("-170141183460469231731687303715884105728");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java
index 1c12ad69e5..469f4d4523 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LoadStmt.java
@@ -124,7 +124,7 @@ public class LoadStmt extends DdlStmt {
 
     private EtlJobType etlJobType = EtlJobType.UNKNOWN;
 
-    public final static ImmutableMap<String, Function> PROPERTIES_MAP = new ImmutableMap.Builder<String, Function>()
+    public static final ImmutableMap<String, Function> PROPERTIES_MAP = new ImmutableMap.Builder<String, Function>()
             .put(TIMEOUT_PROPERTY, new Function<String, Long>() {
                 @Override
                 public @Nullable Long apply(@Nullable String s) {
@@ -337,7 +337,8 @@ public class LoadStmt extends DdlStmt {
             }
             Database db = Catalog.getCurrentCatalog().getDbOrAnalysisException(label.getDbName());
             OlapTable table = db.getOlapTableOrAnalysisException(dataDescription.getTableName());
-            if (dataDescription.getMergeType() != LoadTask.MergeType.APPEND && table.getKeysType() != KeysType.UNIQUE_KEYS) {
+            if (dataDescription.getMergeType() != LoadTask.MergeType.APPEND
+                    && table.getKeysType() != KeysType.UNIQUE_KEYS) {
                 throw new AnalysisException("load by MERGE or DELETE is only supported in unique tables.");
             }
             if (dataDescription.getMergeType() != LoadTask.MergeType.APPEND && !table.hasDeleteSign()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java
index 07b51caf1b..4fbaa33651 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/LockTable.java
@@ -34,6 +34,7 @@ public class LockTable {
             return desc;
         }
     }
+
     private TableName tableName;
     private String alias;
     private LockType lockType;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java
index 45bf9948f8..8dabace3a1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/MVColumnBitmapUnionPattern.java
@@ -61,7 +61,8 @@ public class MVColumnBitmapUnionPattern implements MVColumnPattern {
 
     @Override
     public String toString() {
-        return FunctionSet.BITMAP_UNION + "(" + FunctionSet.TO_BITMAP + "(column)), type of column could not be integer. "
+        return FunctionSet.BITMAP_UNION + "(" + FunctionSet.TO_BITMAP
+                + "(column)), type of column could not be integer. "
                 + "Or " + FunctionSet.BITMAP_UNION + "(bitmap_column) in agg table";
     }
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
index 37b50fa73e..d91b770ec9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
@@ -82,7 +82,8 @@ public class ModifyTablePropertiesClause extends AlterTableClause {
         } else if (properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM)
                 || properties.containsKey("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION)) {
             ReplicaAllocation replicaAlloc = PropertyAnalyzer.analyzeReplicaAllocation(properties, "default");
-            properties.put("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION, replicaAlloc.toCreateStmt());
+            properties.put("default." + PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION,
+                    replicaAlloc.toCreateStmt());
         } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INMEMORY)) {
             this.needTableStable = false;
             this.opType = AlterOpType.MODIFY_TABLE_PROPERTY_SYNC;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java
deleted file mode 100644
index 8b3f9f8460..0000000000
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OpcodeRegistry.java
+++ /dev/null
@@ -1,314 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.doris.analysis;
-
-/**
- * The OpcodeRegistry provides a mapping between function signatures and opcodes. The
- * supported functions are code-gen'ed and added to the registry with an assigned opcode.
- * The opcode is shared with the backend.  The frontend can use the registry to look up
- * a function's opcode.
- * <p/>
- * The OpcodeRegistry also contains a mapping between function names (as strings) to
- * operators.
- * <p/>
- * The OpcodeRegistry is a singleton.
- * <p/>
- * TODO: The opcode registry should be versioned in the FE/BE.
- */
-public class OpcodeRegistry {
-//
-//    private final static Logger         LOG      = LogManager.getLogger(OpcodeRegistry.class);
-//    private static       OpcodeRegistry instance = new OpcodeRegistry();
-//    /**
-//     * This is a mapping of Operator,#args to signatures with a fixed number of arguments.
-//     * The signature is defined by the operator enum and the arguments
-//     * and is a one to one mapping to opcodes.
-//     * The map is structured this way to more efficiently look for signature matches.
-//     * Signatures that have the same number of arguments have a potential to be matches
-//     * by allowing types to be implicitly cast.
-//     * Functions with a variable number of arguments are put into the varArgOperations map.
-//     */
-//    private final Map<Pair<FunctionOperator, Integer>, List<BuiltinFunction>> operations;
-//    /**
-//     * This is a mapping of Operator,varArgType to signatures of vararg functions only.
-//     * varArgType must be a maximum-resolution type.
-//     * We use a separate map to be able to support multiple vararg signatures for the same
-//     * FunctionOperator.
-//     * Contains a special entry mapping from Operator,NULL_TYPE to signatures for each
-//     * Operator to correctly match varag functions when all args are NULL.
-//     * Limitations: Since we do not consider the number of arguments, each FunctionOperator
-//     * is limited to having one vararg signature per maximum-resolution PrimitiveType.
-//     * For example, one can have two signatures func(float, int ...) and func(string ...),
-//     * but not func(float, int ...) and func (int ...).
-//     */
-//    private final Map<Pair<FunctionOperator, PrimitiveType>, List<BuiltinFunction>>
-//      varArgOperations;
-//    /**
-//     * This contains a mapping of function names to a FunctionOperator enum.  This is used
-//     * by FunctionCallExpr to go from the parser input to function opcodes.
-//     * This is a many to one mapping (i.e. substr and substring both map to the same
-//     * operation).
-//     * The mappings are filled in in FunctionRegistry.java which is auto-generated.
-//     */
-//    private final HashMap<String, FunctionOperator> functionNameMap;
-//
-//    private final HashMap<FunctionOperator, List<BuiltinFunction>> funcByOp;
-//
-//    // Singleton interface, don't call the constructor
-//    private OpcodeRegistry() {
-//        operations = Maps.newHashMap();
-//        varArgOperations = Maps.newHashMap();
-//        functionNameMap = Maps.newHashMap();
-//        funcByOp = Maps.newHashMap();
-//
-//        // Add all the function signatures to the registry and the function name(string)
-//        // to FunctionOperator mapping
-//        FunctionRegistry.InitFunctions(this);
-//    }
-//
-//    // Singleton interface
-//    public static OpcodeRegistry instance() {
-//        return instance;
-//    }
-//
-//    /**
-//     * Static utility functions
-//     */
-//    public static boolean isBitwiseOperation(FunctionOperator operator) {
-//        return operator == FunctionOperator.BITAND || operator == FunctionOperator.BITNOT ||
-//          operator == FunctionOperator.BITOR || operator == FunctionOperator.BITXOR;
-//    }
-//
-//    /**
-//     * Returns the set of function names.
-//     *
-//     * @return
-//     */
-//    public Set<String> getFunctionNames() {
-//        return functionNameMap.keySet();
-//    }
-//
-//    /**
-//     * Returns the function operator enum.  The lookup is case insensitive.
-//     * (i.e. "Substring" --> TExprOperator.STRING_SUBSTR).
-//     * Returns INVALID_OPERATOR if the function name is unknown.
-//     */
-//    public FunctionOperator getFunctionOperator(String fnName) {
-//        String lookup = fnName.toLowerCase();
-//        if (functionNameMap.containsKey(lookup)) {
-//            return functionNameMap.get(lookup);
-//        }
-//        return FunctionOperator.INVALID_OPERATOR;
-//    }
-//
-//    /**
-//     * Query for a function in the registry, specifying the operation, 'op', the arguments.
-//     * If there is no matching signature, null will be returned.
-//     * If there is a match, the matching signature will be returned.
-//     * If 'allowImplicitCasts' is true the matching signature does not have to match the
-//     * input identically, implicit type promotion is allowed.
-//     */
-//    public BuiltinFunction getFunctionInfo(
-//            FunctionOperator op, boolean allowImplicitCasts,
-//            boolean vectorFunction, PrimitiveType... argTypes) {
-//        Pair<FunctionOperator, Integer> lookup = Pair.create(op, argTypes.length);
-//        List<Pair<FunctionOperator, PrimitiveType>> varArgMatchTypes = null;
-//        if (argTypes.length > 0) {
-//            Set<PrimitiveType> maxResolutionTypes = getMaxResolutionTypes(argTypes);
-//            Preconditions.checkNotNull(maxResolutionTypes);
-//            varArgMatchTypes = Lists.newArrayList();
-//            for (PrimitiveType maxResolutionType : maxResolutionTypes) {
-//                varArgMatchTypes.add(Pair.create(op, maxResolutionType));
-//            }
-//        }
-//        List<BuiltinFunction> functions = null;
-//        if (operations.containsKey(lookup)) {
-//            functions = operations.get(lookup);
-//        } else if (!varArgMatchTypes.isEmpty()) {
-//            functions = Lists.newArrayList();
-//            List<BuiltinFunction> matchedFunctions = null;
-//            for (Pair<FunctionOperator, PrimitiveType> varArgsMatchType : varArgMatchTypes) {
-//                matchedFunctions = varArgOperations.get(varArgsMatchType);
-//                if (matchedFunctions != null) {
-//                    functions.addAll(matchedFunctions);
-//                }
-//            }
-//        }
-//
-//        if (functions == null || functions.isEmpty()) {
-//            // maybe we can find it in funcByOp
-//            if (funcByOp.containsKey(op)) {
-//                functions = funcByOp.get(op);
-//            } else {
-//                return null;
-//            }
-//        }
-//        Type[] args = new Type[argTypes.length];
-//        int i = 0;
-//        for (PrimitiveType type : argTypes) {
-//            args[i] = Type.fromPrimitiveType(type);
-//            i ++;
-//        }
-//        BuiltinFunction search = new BuiltinFunction(op, args);
-//
-//        BuiltinFunction compatibleMatch = null;
-//        List<BuiltinFunction> compatibleMatchFunctions = Lists.newArrayList();
-//        // We first choose functions using IS_SUBTYPE (only the cast-method check is implemented);
-//        // if more than one function is found, give priority to the assign-compatible one.
-//        for (BuiltinFunction function : functions) {
-//            if (function.compare(search, Function.CompareMode.IS_INDISTINGUISHABLE)) {
-//                if (vectorFunction == function.vectorFunction) {
-//                    return function;
-//                }
-//            } else if (allowImplicitCasts
-//                    && function.compare(search, Function.CompareMode.IS_SUPERTYPE_OF)) {
-//                if (vectorFunction == function.vectorFunction) {
-//                    compatibleMatchFunctions.add(function);
-//                }
-//            }
-//        }
-//
-//        // If there are several compatible functions, we prefer the one that loses no precision.
-//        for (BuiltinFunction function : compatibleMatchFunctions) {
-//            if (function.compare(search, Function.CompareMode.IS_SUPERTYPE_OF)) {
-//                compatibleMatch = function;
-//            } else {
-//                LOG.info(" false {} {}", function.getReturnType(), function.getArgs());
-//            }
-//        }
-//        if (compatibleMatch == null && compatibleMatchFunctions.size() > 0) {
-//            compatibleMatch = compatibleMatchFunctions.get(0);
-//        }
-//
-//        return compatibleMatch;
-//    }
-//
-//    /**
-//     * Returns the max resolution type for each argType that is not a NULL_TYPE. If all
-//     * argument types are NULL_TYPE then a set will be returned containing NULL_TYPE.
-//     */
-//    private Set<PrimitiveType> getMaxResolutionTypes(PrimitiveType[] argTypes) {
-//        Set<PrimitiveType> maxResolutionTypes = Sets.newHashSet();
-//        for (int i = 0; i < argTypes.length; ++i) {
-//            if (!argTypes[i].isNull()) {
-//                maxResolutionTypes.add(argTypes[i].getMaxResolutionType());
-//            }
-//        }
-//        if (maxResolutionTypes.isEmpty()) {
-//            maxResolutionTypes.add(PrimitiveType.NULL_TYPE);
-//        }
-//        return maxResolutionTypes;
-//    }
-//
-//    /**
-//     * Add a function with the specified opcode/signature to the registry.
-//     */
-//
-//    public boolean add(boolean udfInterface, boolean vectorFunction, FunctionOperator op,
-//                       TExprOpcode opcode, boolean varArgs, PrimitiveType retType, PrimitiveType... args) {
-//        List<BuiltinFunction> functions;
-//        Pair<FunctionOperator, Integer> lookup = Pair.create(op, args.length);
-//        // Take the last argument's type as the vararg type.
-//        Pair<FunctionOperator, PrimitiveType> varArgsLookup = null;
-//        // Special signature for vararg functions to handle matching when all args are NULL.
-//        Pair<FunctionOperator, PrimitiveType> varArgsNullLookup = null;
-//        Preconditions.checkArgument((varArgs) ? args.length > 0 : true);
-//        if (varArgs && args.length > 0) {
-//            varArgsLookup = Pair.create(op, args[args.length - 1].getMaxResolutionType());
-//            varArgsNullLookup = Pair.create(op, PrimitiveType.NULL_TYPE);
-//        }
-//        if (operations.containsKey(lookup)) {
-//            functions = operations.get(lookup);
-//        } else if (varArgsLookup != null && varArgOperations.containsKey(varArgsLookup)) {
-//            functions = varArgOperations.get(varArgsLookup);
-//        } else {
-//            functions = new ArrayList<BuiltinFunction>();
-//            if (varArgs) {
-//                varArgOperations.put(varArgsLookup, functions);
-//                varArgOperations.put(varArgsNullLookup, functions);
-//            } else {
-//                operations.put(lookup, functions);
-//            }
-//        }
-//
-//        Type[] argsType = new Type[args.length];
-//        int i = 0;
-//        for (PrimitiveType type : args) {
-//            argsType[i] = Type.fromPrimitiveType(type);
-//            i ++;
-//        }
-//
-//        BuiltinFunction function =
-//                new BuiltinFunction(udfInterface, vectorFunction, opcode, op, varArgs, Type.fromPrimitiveType(retType), argsType);
-//        if (functions.contains(function)) {
-//            LOG.error("OpcodeRegistry: Function already exists: " + opcode);
-//            return false;
-//        }
-//        functions.add(function);
-//
-//        // add to op map
-//        if (funcByOp.containsKey(op)) {
-//            functions = funcByOp.get(op);
-//        } else {
-//            functions = Lists.newArrayList();
-//            funcByOp.put(op, functions);
-//        }
-//        functions.add(function);
-//        return true;
-//    }
-//
-//    public boolean addFunctionMapping(String functionName, FunctionOperator op) {
-//        if (functionNameMap.containsKey(functionName)) {
-//            LOG.error("OpcodeRegistry: Function mapping already exists: " + functionName);
-//            return false;
-//        }
-//        functionNameMap.put(functionName, op);
-//        return true;
-//    }
-//
-//    /**
-//     * Contains all the information about a builtin function.
-//     * TODO: merge with Function and Udf
-//     */
-//    public static class BuiltinFunction extends Function {
-//        // If true, this builtin is implemented against the Udf interface.
-//        public final boolean          udfInterface;
-//        public final boolean          vectorFunction;
-//        public       TExprOpcode      opcode;
-//        public       FunctionOperator operator;
-//
-//        // Constructor for searching, specifying the op and arguments
-//        public BuiltinFunction(FunctionOperator operator, Type[] args) {
-//            super(new FunctionName(operator.toString()), args, Type.INVALID, false);
-//            this.operator = operator;
-//            this.udfInterface = false;
-//            this.vectorFunction = false;
-//            this.setBinaryType(TFunctionBinaryType.BUILTIN);
-//        }
-//
-//        private BuiltinFunction(boolean udfInterface, boolean vectorFunction, TExprOpcode opcode,
-//                                FunctionOperator operator, boolean varArgs, Type ret, Type[] args) {
-//            super(new FunctionName(opcode.toString()), args, ret, varArgs);
-//            this.operator = operator;
-//            this.opcode = opcode;
-//            this.udfInterface = udfInterface;
-//            this.vectorFunction = vectorFunction;
-//            this.setBinaryType(TFunctionBinaryType.BUILTIN);
-//        }
-//    }
-}
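
The javadoc on the maps deleted above documents a two-phase lookup: an exact
signature match wins outright; otherwise candidates that fit after implicit
casts are collected, and among those a precision-preserving match is preferred.
A minimal, self-contained sketch of that ordering follows; it is not code from
this patch, and Signature, matchesExactly, matchesWithCasts and losesPrecision
are hypothetical names:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class SignatureLookupSketch {

        interface Signature {
            boolean matchesExactly(String[] argTypes);
            boolean matchesWithCasts(String[] argTypes);
            boolean losesPrecision(String[] argTypes);
        }

        // key: operator name + "/" + arity, standing in for the Pair<op, nargs> map
        private final Map<String, List<Signature>> operations = new HashMap<>();

        Signature find(String op, String[] argTypes, boolean allowImplicitCasts) {
            List<Signature> candidates =
                    operations.getOrDefault(op + "/" + argTypes.length, new ArrayList<>());
            List<Signature> compatible = new ArrayList<>();
            for (Signature s : candidates) {
                if (s.matchesExactly(argTypes)) {
                    return s; // an indistinguishable match always wins
                }
                if (allowImplicitCasts && s.matchesWithCasts(argTypes)) {
                    compatible.add(s); // keep as a fallback for the second pass
                }
            }
            for (Signature s : compatible) {
                if (!s.losesPrecision(argTypes)) {
                    return s; // prefer the cast match that preserves precision
                }
            }
            return compatible.isEmpty() ? null : compatible.get(0);
        }
    }
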
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java
index c9d96ed043..39b8bd328d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OrderByElement.java
@@ -60,11 +60,13 @@ public class OrderByElement {
     public Boolean getNullsFirstParam() {
         return nullsFirstParam;
     }
+
     public OrderByElement clone() {
         OrderByElement clone = new OrderByElement(
                 expr.clone(), isAsc, nullsFirstParam);
         return clone;
     }
+
     /**
      * Returns a new list of OrderByElements with the same (cloned) expressions but the
      * ordering direction reversed (asc becomes desc, nulls first becomes nulls last, etc.)
@@ -82,6 +84,7 @@ public class OrderByElement {
 
         return result;
     }
+
     /**
      * Extracts the order-by exprs from the list of order-by elements and returns them.
      */
@@ -111,6 +114,7 @@ public class OrderByElement {
 
         return result;
     }
+
     public String toSql() {
         StringBuilder strBuilder = new StringBuilder();
         strBuilder.append(expr.toSql());
@@ -167,6 +171,7 @@ public class OrderByElement {
         OrderByElement o = (OrderByElement) obj;
         return expr.equals(o.expr) && isAsc == o.isAsc  && nullsFirstParam == o.nullsFirstParam;
     }
+
     /**
      * Compute nullsFirst.
      *
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java
index 8443949d35..7f8b1436a0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/OutFileClause.java
@@ -226,8 +226,8 @@ public class OutFileClause {
             switch (resultType.getPrimitiveType()) {
                 case BOOLEAN:
                     if (!type.equals("boolean")) {
-                        throw new AnalysisException("project field type is BOOLEAN, should use boolean, but the type of column "
-                                + i + " is " + type);
+                        throw new AnalysisException("project field type is BOOLEAN, should use boolean,"
+                                + " but the type of column " + i + " is " + type);
                     }
                     break;
                 case TINYINT:
@@ -248,14 +248,14 @@ public class OutFileClause {
                     break;
                 case FLOAT:
                     if (!type.equals("float")) {
-                        throw new AnalysisException("project field type is FLOAT, should use float, but the definition type of column "
-                                + i + " is " + type);
+                        throw new AnalysisException("project field type is FLOAT, should use float,"
+                                + " but the definition type of column " + i + " is " + type);
                     }
                     break;
                 case DOUBLE:
                     if (!type.equals("double")) {
-                        throw new AnalysisException("project field type is DOUBLE, should use double, but the definition type of column "
-                                + i + " is " + type);
+                        throw new AnalysisException("project field type is DOUBLE, should use double,"
+                                + " but the definition type of column " + i + " is " + type);
                     }
                     break;
                 case CHAR:
@@ -263,23 +263,26 @@ public class OutFileClause {
                 case STRING:
                 case DECIMALV2:
                     if (!type.equals("byte_array")) {
-                        throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL, should use byte_array, "
-                                + "but the definition type of column " + i + " is " + type);
+                        throw new AnalysisException("project field type is CHAR/VARCHAR/STRING/DECIMAL,"
+                                + " should use byte_array, but the definition type of column " + i + " is " + type);
                     }
                     break;
                 case HLL:
                 case BITMAP:
-                    if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isReturnObjectDataAsBinary()) {
+                    if (ConnectContext.get() != null && ConnectContext.get()
+                            .getSessionVariable().isReturnObjectDataAsBinary()) {
                         if (!type.equals("byte_array")) {
                             throw new AnalysisException("project field type is HLL/BITMAP, should use byte_array, "
                                     + "but the definition type of column " + i + " is " + type);
                         }
                     } else {
-                        throw new AnalysisException("Parquet format does not support column type: " + resultType.getPrimitiveType());
+                        throw new AnalysisException("Parquet format does not support column type: "
+                                + resultType.getPrimitiveType());
                     }
                     break;
                 default:
-                    throw new AnalysisException("Parquet format does not support column type: " + resultType.getPrimitiveType());
+                    throw new AnalysisException("Parquet format does not support column type: "
+                            + resultType.getPrimitiveType());
             }
         }
     }
@@ -318,12 +321,14 @@ public class OutFileClause {
                     break;
                 case HLL:
                 case BITMAP:
-                    if (ConnectContext.get() != null && ConnectContext.get().getSessionVariable().isReturnObjectDataAsBinary()) {
+                    if (ConnectContext.get() != null && ConnectContext.get()
+                            .getSessionVariable().isReturnObjectDataAsBinary()) {
                         column.add("byte_array");
                     }
                     break;
                 default:
-                    throw new AnalysisException("currently parquet do not support column type: " + expr.getType().getPrimitiveType());
+                    throw new AnalysisException("currently parquet do not support column type: "
+                            + expr.getType().getPrimitiveType());
             }
             column.add("col" + i);
             this.schema.add(column);
@@ -338,7 +343,8 @@ public class OutFileClause {
         if (filePath.startsWith(LOCAL_FILE_PREFIX)) {
             if (!Config.enable_outfile_to_local) {
                 throw new AnalysisException("Exporting results to local disk is not allowed."
-                    + " To enable this feature, you need to add `enable_outfile_to_local=true` in fe.conf and restart FE");
+                        + " To enable this feature, you need to add `enable_outfile_to_local=true`"
+                        + " in fe.conf and restart FE");
             }
             isLocalOutput = true;
             filePath = filePath.substring(LOCAL_FILE_PREFIX.length() - 1); // leave last '/'
@@ -433,11 +439,12 @@ public class OutFileClause {
                 brokerProps.put(entry.getKey(), entry.getValue());
                 processedPropKeys.add(entry.getKey());
             } else if (entry.getKey().contains(BrokerUtil.HADOOP_FS_NAME)
-                && storageType == StorageBackend.StorageType.HDFS) {
+                    && storageType == StorageBackend.StorageType.HDFS) {
                 brokerProps.put(entry.getKey(), entry.getValue());
                 processedPropKeys.add(entry.getKey());
-            } else if ((entry.getKey().startsWith(HADOOP_FS_PROP_PREFIX) || entry.getKey().startsWith(HADOOP_PROP_PREFIX))
-                && storageType == StorageBackend.StorageType.HDFS) {
+            } else if ((entry.getKey().startsWith(HADOOP_FS_PROP_PREFIX)
+                    || entry.getKey().startsWith(HADOOP_PROP_PREFIX))
+                    && storageType == StorageBackend.StorageType.HDFS) {
                 brokerProps.put(entry.getKey(), entry.getValue());
                 processedPropKeys.add(entry.getKey());
             }
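
The OutFileClause hunks above reflow a check that pairs each Doris result type
with exactly one Parquet physical type. Condensed into a single helper for
readability (the helper itself is hypothetical; PrimitiveType and
AnalysisException are the FE's own classes, and the integer cases elided from
the hunk are omitted here as well):

    import org.apache.doris.catalog.PrimitiveType;
    import org.apache.doris.common.AnalysisException;

    final class ParquetTypeCheckSketch {
        static String expectedParquetType(PrimitiveType t, boolean returnObjectAsBinary)
                throws AnalysisException {
            switch (t) {
                case BOOLEAN:
                    return "boolean";
                case FLOAT:
                    return "float";
                case DOUBLE:
                    return "double";
                case CHAR:
                case VARCHAR:
                case STRING:
                case DECIMALV2:
                    return "byte_array";
                case HLL:
                case BITMAP:
                    // objects can only be exported as raw binary, and only when the
                    // session variable return_object_data_as_binary is enabled
                    if (returnObjectAsBinary) {
                        return "byte_array";
                    }
                    throw new AnalysisException(
                            "Parquet format does not support column type: " + t);
                default:
                    throw new AnalysisException(
                            "Parquet format does not support column type: " + t);
            }
        }
    }
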
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java
index c34e9f7728..1c2f87b4f5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Predicate.java
@@ -119,14 +119,15 @@ public abstract class Predicate extends Expr {
                 Preconditions.checkState(right != null);
 
                 // ATTN(cmy): Usually, the BinaryPredicate in the query will be rewritten through ExprRewriteRule,
-                // and all SingleColumnPredicate will be rewritten as "column on the left and the constant on the right".
+                // and all SingleColumnPredicate will be rewritten as "column on the left and the constant on the right"
                 // So usually the right child is constant.
                 //
                // But if there is a subquery in the where clause, the planner will convert the subquery to a join.
                // During the conversion, some auxiliary BinaryPredicates will be generated automatically,
                // and these BinaryPredicates will not go through ExprRewriteRule.
                // As a result, these BinaryPredicates may end up as "column on the right and the constant on the left".
-                // Example can be found in QueryPlanTest.java -> testJoinPredicateTransitivityWithSubqueryInWhereClause().
+                // Example can be found in QueryPlanTest.java
+                //   -> testJoinPredicateTransitivityWithSubqueryInWhereClause().
                 //
                // Because our current planner implementation is error-prone, when this happens,
                // we simply assume that these kinds of BinaryPredicates cannot be pushed down,
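
A hypothetical helper, not part of this patch, showing the normalization the
comment above relies on: "constant op column" is rewritten as
"column op' constant" by swapping the children and mirroring the operator.
converse() is assumed here to map LT<->GT and LE<->GE while leaving the
symmetric EQ/NE unchanged:

    // Normalize a predicate so the column ends up on the left-hand side.
    static BinaryPredicate normalize(BinaryPredicate p) {
        if (p.getChild(0).isConstant() && !p.getChild(1).isConstant()) {
            // swap children and mirror the comparison direction
            return new BinaryPredicate(p.getOp().converse(), p.getChild(1), p.getChild(0));
        }
        return p; // already "column left, constant right", or not normalizable
    }
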
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java
index 5ca6044214..9e79548e6d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/QueryStmt.java
@@ -51,7 +51,7 @@ import java.util.stream.Collectors;
  * analysis of the ORDER BY and LIMIT clauses.
  */
 public abstract class QueryStmt extends StatementBase {
-    private final static Logger LOG = LogManager.getLogger(QueryStmt.class);
+    private static final Logger LOG = LogManager.getLogger(QueryStmt.class);
 
     /////////////////////////////////////////
     // BEGIN: Members that need to be reset()
@@ -455,7 +455,8 @@ public abstract class QueryStmt extends StatementBase {
         return resultExprs.get((int) pos - 1).clone();
     }
 
-    public void getWithClauseTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet) throws AnalysisException {
+    public void getWithClauseTables(Analyzer analyzer, Map<Long, Table> tableMap,
+            Set<String> parentViewNameSet) throws AnalysisException {
         if (withClause != null) {
             withClause.getTables(analyzer, tableMap, parentViewNameSet);
         }
@@ -532,8 +533,10 @@ public abstract class QueryStmt extends StatementBase {
     //                "select a.siteid, b.citycode, a.siteid from (select siteid, citycode from tmp) a " +
     //                "left join (select siteid, citycode from tmp) b on a.siteid = b.siteid;";
 // tmp in child stmt "(select siteid, citycode from tmp)" does not contain a with_Clause
-    // so need to check is view name by parentViewNameSet. issue link: https://github.com/apache/incubator-doris/issues/4598
-    public abstract void getTables(Analyzer analyzer, Map<Long, Table> tables, Set<String> parentViewNameSet) throws AnalysisException;
+    // so we need to check whether it is a view name via parentViewNameSet.
+    // issue link: https://github.com/apache/incubator-doris/issues/4598
+    public abstract void getTables(Analyzer analyzer, Map<Long, Table> tables, Set<String> parentViewNameSet)
+            throws AnalysisException;
 
     // get TableRefs in this query, including physical TableRefs of this statement and
     // nested statements of inline views and with_Clause.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java
index 5be7c6bf24..05ccbf1fba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverDbStmt.java
@@ -51,11 +51,10 @@ public class RecoverDbStmt extends DdlStmt {
         dbName = ClusterNamespace.getFullName(getClusterName(), dbName);
 
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName,
-                                                               PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV,
-                                                                                              PaloPrivilege.CREATE_PRIV,
-                                                                                              PaloPrivilege.ADMIN_PRIV),
-                                                                                Operator.OR))) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+                PrivPredicate.of(PrivBitSet.of(
+                        PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), Operator.OR))) {
+            ErrorReport.reportAnalysisException(
+                    ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java
index eb1ef63eb2..312cd8129c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverPartitionStmt.java
@@ -55,11 +55,8 @@ public class RecoverPartitionStmt extends DdlStmt {
     public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
         dbTblName.analyze(analyzer);
         if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbTblName.getDb(),
-                                                                dbTblName.getTbl(),
-                                                                PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV,
-                                                                                               PaloPrivilege.CREATE_PRIV,
-                                                                                               PaloPrivilege.ADMIN_PRIV),
-                                                                                 Operator.OR))) {
+                dbTblName.getTbl(), PrivPredicate.of(PrivBitSet.of(
+                        PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV), Operator.OR))) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "RECOVERY",
                                                 ConnectContext.get().getQualifiedUser(),
                                                 ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java
index 5928b5fa44..dbe24f9e19 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RecoverTableStmt.java
@@ -49,12 +49,10 @@ public class RecoverTableStmt extends DdlStmt {
     public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
         dbTblName.analyze(analyzer);
 
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbTblName.getDb(),
-                                                                dbTblName.getTbl(),
-                                                                PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ALTER_PRIV,
-                                                                                               PaloPrivilege.CREATE_PRIV,
-                                                                                               PaloPrivilege.ADMIN_PRIV),
-                                                                                 Operator.OR))) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), dbTblName.getDb(), dbTblName.getTbl(), PrivPredicate.of(
+                        PrivBitSet.of(PaloPrivilege.ALTER_PRIV, PaloPrivilege.CREATE_PRIV, PaloPrivilege.ADMIN_PRIV),
+                        Operator.OR))) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "RECOVERY",
                                                 ConnectContext.get().getQualifiedUser(),
                                                 ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java
index 66a4cc9ee8..ff70985fe7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RefreshDbStmt.java
@@ -57,7 +57,8 @@ public class RefreshDbStmt extends DdlStmt {
 
         // Don't allow refreshing the 'information_schema' database
         if (dbName.equalsIgnoreCase(ClusterNamespace.getFullName(getClusterName(), InfoSchemaDb.DATABASE_NAME))) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+            ErrorReport.reportAnalysisException(
+                    ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
         }
         // check access
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.DROP)) {
@@ -65,7 +66,8 @@ public class RefreshDbStmt extends DdlStmt {
                     ConnectContext.get().getQualifiedUser(), dbName);
         }
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, PrivPredicate.CREATE)) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
+            ErrorReport.reportAnalysisException(
+                    ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), dbName);
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java
index 666f1c0b18..6d7f88c089 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ReplacePartitionClause.java
@@ -44,8 +44,10 @@ public class ReplacePartitionClause extends AlterTableClause {
     // Otherwise, the replaced partition's name will be the temp partition's name.
     // This parameter is valid only when the number of partitions is the same as the number of temp partitions.
     // For example:
-    // 1. REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false");
-    //      "use_temp_partition_name" will take no effect after replacing, and the partition names will be "tp1" and "tp2".
+    // 1. REPLACE PARTITION (p1, p2, p3) WITH TEMPORARY PARTITION(tp1, tp2)
+    //    PROPERTIES("use_temp_partition_name" = "false");
+    //      "use_temp_partition_name" will take no effect after replacing,
+    //      and the partition names will be "tp1" and "tp2".
     //
     // 2. REPLACE PARTITION (p1, p2) WITH TEMPORARY PARTITION(tp1, tp2) PROPERTIES("use_temp_partition_name" = "false");
     //      after replacing, the partition names will be "p1" and "p2".
@@ -90,7 +92,8 @@ public class ReplacePartitionClause extends AlterTableClause {
             throw new AnalysisException("Only support replace partitions with temp partitions");
         }
 
-        this.isStrictRange = PropertyAnalyzer.analyzeBooleanProp(properties, PropertyAnalyzer.PROPERTIES_STRICT_RANGE, true);
+        this.isStrictRange = PropertyAnalyzer.analyzeBooleanProp(
+                properties, PropertyAnalyzer.PROPERTIES_STRICT_RANGE, true);
         this.useTempPartitionName = PropertyAnalyzer.analyzeBooleanProp(properties,
                 PropertyAnalyzer.PROPERTIES_USE_TEMP_PARTITION_NAME, false);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java
index 059f4bfa6a..19771f68b5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ResourcePattern.java
@@ -39,6 +39,7 @@ public class ResourcePattern implements Writable {
     private String resourceName;
 
     public static ResourcePattern ALL;
+
     static {
         ALL = new ResourcePattern("*");
         try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java
index 6a0bfe6f53..cb9b4e3bb4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RestoreStmt.java
@@ -32,10 +32,10 @@ import java.util.Map;
 import java.util.Set;
 
 public class RestoreStmt extends AbstractBackupStmt {
-    private final static String PROP_ALLOW_LOAD = "allow_load";
-    private final static String PROP_REPLICATION_NUM = "replication_num";
-    private final static String PROP_BACKUP_TIMESTAMP = "backup_timestamp";
-    private final static String PROP_META_VERSION = "meta_version";
+    private static final String PROP_ALLOW_LOAD = "allow_load";
+    private static final String PROP_REPLICATION_NUM = "replication_num";
+    private static final String PROP_BACKUP_TIMESTAMP = "backup_timestamp";
+    private static final String PROP_META_VERSION = "meta_version";
 
     private boolean allowLoad = false;
     private ReplicaAllocation replicaAlloc = ReplicaAllocation.DEFAULT_ALLOCATION;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java
index 05e229d1a6..c84f490ddf 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RevokeStmt.java
@@ -54,7 +54,8 @@ public class RevokeStmt extends DdlStmt {
         this.privileges = privs.toPrivilegeList();
     }
 
-    public RevokeStmt(UserIdentity userIdent, String role, ResourcePattern resourcePattern, List<AccessPrivilege> privileges) {
+    public RevokeStmt(UserIdentity userIdent, String role,
+            ResourcePattern resourcePattern, List<AccessPrivilege> privileges) {
         this.userIdent = userIdent;
         this.role = role;
         this.tblPattern = null;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java
index d7dbfab53e..26d335a18e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/RoutineLoadDataSourceProperties.java
@@ -49,7 +49,8 @@ public class RoutineLoadDataSourceProperties {
             .add(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS)
             .build();
 
-    private static final ImmutableSet<String> CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET = new ImmutableSet.Builder<String>()
+    private static final ImmutableSet<String> CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET
+            = new ImmutableSet.Builder<String>()
             .add(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY)
             .add(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY)
             .add(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY)
@@ -160,7 +161,8 @@ public class RoutineLoadDataSourceProperties {
      * 4. other properties start with "property."
      */
     private void checkKafkaProperties() throws UserException {
-        ImmutableSet<String> propertySet = isAlter ? CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET : DATA_SOURCE_PROPERTIES_SET;
+        ImmutableSet<String> propertySet = isAlter
+                ? CONFIGURABLE_DATA_SOURCE_PROPERTIES_SET : DATA_SOURCE_PROPERTIES_SET;
         Optional<String> optional = properties.keySet().stream()
                 .filter(entity -> !propertySet.contains(entity))
                 .filter(entity -> !entity.startsWith("property."))
@@ -170,7 +172,8 @@ public class RoutineLoadDataSourceProperties {
         }
 
         // check broker list
-        kafkaBrokerList = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY)).replaceAll(" ", "");
+        kafkaBrokerList = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY))
+                .replaceAll(" ", "");
         if (!isAlter && Strings.isNullOrEmpty(kafkaBrokerList)) {
             throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY + " is a required property");
         }
@@ -185,7 +188,8 @@ public class RoutineLoadDataSourceProperties {
         }
 
         // check topic
-        kafkaTopic = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY)).replaceAll(" ", "");
+        kafkaTopic = Strings.nullToEmpty(properties.get(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY))
+                .replaceAll(" ", "");
         if (!isAlter && Strings.isNullOrEmpty(kafkaTopic)) {
             throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY + " is a required property");
         }
@@ -223,20 +227,23 @@ public class RoutineLoadDataSourceProperties {
             throw new AnalysisException("Only one of " + CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY
                     + " and " + CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can be set.");
         }
-        if (isAlter && kafkaPartitionsString != null && kafkaOffsetsString == null && kafkaDefaultOffsetString == null) {
+        if (isAlter && kafkaPartitionsString != null
+                && kafkaOffsetsString == null && kafkaDefaultOffsetString == null) {
             // if this is an alter operation, the partition and (default)offset must be set together.
             throw new AnalysisException("Must set offset or default offset with partition property");
         }
 
         if (kafkaOffsetsString != null) {
-            this.isOffsetsForTimes = analyzeKafkaOffsetProperty(kafkaOffsetsString, this.kafkaPartitionOffsets, this.timezone);
+            this.isOffsetsForTimes = analyzeKafkaOffsetProperty(kafkaOffsetsString,
+                    this.kafkaPartitionOffsets, this.timezone);
         } else {
             // offset is not set, check default offset.
             this.isOffsetsForTimes = analyzeKafkaDefaultOffsetProperty(this.customKafkaProperties, this.timezone);
             if (!this.kafkaPartitionOffsets.isEmpty()) {
                 // Case C
                 kafkaDefaultOffsetString = customKafkaProperties.get(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS);
-                setDefaultOffsetForPartition(this.kafkaPartitionOffsets, kafkaDefaultOffsetString, this.isOffsetsForTimes);
+                setDefaultOffsetForPartition(this.kafkaPartitionOffsets,
+                        kafkaDefaultOffsetString, this.isOffsetsForTimes);
             }
         }
     }
@@ -259,10 +266,12 @@ public class RoutineLoadDataSourceProperties {
     }
 
     // If the default offset is not set, set the default offset to OFFSET_END.
-    // If the offset is in datetime format, convert it to a timestamp, and also save the origin datatime formatted offset
+    // If the offset is in datetime format, convert it to a timestamp,
+    // and also save the original datetime formatted offset
     // in "customKafkaProperties"
     // return true if the offset is in datetime format.
-    private static boolean analyzeKafkaDefaultOffsetProperty(Map<String, String> customKafkaProperties, String timeZoneStr)
+    private static boolean analyzeKafkaDefaultOffsetProperty(
+            Map<String, String> customKafkaProperties, String timeZoneStr)
             throws AnalysisException {
         customKafkaProperties.putIfAbsent(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS, KafkaProgress.OFFSET_END);
         String defaultOffsetStr = customKafkaProperties.get(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS);
@@ -275,8 +284,10 @@ public class RoutineLoadDataSourceProperties {
             customKafkaProperties.put(CreateRoutineLoadStmt.KAFKA_ORIGIN_DEFAULT_OFFSETS, defaultOffsetStr);
             return true;
         } else {
-            if (!defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_BEGINNING) && !defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_END)) {
-                throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS + " can only be set to OFFSET_BEGINNING, OFFSET_END or date time");
+            if (!defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_BEGINNING)
+                    && !defaultOffsetStr.equalsIgnoreCase(KafkaProgress.OFFSET_END)) {
+                throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_DEFAULT_OFFSETS
+                        + " can only be set to OFFSET_BEGINNING, OFFSET_END or date time");
             }
             return false;
         }
@@ -285,16 +296,17 @@ public class RoutineLoadDataSourceProperties {
     // init "kafkaPartitionOffsets" with partition property.
     // The offset will be set to OFFSET_END for now, and will be changed in later analysis process.
     private static void analyzeKafkaPartitionProperty(String kafkaPartitionsString,
-                                                      List<Pair<Integer, Long>> kafkaPartitionOffsets) throws AnalysisException {
+            List<Pair<Integer, Long>> kafkaPartitionOffsets) throws AnalysisException {
         kafkaPartitionsString = kafkaPartitionsString.replaceAll(" ", "");
         if (kafkaPartitionsString.isEmpty()) {
-            throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY + " could not be a empty string");
+            throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY
+                    + " could not be an empty string");
         }
         String[] kafkaPartitionsStringList = kafkaPartitionsString.split(",");
         for (String s : kafkaPartitionsStringList) {
             try {
-                kafkaPartitionOffsets.add(Pair.create(getIntegerValueFromString(s, CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY),
-                        KafkaProgress.OFFSET_END_VAL));
+                kafkaPartitionOffsets.add(Pair.create(getIntegerValueFromString(
+                        s, CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY), KafkaProgress.OFFSET_END_VAL));
             } catch (AnalysisException e) {
                 throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY
                         + " must be a number string with comma-separated");
@@ -304,8 +316,8 @@ public class RoutineLoadDataSourceProperties {
 
     // Fill the partition's offset with given kafkaOffsetsString,
     // Return true if offset is specified by timestamp.
-    private static boolean analyzeKafkaOffsetProperty(String kafkaOffsetsString, List<Pair<Integer, Long>> kafkaPartitionOffsets,
-                                                      String timeZoneStr)
+    private static boolean analyzeKafkaOffsetProperty(String kafkaOffsetsString,
+            List<Pair<Integer, Long>> kafkaPartitionOffsets, String timeZoneStr)
             throws UserException {
         if (Strings.isNullOrEmpty(kafkaOffsetsString)) {
            throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + " could not be an empty string");
@@ -354,7 +366,8 @@ public class RoutineLoadDataSourceProperties {
                 } else if (NumberUtils.isDigits(kafkaOffsetsStr)) {
                     kafkaPartitionOffsets.get(i).second = Long.valueOf(NumberUtils.toLong(kafkaOffsetsStr));
                 } else {
-                    throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY + " must be an integer or a date time");
+                    throw new AnalysisException(CreateRoutineLoadStmt.KAFKA_OFFSETS_PROPERTY
+                            + " must be an integer or a date time");
                 }
             }
         }
@@ -368,7 +381,7 @@ public class RoutineLoadDataSourceProperties {
             if (dataSourceProperty.getKey().startsWith("property.")) {
                 String propertyKey = dataSourceProperty.getKey();
                 String propertyValue = dataSourceProperty.getValue();
-                String propertyValueArr[] = propertyKey.split("\\.");
+                String[] propertyValueArr = propertyKey.split("\\.");
                 if (propertyValueArr.length < 2) {
                     throw new AnalysisException("kafka property value could not be a empty string");
                 }
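
The comments reflowed above encode a pairing rule: "kafka_partitions" seeds
every listed partition with the default end offset, and a "kafka_offsets" list
of equal length then overrides those defaults; in an alter, partitions may not
be set without (default) offsets. A simplified stand-in for that rule follows;
the class name and the sentinel value are illustrative, and datetime-formatted
offsets are omitted:

    import java.util.ArrayList;
    import java.util.List;

    final class KafkaOffsetSketch {
        static final long OFFSET_END_VAL = -1L; // illustrative "consume from end" sentinel

        // each element: {partitionId, offset}
        static List<long[]> parse(String partitions, String offsets) {
            String[] parts = partitions.replaceAll(" ", "").split(",");
            List<long[]> result = new ArrayList<>();
            for (String p : parts) {
                // every partition starts at the default end offset
                result.add(new long[] {Long.parseLong(p), OFFSET_END_VAL});
            }
            if (offsets != null && !offsets.isEmpty()) {
                String[] offs = offsets.replaceAll(" ", "").split(",");
                if (offs.length != parts.length) {
                    throw new IllegalArgumentException(
                            "partitions and offsets must be set together, one offset per partition");
                }
                for (int i = 0; i < offs.length; i++) {
                    result.get(i)[1] = Long.parseLong(offs[i]); // override the default
                }
            }
            return result;
        }
    }

For example, parse("0,1,2", "100,100,200") yields {0,100}, {1,100}, {2,200},
while passing a null offsets string leaves every partition at the default.
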
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
index 2eb892ed1c..127285bcb5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SchemaTableType.java
@@ -72,8 +72,9 @@ public enum SchemaTableType {
         fullSelectLists = new SelectList();
         fullSelectLists.addItem(SelectListItem.createStarItem(null));
     }
-    private final String           description;
-    private final String           tableName;
+
+    private final String description;
+    private final String tableName;
     private final TSchemaTableType tableType;
 
     SchemaTableType(String description, String tableName, TSchemaTableType tableType) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java
index db2e688b6a..643da0095a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectListItem.java
@@ -62,7 +62,7 @@ public class SelectListItem {
     }
 
     // select list item corresponding to "[[db.]tbl.]*"
-    static public SelectListItem createStarItem(TableName tblName) {
+    public static SelectListItem createStarItem(TableName tblName) {
         return new SelectListItem(tblName);
     }
 
@@ -115,6 +115,7 @@ public class SelectListItem {
             return "*";
         }
     }
+
     /**
      * Return a column label for the select list item.
      */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
index e9ae2aa66e..ca450021b3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SelectStmt.java
@@ -71,7 +71,7 @@ import java.util.stream.Collectors;
  * clauses.
  */
 public class SelectStmt extends QueryStmt {
-    private final static Logger LOG = LogManager.getLogger(SelectStmt.class);
+    private static final Logger LOG = LogManager.getLogger(SelectStmt.class);
     private UUID id = UUID.randomUUID();
 
     // ///////////////////////////////////////
@@ -291,7 +291,8 @@ public class SelectStmt extends QueryStmt {
     }
 
     @Override
-    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet) throws AnalysisException {
+    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap,
+            Set<String> parentViewNameSet) throws AnalysisException {
         getWithClauseTables(analyzer, tableMap, parentViewNameSet);
         for (TableRef tblRef : fromClause) {
             if (tblRef instanceof InlineViewRef) {
@@ -962,7 +963,8 @@ public class SelectStmt extends QueryStmt {
 
         if (groupByClause == null && !selectList.isDistinct()
                 && !TreeNode.contains(resultExprs, Expr.isAggregatePredicate())
-                && (havingClauseAfterAnaylzed == null || !havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate()))
+                && (havingClauseAfterAnaylzed == null || !havingClauseAfterAnaylzed.contains(
+                        Expr.isAggregatePredicate()))
                 && (sortInfo == null || !TreeNode.contains(sortInfo.getOrderingExprs(),
                 Expr.isAggregatePredicate()))) {
             // We're not computing aggregates but we still need to register the HAVING
@@ -993,7 +995,8 @@ public class SelectStmt extends QueryStmt {
         if (selectList.isDistinct()
                 && (groupByClause != null
                 || TreeNode.contains(resultExprs, Expr.isAggregatePredicate())
-                || (havingClauseAfterAnaylzed != null && havingClauseAfterAnaylzed.contains(Expr.isAggregatePredicate())))) {
+                || (havingClauseAfterAnaylzed != null && havingClauseAfterAnaylzed.contains(
+                        Expr.isAggregatePredicate())))) {
             throw new AnalysisException("cannot combine SELECT DISTINCT with aggregate functions or GROUP BY");
         }
 
@@ -1063,7 +1066,8 @@ public class SelectStmt extends QueryStmt {
                         ? aggInfo.getSecondPhaseDistinctAggInfo()
                         : aggInfo;
         groupingByTupleIds.add(finalAggInfo.getOutputTupleId());
-        ExprSubstitutionMap combinedSmap = ExprSubstitutionMap.compose(countAllMap, finalAggInfo.getOutputSmap(), analyzer);
+        ExprSubstitutionMap combinedSmap = ExprSubstitutionMap.compose(
+                countAllMap, finalAggInfo.getOutputSmap(), analyzer);
         // change select list, having and ordering exprs to point to agg output. We need
         // to reanalyze the exprs at this point.
         if (LOG.isDebugEnabled()) {
@@ -1084,7 +1088,8 @@ public class SelectStmt extends QueryStmt {
         }
 
         /*
-         * All of columns of result and having clause are replaced by new slot ref which is bound by top tuple of agg info.
+         * All columns of the result and having clause are replaced by new slot refs
+         * which are bound by the top tuple of the agg info.
          * For example:
          * ResultExprs: SlotRef(k1), FunctionCall(sum(SlotRef(k2)))
          * Having predicate: FunctionCall(sum(SlotRef(k2))) > subquery
@@ -1448,9 +1453,9 @@ public class SelectStmt extends QueryStmt {
                     * Alias information of the groupBy and orderBy clauses is recorded in `QueryStmt.aliasSMap`.
                      * The select clause has its own alias info in `SelectListItem.alias`.
                      *
-                     * Aliases expr in the `group by` and `order by` clauses are not analyzed, i.e. `Expr.isAnalyzed=false`
-                     * Subsequent constant folding will analyze the unanalyzed Expr before collecting the constant
-                     * expressions, preventing the `INVALID_TYPE` expr from being sent to BE.
+                     * Alias exprs in the `group by` and `order by` clauses are not analyzed,
+                     * i.e. `Expr.isAnalyzed=false`. Subsequent constant folding will analyze the unanalyzed Expr before
+                     * collecting the constant expressions, preventing the `INVALID_TYPE` expr from being sent to BE.
                      *
                      * But when analyzing the alias, the meta information corresponding to the slot cannot be found
                     * in the catalog, so an error will be reported.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java
index 2062bf26df..21968fd19c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetOperationStmt.java
@@ -48,7 +48,7 @@ import java.util.Set;
  * and we need to mark the slots of resolved exprs as materialized.
  */
 public class SetOperationStmt extends QueryStmt {
-    private final static Logger LOG = LogManager.getLogger(SetOperationStmt.class);
+    private static final Logger LOG = LogManager.getLogger(SetOperationStmt.class);
 
     public enum Operation {
         UNION,
@@ -213,7 +213,8 @@ public class SetOperationStmt extends QueryStmt {
     }
 
     @Override
-    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet) throws AnalysisException {
+    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet)
+            throws AnalysisException {
         getWithClauseTables(analyzer, tableMap, parentViewNameSet);
         for (SetOperand op : operands) {
             op.getQueryStmt().getTables(analyzer, tableMap, parentViewNameSet);
@@ -886,6 +887,7 @@ public class SetOperationStmt extends QueryStmt {
         public Operation getOperation() {
             return operation;
         }
+
         // Used for propagating DISTINCT.
         public void setQualifier(Qualifier qualifier) {
             this.qualifier = qualifier;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java
index 3b1eeec8e9..b753a3ffc1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SetVar.java
@@ -132,7 +132,8 @@ public class SetVar {
         if (getVariable().equalsIgnoreCase(SessionVariable.PREFER_JOIN_METHOD)) {
             String value = getValue().getStringValue();
             if (!value.equalsIgnoreCase("broadcast") && !value.equalsIgnoreCase("shuffle")) {
-                ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR, SessionVariable.PREFER_JOIN_METHOD, value);
+                ErrorReport.reportAnalysisException(ErrorCode.ERR_WRONG_VALUE_FOR_VAR,
+                        SessionVariable.PREFER_JOIN_METHOD, value);
             }
         }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
index 8bfc5ed59d..1b4dc7f4ac 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowAlterStmt.java
@@ -46,7 +46,8 @@ import java.util.List;
 /*
  * ShowAlterStmt: used to show process state of alter statement.
  * Syntax:
- *      SHOW ALTER TABLE [COLUMN | ROLLUP] [FROM dbName] [WHERE TableName="xxx"] [ORDER BY CreateTime DESC] [LIMIT [offset,]rows]
+ *      SHOW ALTER TABLE [COLUMN | ROLLUP] [FROM dbName] [WHERE TableName="xxx"]
+ *      [ORDER BY CreateTime DESC] [LIMIT [offset,]rows]
  */
 public class ShowAlterStmt extends ShowStmt {
     private static final Logger LOG = LogManager.getLogger(ShowAlterStmt.class);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java
index eec29684d1..6e58296c64 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowBackupStmt.java
@@ -177,9 +177,11 @@ public class ShowBackupStmt extends ShowStmt {
             return label -> true;
         }
         if (isAccurateMatch) {
-            return CaseSensibility.LABEL.getCaseSensibility() ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue);
+            return CaseSensibility.LABEL.getCaseSensibility()
+                    ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue);
         } else {
-            PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(labelValue, CaseSensibility.LABEL.getCaseSensibility());
+            PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(
+                    labelValue, CaseSensibility.LABEL.getCaseSensibility());
             return patternMatcher::match;
         }
     }
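
The label filter reformatted in the ShowBackupStmt hunk above either compares
labels directly (accurate match, honoring case sensitivity) or treats the value
as a MySQL LIKE pattern. A sketch of that dispatch, with a simplified
LIKE-to-regex translation ('%' -> '.*', '_' -> '.') standing in for
PatternMatcher:

    import java.util.function.Predicate;
    import java.util.regex.Pattern;

    final class LabelFilterSketch {
        static Predicate<String> labelPredicate(String value, boolean accurate, boolean caseSensitive) {
            if (value == null || value.isEmpty()) {
                return label -> true; // no filter: accept every label
            }
            if (accurate) {
                // exact comparison, with or without case sensitivity
                return caseSensitive ? value::equals : value::equalsIgnoreCase;
            }
            // translate the LIKE pattern: quote everything, then re-open the
            // quoting around the wildcard characters
            String regex = Pattern.quote(value).replace("%", "\\E.*\\Q").replace("_", "\\E.\\Q");
            Pattern p = Pattern.compile(regex, caseSensitive ? 0 : Pattern.CASE_INSENSITIVE);
            return label -> p.matcher(label).matches();
        }
    }
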
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java
index 0339c4f5ac..57b8766b4a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowClusterStmt.java
@@ -59,9 +59,7 @@ public class ShowClusterStmt extends ShowStmt {
     @Override
     public void analyze(Analyzer analyzer) throws AnalysisException {
         if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(),
-                                                                   PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV,
-                                                                                                  PaloPrivilege.NODE_PRIV),
-                                                                                    Operator.OR))) {
+                PrivPredicate.of(PrivBitSet.of(PaloPrivilege.ADMIN_PRIV, PaloPrivilege.NODE_PRIV), Operator.OR))) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "ADMIN");
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java
index 481550ab29..d826da8340 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowExportStmt.java
@@ -164,7 +164,8 @@ public class ShowExportStmt extends ShowStmt {
                     valid = true;
                 }
 
-            } else if (whereExpr instanceof LikePredicate && ((LikePredicate) whereExpr).getOp() == LikePredicate.Operator.LIKE) {
+            } else if (whereExpr instanceof LikePredicate
+                    && ((LikePredicate) whereExpr).getOp() == LikePredicate.Operator.LIKE) {
                 if ("label".equals(leftKey) && whereExpr.getChild(1) instanceof StringLiteral) {
                     label = whereExpr.getChild(1).getStringValue();
                     isLabelUseLike = true;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java
index 197f61df59..fa765da9e4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowGrantsStmt.java
@@ -46,6 +46,7 @@ import com.google.common.base.Preconditions;
 public class ShowGrantsStmt extends ShowStmt {
 
     private static final ShowResultSetMetaData META_DATA;
+
     static {
         ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
         for (String col : AuthProcDir.TITLE_NAMES) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java
index 58c35a9284..82616d3262 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowIndexStmt.java
@@ -69,8 +69,8 @@ public class ShowIndexStmt extends ShowStmt {
         }
         tableName.analyze(analyzer);
 
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), tableName.getDb(), tableName.getTbl(),
-                PrivPredicate.SHOW)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), tableName.getDb(), tableName.getTbl(), PrivPredicate.SHOW)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, analyzer.getQualifiedUser(),
                     tableName.getDb() + ": " + tableName.toString());
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java
index 50f8fcc28b..a30ef18b9f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowLoadProfileStmt.java
@@ -29,7 +29,7 @@ import com.google.common.base.Strings;
 // show load profile "/";   # list all saving load job ids
 // show load profile "/10014"  # show task ids of specified job
 // show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/" # show instance list of the task
-// show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/e0f7390f5363419e-b416a2a799960906" # show instance tree graph
+// show load profile "/10014/e0f7390f5363419e-b416a2a79996083e/e0f7390f5363419e-b416a2a7999" # show instance's graph
 public class ShowLoadProfileStmt extends ShowStmt {
     private static final ShowResultSetMetaData META_DATA_TASK_IDS =
             ShowResultSetMetaData.builder()
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java
index c7b84bcef7..81b9f294dc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPolicyStmt.java
@@ -18,8 +18,6 @@
 package org.apache.doris.analysis;
 
 import org.apache.doris.catalog.Catalog;
-import org.apache.doris.catalog.Column;
-import org.apache.doris.catalog.ScalarType;
 import org.apache.doris.common.ErrorCode;
 import org.apache.doris.common.ErrorReport;
 import org.apache.doris.common.UserException;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java
index 7c0994c434..15be652aed 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowQueryProfileStmt.java
@@ -29,7 +29,7 @@ import com.google.common.base.Strings;
 // show query profile "/";   # list all saving query ids
 // show query profile "/e0f7390f5363419e-b416a2a79996083e"  # show graph of fragments of the query
 // show query profile "/e0f7390f5363419e-b416a2a79996083e/0" # show instance list of the specified fragment
-// show query profile "/e0f7390f5363419e-b416a2a79996083e/0/e0f7390f5363419e-b416a2a799960906" # show graph of the instance
+// show query profile "/e0f7390f5363419e-b416a2a79996083e/0/e0f7390f5363419e-b416a2a799960906" # show instance's graph
 public class ShowQueryProfileStmt extends ShowStmt {
     // This should be same as ProfileManager.PROFILE_HEADERS
     public static final ShowResultSetMetaData META_DATA_QUERY_IDS =
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java
index 0c42178415..4aa2007bd5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRestoreStmt.java
@@ -176,9 +176,11 @@ public class ShowRestoreStmt extends ShowStmt {
             return label -> true;
         }
         if (isAccurateMatch) {
-            return CaseSensibility.LABEL.getCaseSensibility() ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue);
+            return CaseSensibility.LABEL.getCaseSensibility()
+                    ? label -> label.equals(labelValue) : label -> label.equalsIgnoreCase(labelValue);
         } else {
-            PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(labelValue, CaseSensibility.LABEL.getCaseSensibility());
+            PatternMatcher patternMatcher = PatternMatcher.createMysqlPattern(
+                    labelValue, CaseSensibility.LABEL.getCaseSensibility());
             return patternMatcher::match;
         }
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java
index ccf84e2c95..47b9bacda2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRolesStmt.java
@@ -29,6 +29,7 @@ import org.apache.doris.qe.ShowResultSetMetaData;
 
 public class ShowRolesStmt extends ShowStmt {
     private static final ShowResultSetMetaData META_DATA;
+
     static {
         ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder();
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java
index 553151165b..84047f0b3b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowRoutineLoadTaskStmt.java
@@ -128,7 +128,8 @@ public class ShowRoutineLoadTaskStmt extends ShowStmt {
         } // CHECKSTYLE IGNORE THIS LINE
 
         if (!valid) {
-            throw new AnalysisException("show routine load job only support one equal expr which is sames like JobName=\"ILoveDoris\"");
+            throw new AnalysisException("show routine load job only supports one equality expr, "
+                    + "such as JobName=\"ILoveDoris\"");
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java
index 1ecfa5e060..9ae208e936 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowStreamLoadStmt.java
@@ -68,7 +68,8 @@ public class ShowStreamLoadStmt extends ShowStmt {
             .add("StartTime").add("FinishTime")
             .build();
 
-    public ShowStreamLoadStmt(String db, Expr labelExpr, List<OrderByElement> orderByElements, LimitElement limitElement) {
+    public ShowStreamLoadStmt(String db, Expr labelExpr,
+            List<OrderByElement> orderByElements, LimitElement limitElement) {
         this.dbName = db;
         this.whereClause = labelExpr;
         this.orderByElements = orderByElements;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java
index 2842c4f48d..9c7ea82381 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowViewStmt.java
@@ -89,7 +89,8 @@ public class ShowViewStmt extends ShowStmt {
         tbl.analyze(analyzer);
 
         String dbName = tbl.getDb();
-        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbName, getTbl(), PrivPredicate.SHOW)) {
+        if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(
+                ConnectContext.get(), dbName, getTbl(), PrivPredicate.SHOW)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "SHOW VIEW",
                     ConnectContext.get().getQualifiedUser(),
                     ConnectContext.get().getRemoteIP(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java
index fe287e3900..3e0703414b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/SortInfo.java
@@ -40,7 +40,7 @@ import java.util.Set;
  * particular input row (materialize all row slots)
  */
 public class SortInfo {
-    private final static Logger LOG = LogManager.getLogger(SortInfo.class);
+    private static final Logger LOG = LogManager.getLogger(SortInfo.class);
     // All ordering exprs with cost greater than this will be materialized. Since we don't
     // currently have any information about actual function costs, this value is intended to
     // ensure that all expensive functions will be materialized while still leaving simple
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java
index 4f0ea9c059..66c864fb57 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StmtRewriter.java
@@ -182,7 +182,8 @@ public class StmtRewriter {
          * For example:
          * Query: select cs_item_sk, sum(cs_sales_price) from catalog_sales a group by cs_item_sk having ...;
          * Inline view:
-         *     from (select cs_item_sk $ColumnA, sum(cs_sales_price) $ColumnB from catalog_sales a group by cs_item_sk) $TableA
+         *     from (select cs_item_sk $ColumnA, sum(cs_sales_price) $ColumnB
+         *     from catalog_sales a group by cs_item_sk) $TableA
          *
          * Add missing aggregation columns in select list
          * For example:
@@ -595,7 +596,7 @@ public class StmtRewriter {
                     lhsExprs, rhsExprs, updateGroupBy);
         }
 
-        /**
+        /*
          * Situation: The expr is an uncorrelated subquery for outer stmt.
          * Rewrite: Add a limit 1 for subquery.
          * origin stmt: select * from t1 where exists (select * from table2);
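
Completing the example in the comment above: after the rewrite it describes,
the statement would presumably read

    select * from t1 where exists (select * from table2 limit 1);

so the uncorrelated EXISTS subquery can stop after producing a single row.
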
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java
index 12e570296c..b073ff7067 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StorageBackend.java
@@ -38,7 +38,8 @@ public class StorageBackend extends StorageDesc implements ParseNode {
     private StorageType storageType;
     private Map<String, String> properties;
 
-    public StorageBackend(String storageName, String location, StorageType storageType, Map<String, String> properties) {
+    public StorageBackend(String storageName, String location,
+            StorageType storageType, Map<String, String> properties) {
         this.name = storageName;
         this.location = location;
         this.storageType = storageType;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java
index 16eaddf455..a215cc3132 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/StringLiteral.java
@@ -206,7 +206,8 @@ public class StringLiteral extends LiteralExpr {
                 case LARGEINT:
                     if (VariableVarConverters.hasConverter(beConverted)) {
                         try {
-                            return new LargeIntLiteral(String.valueOf(VariableVarConverters.encode(beConverted, value)));
+                            return new LargeIntLiteral(String.valueOf(
+                                    VariableVarConverters.encode(beConverted, value)));
                         } catch (DdlException e) {
                             throw new AnalysisException(e.getMessage());
                         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java
index d3056bbe2f..10b51b80db 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/Subquery.java
@@ -41,7 +41,7 @@ import java.util.List;
  * its own Analyzer context.
  */
 public class Subquery extends Expr {
-    private final static Logger LOG = LoggerFactory.getLogger(Subquery.class);
+    private static final Logger LOG = LoggerFactory.getLogger(Subquery.class);
 
     // The QueryStmt of the subquery.
     protected QueryStmt stmt;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java
index f03a71fb3d..6f6a3d393a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TablePattern.java
@@ -41,6 +41,7 @@ public class TablePattern implements Writable {
     boolean isAnalyzed = false;
 
     public static TablePattern ALL;
+
     static {
         ALL = new TablePattern("*", "*");
         try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java
index 287e45952c..58184b9bb2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TransactionBeginStmt.java
@@ -24,12 +24,15 @@ import org.apache.doris.transaction.TransactionEntry;
 
 public class TransactionBeginStmt extends TransactionStmt {
     private String label = null;
+
     public TransactionBeginStmt() {
         this.label = "";
     }
+
     public TransactionBeginStmt(final String label) {
         this.label = label;
     }
+
     @Override
     public void analyze(Analyzer analyzer) throws AnalysisException, UserException {
         if (label == null || label.isEmpty()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java
index e8ed068a43..bdbb4c88fc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/TypeDef.java
@@ -28,7 +28,6 @@ import org.apache.doris.catalog.StructField;
 import org.apache.doris.catalog.StructType;
 import org.apache.doris.catalog.Type;
 import org.apache.doris.common.AnalysisException;
-import org.apache.doris.common.Config;
 
 import com.google.common.base.Preconditions;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java
index 8727418625..d0da7479aa 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/UseStmt.java
@@ -63,7 +63,8 @@ public class UseStmt extends StatementBase {
         database = ClusterNamespace.getFullName(getClusterName(), database);
 
         if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), database, PrivPredicate.SHOW)) {
-            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR, analyzer.getQualifiedUser(), database);
+            ErrorReport.reportAnalysisException(ErrorCode.ERR_DBACCESS_DENIED_ERROR,
+                    analyzer.getQualifiedUser(), database);
         }
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java
index b87878fa4e..ec24d06a41 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/UserIdentity.java
@@ -98,7 +98,8 @@ public class UserIdentity implements Writable {
     }
 
     public static UserIdentity fromThrift(TUserIdentity tUserIdent) {
-        UserIdentity userIdentity = new UserIdentity(tUserIdent.getUsername(), tUserIdent.getHost(), tUserIdent.is_domain);
+        UserIdentity userIdentity = new UserIdentity(tUserIdent.getUsername(),
+                tUserIdent.getHost(), tUserIdent.is_domain);
         userIdentity.setIsAnalyzed();
         return userIdentity;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java
index a9f3951bff..49e7e92e35 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ValueList.java
@@ -32,6 +32,7 @@ public class ValueList {
         rows = Lists.newArrayList();
         rows.add(row);
     }
+
     public ValueList(List<ArrayList<Expr>> rows) {
         this.rows = rows;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java
index 44ab177c25..98979fc028 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/WithClause.java
@@ -113,7 +113,8 @@ public class WithClause implements ParseNode {
         }
     }
 
-    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap, Set<String> parentViewNameSet) throws AnalysisException {
+    public void getTables(Analyzer analyzer, Map<Long, Table> tableMap,
+            Set<String> parentViewNameSet) throws AnalysisException {
         for (View view : views) {
             QueryStmt stmt = view.getQueryStmt();
             parentViewNameSet.add(view.getName());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
index 2f0b07e461..c529259fbc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java
@@ -192,7 +192,8 @@ public class BackupHandler extends MasterDaemon implements Writable {
     public void createRepository(CreateRepositoryStmt stmt) throws DdlException {
         if (!catalog.getBrokerMgr().containsBroker(stmt.getBrokerName())
                 && stmt.getStorageType() == StorageBackend.StorageType.BROKER) {
-            ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "broker does not exist: " + stmt.getBrokerName());
+            ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
+                    "broker does not exist: " + stmt.getBrokerName());
         }
 
         BlobStorage storage = BlobStorage.create(stmt.getBrokerName(), stmt.getStorageType(), stmt.getProperties());
@@ -328,13 +329,15 @@ public class BackupHandler extends MasterDaemon implements Writable {
             tbl.readLock();
             try {
                 if (olapTbl.existTempPartitions()) {
-                    ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Do not support backup table with temp partitions");
+                    ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
+                            "Do not support backup table with temp partitions");
                 }
 
                 PartitionNames partitionNames = tblRef.getPartitionNames();
                 if (partitionNames != null) {
                     if (partitionNames.isTemp()) {
-                        ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR, "Do not support backup temp partitions");
+                        ErrorReport.reportDdlException(ErrorCode.ERR_COMMON_ERROR,
+                                "Do not support backup temp partitions");
                     }
 
                     for (String partName : partitionNames.getPartitionNames()) {
@@ -671,7 +674,8 @@ public class BackupHandler extends MasterDaemon implements Writable {
     public void write(DataOutput out) throws IOException {
         repoMgr.write(out);
 
-        List<AbstractJob> jobs = dbIdToBackupOrRestoreJobs.values().stream().flatMap(Deque::stream).collect(Collectors.toList());
+        List<AbstractJob> jobs = dbIdToBackupOrRestoreJobs.values()
+                .stream().flatMap(Deque::stream).collect(Collectors.toList());
         out.writeInt(jobs.size());
         for (AbstractJob job : jobs) {
             job.write(out);
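
The reformatted pipeline above flattens a map of per-database job deques into
a single list before serializing it. The same shape in isolation, with types
simplified to plain strings (the ids and labels are made up):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class FlattenJobs {
        public static void main(String[] args) {
            Map<Long, Deque<String>> byDb = Map.of(
                    10001L, new ArrayDeque<>(List.of("backup-1", "restore-2")),
                    10002L, new ArrayDeque<>(List.of("backup-3")));

            // Deque::stream flattens each per-db queue into one combined list.
            List<String> jobs = byDb.values().stream()
                    .flatMap(Deque::stream)
                    .collect(Collectors.toList());
            System.out.println(jobs.size()); // 3
        }
    }
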
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
index 96d89a2707..da07f64342 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BackupJob.java
@@ -162,7 +162,8 @@ public class BackupJob extends AbstractJob {
             // snapshot task could not finish if status_code is OLAP_ERR_VERSION_ALREADY_MERGED,
             // so cancel this job
             if (request.getTaskStatus().getStatusCode() == TStatusCode.OLAP_ERR_VERSION_ALREADY_MERGED) {
-                status = new Status(ErrCode.OLAP_VERSION_ALREADY_MERGED, "make snapshot failed, version already merged");
+                status = new Status(ErrCode.OLAP_VERSION_ALREADY_MERGED,
+                        "make snapshot failed, version already merged");
                 cancelInternal();
             }
             return false;
@@ -427,7 +428,8 @@ public class BackupJob extends AbstractJob {
         }
     }
 
-    private void prepareSnapshotTaskForOlapTable(OlapTable olapTable, TableRef backupTableRef, AgentBatchTask batchTask) {
+    private void prepareSnapshotTaskForOlapTable(OlapTable olapTable,
+            TableRef backupTableRef, AgentBatchTask batchTask) {
         olapTable.readLock();
         try {
             // check backup table again
@@ -654,8 +656,8 @@ public class BackupJob extends AbstractJob {
             File jobDir = new File(localJobDirPath.toString());
             if (jobDir.exists()) {
                 // if dir exists, delete it first
-                Files.walk(localJobDirPath,
-                           FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
+                Files.walk(localJobDirPath, FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder())
+                        .map(Path::toFile).forEach(File::delete);
             }
             if (!jobDir.mkdir()) {
                 status = new Status(ErrCode.COMMON_ERROR, "Failed to create tmp dir: " + localJobDirPath);
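
The Files.walk chain being rewrapped here is the standard JDK idiom for
deleting a directory tree: stream every path under the root, sort in reverse
order so children come before their parents, then delete each one. A
self-contained sketch (the directory path is a placeholder):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.FileVisitOption;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Comparator;

    public class DeleteTree {
        public static void main(String[] args) throws IOException {
            Path dir = Paths.get("/tmp/backup_job_dir"); // placeholder
            if (Files.exists(dir)) {
                // reverseOrder() yields the deepest paths first, so every
                // directory is already empty when File::delete reaches it.
                Files.walk(dir, FileVisitOption.FOLLOW_LINKS)
                        .sorted(Comparator.reverseOrder())
                        .map(Path::toFile)
                        .forEach(File::delete);
            }
        }
    }
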
@@ -673,7 +675,8 @@ public class BackupJob extends AbstractJob {
             localMetaInfoFilePath = metaInfoFile.getAbsolutePath();
 
             // 3. save job info file
-            jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId, getContent(), backupMeta, snapshotInfos);
+            jobInfo = BackupJobInfo.fromCatalog(createTime, label, dbName, dbId,
+                    getContent(), backupMeta, snapshotInfos);
             LOG.debug("job info: {}. {}", jobInfo, this);
             File jobInfoFile = new File(jobDir, Repository.PREFIX_JOB_INFO + createTimeStr);
             if (!jobInfoFile.createNewFile()) {
@@ -805,8 +808,8 @@ public class BackupJob extends AbstractJob {
             try {
                 File jobDir = new File(localJobDirPath.toString());
                 if (jobDir.exists()) {
-                    Files.walk(localJobDirPath,
-                               FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
+                    Files.walk(localJobDirPath, FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder())
+                            .map(Path::toFile).forEach(File::delete);
                 }
             } catch (Exception e) {
                 LOG.warn("failed to clean the backup job dir: " + localJobDirPath.toString());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java
index 9295095f40..f703634413 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/BrokerStorage.java
@@ -126,8 +126,8 @@ public class BrokerStorage extends BlobStorage {
         File localFile = new File(localFilePath);
         if (localFile.exists()) {
             try {
-                Files.walk(Paths.get(localFilePath),
-                    FileVisitOption.FOLLOW_LINKS).sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
+                Files.walk(Paths.get(localFilePath), FileVisitOption.FOLLOW_LINKS)
+                        .sorted(Comparator.reverseOrder()).map(Path::toFile).forEach(File::delete);
             } catch (IOException e) {
                 return new Status(Status.ErrCode.COMMON_ERROR, "failed to delete exist local file: " + localFilePath);
             }
@@ -294,7 +294,8 @@ public class BrokerStorage extends BlobStorage {
             }
         } finally {
             Status closeStatus = closeWriter(client, address, fd);
-            if (closeStatus.getErrCode() == Status.ErrCode.BAD_CONNECTION || status.getErrCode() == Status.ErrCode.BAD_CONNECTION) {
+            if (closeStatus.getErrCode() == Status.ErrCode.BAD_CONNECTION
+                    || status.getErrCode() == Status.ErrCode.BAD_CONNECTION) {
                 ClientPool.brokerPool.invalidateObject(address, client);
             } else {
                 ClientPool.brokerPool.returnObject(address, client);
@@ -340,7 +341,8 @@ public class BrokerStorage extends BlobStorage {
                 int tryTimes = 0;
                 while (tryTimes < 3) {
                     try {
-                        TBrokerPWriteRequest req = new TBrokerPWriteRequest(TBrokerVersion.VERSION_ONE, fd, writeOffset, bb);
+                        TBrokerPWriteRequest req
+                                = new TBrokerPWriteRequest(TBrokerVersion.VERSION_ONE, fd, writeOffset, bb);
                         TBrokerOperationStatus opst = client.pwrite(req);
                         if (opst.getStatusCode() != TBrokerOperationStatusCode.OK) {
                             // pwrite return failure.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java
index 5a99017b2c..4b5b13ad70 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/Repository.java
@@ -366,7 +366,8 @@ public class Repository implements Writable {
 
     // create remote tablet snapshot path
     // eg:
-    // /location/__palo_repository_repo_name/__ss_my_ss1/__ss_content/__db_10001/__tbl_10020/__part_10031/__idx_10032/__10023/__3481721
+    // /location/__palo_repository_repo_name/__ss_my_ss1/__ss_content/
+    // __db_10001/__tbl_10020/__part_10031/__idx_10032/__10023/__3481721
     public String assembleRemoteSnapshotPath(String label, SnapshotInfo info) {
         String path = Joiner.on(PATH_DELIMITER).join(location,
                 joinPrefix(PREFIX_REPO, name),
@@ -453,7 +454,8 @@ public class Repository implements Writable {
         if (storage instanceof BrokerStorage) {
             // this may be a retry, so we should first delete remote file
             String tmpRemotePath = assembleFileNameWithSuffix(remoteFilePath, SUFFIX_TMP_FILE);
-            LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}", localFilePath, tmpRemotePath, finalRemotePath);
+            LOG.debug("get md5sum of file: {}. tmp remote path: {}. final remote path: {}",
+                    localFilePath, tmpRemotePath, finalRemotePath);
             st = storage.delete(tmpRemotePath);
             if (!st.ok()) {
                 return st;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
index 25267dbad8..78148acc1f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
@@ -511,8 +511,7 @@ public class RestoreJob extends AbstractJob {
             }
         }
         for (BackupJobInfo.BackupOdbcResourceInfo backupOdbcResourceInfo : jobInfo.newBackupObjects.odbcResources) {
-            Resource resource = Catalog.getCurrentCatalog().getResourceMgr().
-                    getResource(backupOdbcResourceInfo.name);
+            Resource resource = Catalog.getCurrentCatalog().getResourceMgr().getResource(backupOdbcResourceInfo.name);
             if (resource == null) {
                 continue;
             }
@@ -538,7 +537,8 @@ public class RestoreJob extends AbstractJob {
                     // table already exist, check schema
                     if (localTbl.getType() != TableType.OLAP) {
                         status = new Status(ErrCode.COMMON_ERROR,
-                                "The type of local table should be same as type of remote table: " + remoteTbl.getName());
+                                "The type of local table should be same as type of remote table: "
+                                        + remoteTbl.getName());
                         return;
                     }
                     OlapTable localOlapTbl = (OlapTable) localTbl;
@@ -554,8 +554,10 @@ public class RestoreJob extends AbstractJob {
                         }
                         LOG.debug("get intersect part names: {}, job: {}", intersectPartNames, this);
                         if (!localOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames)
-                                .equals(remoteOlapTbl.getSignature(BackupHandler.SIGNATURE_VERSION, intersectPartNames))) {
-                            status = new Status(ErrCode.COMMON_ERROR, "Table " + jobInfo.getAliasByOriginNameIfSet(tableName)
+                                .equals(remoteOlapTbl.getSignature(
+                                        BackupHandler.SIGNATURE_VERSION, intersectPartNames))) {
+                            status = new Status(ErrCode.COMMON_ERROR, "Table "
+                                    + jobInfo.getAliasByOriginNameIfSet(tableName)
                                     + " already exist but with different schema");
                             return;
                         }
@@ -571,10 +573,12 @@ public class RestoreJob extends AbstractJob {
                                 if (localPartInfo.getType() == PartitionType.RANGE
                                         || localPartInfo.getType() == PartitionType.LIST) {
                                     PartitionItem localItem = localPartInfo.getItem(localPartition.getId());
-                                    PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo().getItem(backupPartInfo.id);
+                                    PartitionItem remoteItem = remoteOlapTbl
+                                            .getPartitionInfo().getItem(backupPartInfo.id);
                                     if (localItem.equals(remoteItem)) {
                                         // Same partition, same range
-                                        if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, partitionName, tblInfo)) {
+                                        if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition,
+                                                localTbl, backupPartInfo, partitionName, tblInfo)) {
                                             return;
                                         }
                                     } else {
@@ -586,7 +590,8 @@ public class RestoreJob extends AbstractJob {
                                     }
                                 } else {
                                     // If this is a single partitioned table.
-                                    if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl, backupPartInfo, partitionName, tblInfo)) {
+                                    if (genFileMappingWhenBackupReplicasEqual(localPartInfo, localPartition, localTbl,
+                                            backupPartInfo, partitionName, tblInfo)) {
                                         return;
                                     }
                                 }
@@ -596,7 +601,8 @@ public class RestoreJob extends AbstractJob {
                                 PartitionInfo localPartitionInfo = localOlapTbl.getPartitionInfo();
                                 if (localPartitionInfo.getType() == PartitionType.RANGE
                                         || localPartitionInfo.getType() == PartitionType.LIST) {
-                                    PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo().getItem(backupPartInfo.id);
+                                    PartitionItem remoteItem = remoteOlapTbl.getPartitionInfo()
+                                            .getItem(backupPartInfo.id);
                                     if (localPartitionInfo.getAnyIntersectItem(remoteItem, false) != null) {
                                         status = new Status(ErrCode.COMMON_ERROR, "Partition " + partitionName
                                                 + " in table " + localTbl.getName()
@@ -614,7 +620,8 @@ public class RestoreJob extends AbstractJob {
                                         restoredPartitions.add(Pair.create(localOlapTbl.getName(), restorePart));
                                     }
                                 } else {
-                                    // It is impossible that a single partitioned table exist without any existing partition
+                                    // It is impossible for a single partitioned table to exist
+                                    // without any existing partition
                                     status = new Status(ErrCode.COMMON_ERROR,
                                             "No partition exist in single partitioned table " + localOlapTbl.getName());
                                     return;
@@ -876,7 +883,8 @@ public class RestoreJob extends AbstractJob {
         }
 
         // check disk capacity
-        org.apache.doris.common.Status st = Catalog.getCurrentSystemInfo().checkExceedDiskCapacityLimit(bePathsMap, true);
+        org.apache.doris.common.Status st = Catalog.getCurrentSystemInfo()
+                .checkExceedDiskCapacityLimit(bePathsMap, true);
         if (!st.ok()) {
             status = new Status(ErrCode.COMMON_ERROR, st.getErrorMsg());
             return;
@@ -923,8 +931,8 @@ public class RestoreJob extends AbstractJob {
         }
     }
 
-    private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition, Table localTbl,
-                                                          BackupPartitionInfo backupPartInfo, String partitionName, BackupOlapTableInfo tblInfo) {
+    private boolean genFileMappingWhenBackupReplicasEqual(PartitionInfo localPartInfo, Partition localPartition,
+            Table localTbl, BackupPartitionInfo backupPartInfo, String partitionName, BackupOlapTableInfo tblInfo) {
         short restoreReplicaNum = replicaAlloc.getTotalReplicaNum();
         short localReplicaNum = localPartInfo.getReplicaAllocation(localPartition.getId()).getTotalReplicaNum();
         if (localReplicaNum != restoreReplicaNum) {
@@ -1116,7 +1124,8 @@ public class RestoreJob extends AbstractJob {
             OlapTable remoteTbl = (OlapTable) backupMeta.getTable(entry.first);
             PartitionInfo localPartitionInfo = localTbl.getPartitionInfo();
             PartitionInfo remotePartitionInfo = remoteTbl.getPartitionInfo();
-            BackupPartitionInfo backupPartitionInfo = jobInfo.getOlapTableInfo(entry.first).getPartInfo(restorePart.getName());
+            BackupPartitionInfo backupPartitionInfo = jobInfo.getOlapTableInfo(entry.first)
+                    .getPartInfo(restorePart.getName());
             long remotePartId = backupPartitionInfo.id;
             DataProperty remoteDataProperty = remotePartitionInfo.getDataProperty(remotePartId);
             localPartitionInfo.addPartition(restorePart.getId(), false, remotePartitionInfo.getItem(remotePartId),
@@ -1271,9 +1280,9 @@ public class RestoreJob extends AbstractJob {
 
                                 MaterializedIndex idx = part.getIndex(info.getIndexId());
                                 if (idx == null) {
-                                    status = new Status(ErrCode.NOT_FOUND,
-                                            "index " + info.getIndexId() + " does not exist in partion " + part.getName()
-                                                    + "of restored table " + tbl.getName());
+                                    status = new Status(ErrCode.NOT_FOUND, "index " + info.getIndexId()
+                                            + " does not exist in partition " + part.getName()
+                                            + " of restored table " + tbl.getName());
                                     return;
                                 }
 
@@ -1745,7 +1754,8 @@ public class RestoreJob extends AbstractJob {
             for (Map.Entry<Long, Long> entry : restoredVersionInfo.row(tblId).entrySet()) {
                 out.writeLong(entry.getKey());
                 out.writeLong(entry.getValue());
-                // It is version hash in the past, but it useless but should compatible with old version so that write 0 here
+                // This used to be the version hash. It is useless now, but we
+                // still write 0 here for compatibility with older versions.
                 out.writeLong(0L);
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java b/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java
index a67b0992e5..ae89175ebd 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/S3Storage.java
@@ -104,7 +104,8 @@ public class S3Storage extends BlobStorage {
         // If not, it will not be converted ( https://github.com/aws/aws-sdk-java-v2/pull/763),
         // but the endpoints of many cloud service providers for object storage do not start with s3,
         // so they cannot be converted to virtual hosted-sytle.
-        // Some of them, such as aliyun's oss, only support virtual hosted-sytle, and some of them(ceph) may only support
+        // Some of them, such as aliyun's oss, only support virtual hosted-style,
+        // and some of them (ceph) may only support
         // path-style, so we need to do some additional conversion.
         //
         //          use_path_style          |     !use_path_style
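
For illustration, the difference the comment describes can be reduced to where
the bucket name ends up in the URL; this helper is a hypothetical sketch, not
a Doris or AWS SDK API:

    public class S3UrlStyle {
        // path-style:           https://endpoint/bucket/key
        // virtual hosted-style: https://bucket.endpoint/key
        static String objectUrl(String endpoint, String bucket, String key, boolean usePathStyle) {
            return usePathStyle
                    ? "https://" + endpoint + "/" + bucket + "/" + key
                    : "https://" + bucket + "." + endpoint + "/" + key;
        }

        public static void main(String[] args) {
            // e.g. aliyun OSS endpoints want hosted-style; ceph often wants path-style
            System.out.println(objectUrl("oss-cn-hangzhou.aliyuncs.com", "mybucket", "a/b.dat", false));
            System.out.println(objectUrl("ceph.example.com", "mybucket", "a/b.dat", true));
        }
    }
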
@@ -202,7 +203,8 @@ public class S3Storage extends BlobStorage {
         }
         try {
             S3URI uri = S3URI.create(remoteFilePath, forceHostedStyle);
-            GetObjectResponse response = getClient(uri.getVirtualBucket()).getObject(GetObjectRequest.builder().bucket(uri.getBucket()).key(uri.getKey()).build(), localFile.toPath());
+            GetObjectResponse response = getClient(uri.getVirtualBucket()).getObject(
+                    GetObjectRequest.builder().bucket(uri.getBucket()).key(uri.getKey()).build(), localFile.toPath());
             if (localFile.length() == fileSize) {
                 LOG.info(
                         "finished to download from {} to {} with size: {}. cost {} ms",
@@ -348,7 +350,9 @@ public class S3Storage extends BlobStorage {
                 return Status.OK;
             }
             for (FileStatus fileStatus : files) {
-                RemoteFile remoteFile = new RemoteFile(fileNameOnly ? fileStatus.getPath().getName() : fileStatus.getPath().toString(), !fileStatus.isDirectory(), fileStatus.isDirectory() ? -1 : fileStatus.getLen());
+                RemoteFile remoteFile = new RemoteFile(
+                        fileNameOnly ? fileStatus.getPath().getName() : fileStatus.getPath().toString(),
+                        !fileStatus.isDirectory(), fileStatus.isDirectory() ? -1 : fileStatus.getLen());
                 result.add(remoteFile);
             }
         } catch (FileNotFoundException e) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java
index 3bf66989fd..db99f4de43 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/blockrule/SqlBlockRuleMgr.java
@@ -307,4 +307,4 @@ public class SqlBlockRuleMgr implements Writable {
         String json = Text.readString(in);
         return GsonUtils.GSON.fromJson(json, SqlBlockRuleMgr.class);
     }
-}
\ No newline at end of file
+}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java
index efa1c0c2f1..d0b5e28733 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateFunction.java
@@ -101,18 +101,19 @@ public class AggregateFunction extends Function {
     }
 
     public AggregateFunction(FunctionName fnName, List<Type> argTypes,
-                             Type retType, Type intermediateType,
-                             URI location, String updateFnSymbol, String initFnSymbol,
-                             String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol,
-                             String removeFnSymbol, String finalizeFnSymbol) {
+            Type retType, Type intermediateType,
+            URI location, String updateFnSymbol, String initFnSymbol,
+            String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol,
+            String removeFnSymbol, String finalizeFnSymbol) {
         this(fnName, argTypes, retType, intermediateType, location, updateFnSymbol, initFnSymbol, serializeFnSymbol,
                 mergeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, false);
     }
 
     public AggregateFunction(FunctionName fnName, List<Type> argTypes,
-                             Type retType, Type intermediateType, boolean hasVarArgs) {
+            Type retType, Type intermediateType, boolean hasVarArgs) {
         super(fnName, argTypes, retType, hasVarArgs);
-        this.intermediateType = (intermediateType != null && intermediateType.equals(retType)) ? null : intermediateType;
+        this.intermediateType = (intermediateType != null && intermediateType.equals(retType))
+                ? null : intermediateType;
         ignoresDistinct = false;
         isAnalyticFn = false;
         isAggregateFn = true;
@@ -120,19 +121,21 @@ public class AggregateFunction extends Function {
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  boolean ignoresDistinct,
-                                                  boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty) {
-        return createBuiltin(name, argTypes, retType, intermediateType, false, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty);
+            List<Type> argTypes, Type retType, Type intermediateType,
+            boolean ignoresDistinct,
+            boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty) {
+        return createBuiltin(name, argTypes, retType, intermediateType, false,
+                ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty);
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  boolean hasVarArgs, boolean ignoresDistinct,
-                                                  boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty) {
-        AggregateFunction fn = new AggregateFunction(new FunctionName(name), argTypes, retType, intermediateType, hasVarArgs);
+            List<Type> argTypes, Type retType, Type intermediateType,
+            boolean hasVarArgs, boolean ignoresDistinct,
+            boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty) {
+        AggregateFunction fn = new AggregateFunction(new FunctionName(name),
+                argTypes, retType, intermediateType, hasVarArgs);
         fn.setBinaryType(TFunctionBinaryType.BUILTIN);
         fn.ignoresDistinct = ignoresDistinct;
         fn.isAnalyticFn = isAnalyticFn;
@@ -146,7 +149,8 @@ public class AggregateFunction extends Function {
                              URI location, String updateFnSymbol, String initFnSymbol,
                              String serializeFnSymbol, String mergeFnSymbol, String getValueFnSymbol,
                              String removeFnSymbol, String finalizeFnSymbol, boolean vectorized) {
-        this(fnName, argTypes, retType, intermediateType, false, location, updateFnSymbol, initFnSymbol, serializeFnSymbol,
+        this(fnName, argTypes, retType, intermediateType, false, location,
+                updateFnSymbol, initFnSymbol, serializeFnSymbol,
                 mergeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, vectorized);
     }
 
@@ -157,8 +161,10 @@ public class AggregateFunction extends Function {
                              String removeFnSymbol, String finalizeFnSymbol, boolean vectorized) {
         // only `count` is always not nullable, other aggregate function is always nullable
         super(fnName, argTypes, retType, hasVarArgs, vectorized,
-                AggregateFunction.NOT_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) ? NullableMode.ALWAYS_NOT_NULLABLE :
-                AggregateFunction.ALWAYS_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction()) ? NullableMode.ALWAYS_NULLABLE : NullableMode.DEPEND_ON_ARGUMENT);
+                AggregateFunction.NOT_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction())
+                        ? NullableMode.ALWAYS_NOT_NULLABLE :
+                AggregateFunction.ALWAYS_NULLABLE_AGGREGATE_FUNCTION_NAME_SET.contains(fnName.getFunction())
+                        ? NullableMode.ALWAYS_NULLABLE : NullableMode.DEPEND_ON_ARGUMENT);
         setLocation(location);
         this.intermediateType = (intermediateType.equals(retType)) ? null : intermediateType;
         this.updateFnSymbol = updateFnSymbol;
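
The wrapped nested ternary above picks a NullableMode from the function name;
per the comment, only `count` is always not nullable. An alternative that
avoids the long conditional entirely is to extract a helper -- a hypothetical
refactoring sketch with stand-in name sets, not part of this commit:

    import java.util.Set;

    public class NullableModeDemo {
        enum NullableMode { ALWAYS_NOT_NULLABLE, ALWAYS_NULLABLE, DEPEND_ON_ARGUMENT }

        // Stand-ins for NOT_NULLABLE_AGGREGATE_FUNCTION_NAME_SET and
        // ALWAYS_NULLABLE_AGGREGATE_FUNCTION_NAME_SET; the members are assumed.
        static final Set<String> NOT_NULLABLE_FNS = Set.of("count");
        static final Set<String> ALWAYS_NULLABLE_FNS = Set.of("stddev_samp", "variance_samp");

        static NullableMode nullableMode(String fnName) {
            if (NOT_NULLABLE_FNS.contains(fnName)) {
                return NullableMode.ALWAYS_NOT_NULLABLE;
            }
            if (ALWAYS_NULLABLE_FNS.contains(fnName)) {
                return NullableMode.ALWAYS_NULLABLE;
            }
            return NullableMode.DEPEND_ON_ARGUMENT;
        }

        public static void main(String[] args) {
            System.out.println(nullableMode("count")); // ALWAYS_NOT_NULLABLE
            System.out.println(nullableMode("sum"));   // DEPEND_ON_ARGUMENT
        }
    }
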
@@ -175,30 +181,31 @@ public class AggregateFunction extends Function {
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct,
-                                                  boolean isAnalyticFn, boolean returnsNonNullOnEmpty) {
+            List<Type> argTypes, Type retType, Type intermediateType,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct,
+            boolean isAnalyticFn, boolean returnsNonNullOnEmpty) {
         return createBuiltin(name, argTypes, retType, intermediateType,
                 initFnSymbol, updateFnSymbol, mergeFnSymbol,
                 serializeFnSymbol, finalizeFnSymbol, ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty, false);
     }
+
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct,
-                                                  boolean isAnalyticFn, boolean returnsNonNullOnEmpty, boolean vectorized) {
+            List<Type> argTypes, Type retType, Type intermediateType,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String finalizeFnSymbol, boolean ignoresDistinct,
+            boolean isAnalyticFn, boolean returnsNonNullOnEmpty, boolean vectorized) {
         return createBuiltin(name, argTypes, retType, intermediateType, initFnSymbol,
                 updateFnSymbol, mergeFnSymbol, serializeFnSymbol, null, null, finalizeFnSymbol,
                 ignoresDistinct, isAnalyticFn, returnsNonNullOnEmpty, vectorized);
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
-                                                  String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty) {
+            List<Type> argTypes, Type retType, Type intermediateType,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
+            String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty) {
         return createBuiltin(name, argTypes, retType, intermediateType,
                 initFnSymbol, updateFnSymbol, mergeFnSymbol,
                 serializeFnSymbol, getValueFnSymbol, removeFnSymbol,
@@ -206,11 +213,11 @@ public class AggregateFunction extends Function {
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
-                                                  String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty, boolean vectorized) {
+            List<Type> argTypes, Type retType, Type intermediateType,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
+            String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty, boolean vectorized) {
         return createBuiltin(name, argTypes, retType, intermediateType, false,
                 initFnSymbol, updateFnSymbol, mergeFnSymbol,
                 serializeFnSymbol, getValueFnSymbol, removeFnSymbol,
@@ -218,22 +225,22 @@ public class AggregateFunction extends Function {
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType, boolean hasVarArgs,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
-                                                  String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty) {
+            List<Type> argTypes, Type retType, Type intermediateType, boolean hasVarArgs,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
+            String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty) {
         return createBuiltin(name, argTypes, retType, intermediateType, hasVarArgs, initFnSymbol, updateFnSymbol,
                 mergeFnSymbol, serializeFnSymbol, getValueFnSymbol, removeFnSymbol, finalizeFnSymbol, ignoresDistinct,
                 isAnalyticFn, returnsNonNullOnEmpty, false);
     }
 
     public static AggregateFunction createBuiltin(String name,
-                                                  List<Type> argTypes, Type retType, Type intermediateType, boolean hasVarArgs,
-                                                  String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
-                                                  String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
-                                                  String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
-                                                  boolean returnsNonNullOnEmpty, boolean vectorized) {
+            List<Type> argTypes, Type retType, Type intermediateType, boolean hasVarArgs,
+            String initFnSymbol, String updateFnSymbol, String mergeFnSymbol,
+            String serializeFnSymbol, String getValueFnSymbol, String removeFnSymbol,
+            String finalizeFnSymbol, boolean ignoresDistinct, boolean isAnalyticFn,
+            boolean returnsNonNullOnEmpty, boolean vectorized) {
         AggregateFunction fn = new AggregateFunction(new FunctionName(name),
                 argTypes, retType, intermediateType, hasVarArgs, null, updateFnSymbol, initFnSymbol,
                 serializeFnSymbol, mergeFnSymbol, getValueFnSymbol, removeFnSymbol,
@@ -397,6 +404,7 @@ public class AggregateFunction extends Function {
             this.removeFnSymbol = symbol;
             return this;
         }
+
         public AggregateFunctionBuilder binaryType(TFunctionBinaryType binaryType) {
             this.binaryType = binaryType;
             return this;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java
index ec25ade6ee..386e4e6433 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AggregateType.java
@@ -109,6 +109,7 @@ public enum AggregateType {
 
         compatibilityMap.put(NONE, EnumSet.copyOf(excObjectStored));
     }
+
     private final String sqlName;
 
     private AggregateType(String sqlName) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java
index c0c97530a0..25097bb24e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/AuthType.java
@@ -18,7 +18,7 @@
 package org.apache.doris.catalog;
 
 /**
- * Define different auth type for external table such as hive/iceberg,
+ * Define different auth types for external tables such as hive/iceberg
  * so that BE can make secured calls under fileStorageSystem (with kerberos enabled)
  */
 public enum AuthType {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java
index c0c09375c4..8802df9747 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/BrokerTable.java
@@ -190,14 +190,14 @@ public class BrokerTable extends Table {
                 case "parquet":
                     break;
                 default:
-                    throw new DdlException("Invalid file type: " + copiedProps.toString() + ".Only support csv and parquet.");
+                    throw new DdlException("Invalid file type: " + copiedProps + ".Only support csv and parquet.");
             }
         }
 
         copiedProps.remove(FILE_FORMAT);
 
         if (!copiedProps.isEmpty()) {
-            throw new DdlException("Unknown table properties: " + copiedProps.toString());
+            throw new DdlException("Unknown table properties: " + copiedProps);
         }
     }
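
Dropping the explicit .toString() calls above is behavior-preserving: the string concatenation operator already converts its operands through String.valueOf, which also tolerates null where a direct toString() call would throw. A small sketch:

    import java.util.HashMap;
    import java.util.Map;

    public class ConcatDemo {
        public static void main(String[] args) {
            Map<String, String> props = new HashMap<>();
            props.put("file_format", "orc");
            String a = "Unknown table properties: " + props.toString(); // redundant call
            String b = "Unknown table properties: " + props;            // same result
            System.out.println(a.equals(b)); // true
        }
    }
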
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
index 1d649a29a7..9bf9608a45 100755
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -555,22 +555,26 @@ public class Catalog {
         this.metaContext.setThreadLocalInfo();
 
         this.stat = new TabletSchedulerStat();
-        this.tabletScheduler = new TabletScheduler(this, systemInfo, tabletInvertedIndex, stat, Config.tablet_rebalancer_type);
+        this.tabletScheduler = new TabletScheduler(this, systemInfo,
+                tabletInvertedIndex, stat, Config.tablet_rebalancer_type);
         this.tabletChecker = new TabletChecker(this, systemInfo, tabletScheduler, stat);
 
         // The pendingLoadTaskScheduler's queue size should not be less than Config.desired_max_waiting_jobs.
         // So that we can guarantee that all submitted load jobs can be scheduled without being starved.
-        this.pendingLoadTaskScheduler = new MasterTaskExecutor("pending_load_task_scheduler", Config.async_pending_load_task_pool_size,
+        this.pendingLoadTaskScheduler = new MasterTaskExecutor("pending_load_task_scheduler",
+                Config.async_pending_load_task_pool_size,
                 Config.desired_max_waiting_jobs, !isCheckpointCatalog);
         // The loadingLoadTaskScheduler's queue size is unlimited, so that it can receive all loading tasks
         // created after pending tasks finish. And don't worry about the high concurrency, because the
         // concurrency is limited by Config.desired_max_waiting_jobs and Config.async_loading_load_task_pool_size.
-        this.loadingLoadTaskScheduler = new MasterTaskExecutor("loading_load_task_scheduler", Config.async_loading_load_task_pool_size,
+        this.loadingLoadTaskScheduler = new MasterTaskExecutor("loading_load_task_scheduler",
+                Config.async_loading_load_task_pool_size,
                 Integer.MAX_VALUE, !isCheckpointCatalog);
 
         this.loadJobScheduler = new LoadJobScheduler();
         this.loadManager = new LoadManager(loadJobScheduler);
-        this.streamLoadRecordMgr = new StreamLoadRecordMgr("stream_load_record_manager", Config.fetch_stream_load_record_interval_second * 1000);
+        this.streamLoadRecordMgr = new StreamLoadRecordMgr("stream_load_record_manager",
+                Config.fetch_stream_load_record_interval_second * 1000L);
         this.loadEtlChecker = new LoadEtlChecker(loadManager);
         this.loadLoadingChecker = new LoadLoadingChecker(loadManager);
         this.routineLoadScheduler = new RoutineLoadScheduler(routineLoadManager);
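
The interval multiplication above now uses the long literal 1000L: with a plain 1000, the int-by-int product is evaluated in 32-bit arithmetic and can overflow before the result is widened to long. A minimal sketch with hypothetical values (not the real config default):

    public class OverflowDemo {
        public static void main(String[] args) {
            int intervalSeconds = 30 * 24 * 60 * 60;  // 2592000, fits in an int
            long wrong = intervalSeconds * 1000;      // int overflow, then widening
            long right = intervalSeconds * 1000L;     // promoted to long before multiplying
            System.out.println(wrong);  // -1702967296
            System.out.println(right);  // 2592000000
        }
    }
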
@@ -776,7 +780,8 @@ public class Catalog {
         // 1. check and create dirs and files
         File meta = new File(metaDir);
         if (!meta.exists()) {
-            LOG.warn("Doris' meta dir {} does not exist. You need to create it before starting FE", meta.getAbsolutePath());
+            LOG.warn("Doris' meta dir {} does not exist."
+                    + " You need to create it before starting FE", meta.getAbsolutePath());
             throw new Exception(meta.getAbsolutePath() + " does not exist, will exit");
         }
 
@@ -894,9 +899,11 @@ public class Catalog {
                     // nodeName should be like "192.168.1.1_9217_1620296111213"
                     // and the selfNode should be the prefix of nodeName.
                     // If not, it means that the ip used last time is different from this time, which is not allowed.
-                    // But is metadata_failure_recovery is true, we will not check it because this may be a FE migration.
+                    // But if metadata_failure_recovery is true,
+                    // we will not check it because this may be a FE migration.
                     String[] split = nodeName.split("_");
-                    if (Config.metadata_failure_recovery.equals("false") && !selfNode.first.equalsIgnoreCase(split[0])) {
+                    if (Config.metadata_failure_recovery.equals("false")
+                            && !selfNode.first.equalsIgnoreCase(split[0])) {
                         throw new IOException("the self host " + selfNode.first
                                 + " does not equal to the host in ROLE"
                                 + " file " + split[0] + ". You need to set 'priority_networks' config"
@@ -969,7 +976,8 @@ public class Catalog {
             if (!versionFile.exists()) {
                 // If the version file doesn't exist, download it from helper node
                 if (!getVersionFileFromHelper(rightHelperNode)) {
-                    throw new IOException("fail to download version file from " + rightHelperNode.first + " will exit.");
+                    throw new IOException("fail to download version file from "
+                            + rightHelperNode.first + " will exit.");
                 }
 
                 // NOTE: cluster_id will be init when Storage object is constructed,
@@ -1008,7 +1016,8 @@ public class Catalog {
                         Preconditions.checkNotNull(token);
                         Preconditions.checkNotNull(remoteToken);
                         if (!token.equals(remoteToken)) {
-                            throw new IOException("token is not equal with helper node " + rightHelperNode.first + ". will exit.");
+                            throw new IOException("token is not equal with helper node "
+                                    + rightHelperNode.first + ". will exit.");
                         }
                     }
                 } catch (Exception e) {
@@ -1533,7 +1542,8 @@ public class Catalog {
             connection.setReadTimeout(HTTP_TIMEOUT_SECOND * 1000);
 
             String response;
-            try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
+            try (BufferedReader bufferedReader
+                    = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
                 String line;
                 StringBuilder sb = new StringBuilder();
                 while ((line = bufferedReader.readLine()) != null) {
@@ -1612,7 +1622,7 @@ public class Catalog {
         LOG.info("finished replay masterInfo from image");
         return newChecksum;
     }
-    
+
     public long loadFrontends(DataInputStream dis, long checksum) throws IOException {
         int size = dis.readInt();
         long newChecksum = checksum ^ size;
@@ -1727,14 +1737,16 @@ public class Catalog {
         long newChecksum = checksum ^ size;
         if (size > 0) {
             // There should be no old alter jobs, if exist throw exception, should not use this FE version
-            throw new IOException("There are [" + size + "] old alter jobs. Please downgrade FE to an older version and handle residual jobs");
+            throw new IOException("There are [" + size + "] old alter jobs."
+                    + " Please downgrade FE to an older version and handle residual jobs");
         }
 
         // finished or cancelled jobs
         size = dis.readInt();
         newChecksum ^= size;
         if (size > 0) {
-            throw new IOException("There are [" + size + "] old finished or cancelled alter jobs. Please downgrade FE to an older version and handle residual jobs");
+            throw new IOException("There are [" + size + "] old finished or cancelled alter jobs."
+                    + " Please downgrade FE to an older version and handle residual jobs");
         }
 
         // alter job v2
@@ -2008,7 +2020,8 @@ public class Catalog {
 
     public long saveExportJob(CountingDataOutputStream dos, long checksum) throws IOException {
         long curTime = System.currentTimeMillis();
-        List<ExportJob> jobs = exportMgr.getJobs().stream().filter(t -> !t.isExpired(curTime)).collect(Collectors.toList());
+        List<ExportJob> jobs = exportMgr.getJobs().stream()
+                .filter(t -> !t.isExpired(curTime)).collect(Collectors.toList());
         int size = jobs.size();
         checksum ^= size;
         dos.writeInt(size);
@@ -2667,8 +2680,11 @@ public class Catalog {
 
         // 1.2 other table type
         sb.append("CREATE ");
-        if (table.getType() == TableType.ODBC || table.getType() == TableType.MYSQL || table.getType() == TableType.ELASTICSEARCH
-                || table.getType() == TableType.BROKER || table.getType() == TableType.HIVE) {
+        if (table.getType() == TableType.ODBC
+                || table.getType() == TableType.MYSQL
+                || table.getType() == TableType.ELASTICSEARCH
+                || table.getType() == TableType.BROKER
+                || table.getType() == TableType.HIVE) {
             sb.append("EXTERNAL ");
         }
         sb.append("TABLE ");
@@ -2864,7 +2880,8 @@ public class Catalog {
                 sb.append("\"password\" = \"").append(hidePassword ? "" : mysqlTable.getPasswd()).append("\",\n");
                 sb.append("\"charset\" = \"").append(mysqlTable.getCharset()).append("\",\n");
             } else {
-                sb.append("\"odbc_catalog_resource\" = \"").append(mysqlTable.getOdbcCatalogResourceName()).append("\",\n");
+                sb.append("\"odbc_catalog_resource\" = \"")
+                        .append(mysqlTable.getOdbcCatalogResourceName()).append("\",\n");
             }
             sb.append("\"database\" = \"").append(mysqlTable.getMysqlDatabaseName()).append("\",\n");
             sb.append("\"table\" = \"").append(mysqlTable.getMysqlTableName()).append("\"\n");
@@ -2884,7 +2901,8 @@ public class Catalog {
                 sb.append("\"driver\" = \"").append(odbcTable.getOdbcDriver()).append("\",\n");
                 sb.append("\"odbc_type\" = \"").append(odbcTable.getOdbcTableTypeName()).append("\",\n");
             } else {
-                sb.append("\"odbc_catalog_resource\" = \"").append(odbcTable.getOdbcCatalogResourceName()).append("\",\n");
+                sb.append("\"odbc_catalog_resource\" = \"")
+                        .append(odbcTable.getOdbcCatalogResourceName()).append("\",\n");
             }
             sb.append("\"database\" = \"").append(odbcTable.getOdbcDatabaseName()).append("\",\n");
             sb.append("\"table\" = \"").append(odbcTable.getOdbcTableName()).append("\"\n");
@@ -3045,7 +3063,8 @@ public class Catalog {
         getInternalDataSource().replayCreateTable(dbName, table);
     }
 
-    public void replayAlterExternalTableSchema(String dbName, String tableName, List<Column> newSchema) throws MetaNotFoundException {
+    public void replayAlterExternalTableSchema(String dbName, String tableName, List<Column> newSchema)
+            throws MetaNotFoundException {
         getInternalDataSource().replayAlterExternalTableSchema(dbName, tableName, newSchema);
     }
 
@@ -3261,7 +3280,8 @@ public class Catalog {
                     for (Partition partition : olapTable.getAllPartitions()) {
                         long partitionId = partition.getId();
                         DataProperty dataProperty = partitionInfo.getDataProperty(partition.getId());
-                        Preconditions.checkNotNull(dataProperty, partition.getName() + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId);
+                        Preconditions.checkNotNull(dataProperty, partition.getName()
+                                + ", pId:" + partitionId + ", db: " + dbId + ", tbl: " + tableId);
                         if (dataProperty.getStorageMedium() == TStorageMedium.SSD
                                 && dataProperty.getCooldownTimeMs() < currentTimeMs) {
                             // expire. change to HDD.
@@ -3300,7 +3320,8 @@ public class Catalog {
                 // use try lock to avoid blocking a long time.
                 // if block too long, backend report rpc will timeout.
                 if (!olapTable.tryWriteLockIfExist(Table.TRY_LOCK_TIMEOUT_MS, TimeUnit.MILLISECONDS)) {
-                    LOG.warn("try get table {} writelock but failed when checking backend storage medium", table.getName());
+                    LOG.warn("try get table {} writelock but failed"
+                            + " when checking backend storage medium", table.getName());
                     continue;
                 }
                 Preconditions.checkState(olapTable.isWriteLockHeldByCurrentThread());
@@ -3763,13 +3784,15 @@ public class Catalog {
                         if (bucketsNum == -1) {
                             bucketsNum = partition.getDistributionInfo().getBucketNum();
                         } else if (bucketsNum != partition.getDistributionInfo().getBucketNum()) {
-                            throw new DdlException("Partitions in table " + table.getName() + " have different buckets number");
+                            throw new DdlException("Partitions in table " + table.getName()
+                                    + " have different buckets number");
                         }
 
                         if (replicaAlloc == null) {
                             replicaAlloc = partitionInfo.getReplicaAllocation(partition.getId());
                         } else if (!replicaAlloc.equals(partitionInfo.getReplicaAllocation(partition.getId()))) {
-                            throw new DdlException("Partitions in table " + table.getName() + " have different replica allocation.");
+                            throw new DdlException("Partitions in table " + table.getName()
+                                    + " have different replica allocation.");
                         }
                     }
                 }
@@ -3794,7 +3817,8 @@ public class Catalog {
             }
 
             // set this group as unstable
-            colocateTableIndex.markGroupUnstable(groupId, "Colocation group modified by user", false /* edit log is along with modify table log */);
+            colocateTableIndex.markGroupUnstable(groupId, "Colocation group modified by user",
+                    false /* edit log is along with modify table log */);
             table.setColocateGroup(colocateGroup);
         } else {
             // unset colocation group
@@ -3908,7 +3932,8 @@ public class Catalog {
                 throw new DdlException("Table[" + table.getName() + "] is under " + table.getState());
             }
 
-            if (table.getPartitionInfo().getType() != PartitionType.RANGE && table.getPartitionInfo().getType() != PartitionType.LIST) {
+            if (table.getPartitionInfo().getType() != PartitionType.RANGE
+                    && table.getPartitionInfo().getType() != PartitionType.LIST) {
                 throw new DdlException("Table[" + table.getName() + "] is single partitioned. "
                         + "no need to rename partition name.");
             }
@@ -3978,8 +4003,8 @@ public class Catalog {
             // Merge the new properties with origin properties, and then analyze them
             Map<String, String> origDynamicProperties = tableProperty.getOriginDynamicPartitionProperty();
             origDynamicProperties.putAll(properties);
-            Map<String, String> analyzedDynamicPartition = DynamicPartitionUtil.
-                    analyzeDynamicPartition(origDynamicProperties, table.getPartitionInfo());
+            Map<String, String> analyzedDynamicPartition = DynamicPartitionUtil
+                    .analyzeDynamicPartition(origDynamicProperties, table.getPartitionInfo());
             tableProperty.modifyTableProperties(analyzedDynamicPartition);
             tableProperty.buildDynamicProperty();
         }
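
The wrapped call above moves the '.' from the end of the first line to the head of the continuation line, the placement accepted by checkstyle's SeparatorWrap check with its nl option (assuming that is how the Doris config is set). The same convention on a stream chain:

    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class SeparatorWrapDemo {
        public static void main(String[] args) {
            String joined = Stream.of("a", "bb", "ccc")
                    .filter(s -> s.length() > 1)
                    .collect(Collectors.joining(","));
            System.out.println(joined); // bb,ccc
        }
    }
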
@@ -3987,13 +4012,14 @@ public class Catalog {
         DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), table, false);
         dynamicPartitionScheduler.createOrUpdateRuntimeInfo(
                 table.getId(), DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime());
-        ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties);
+        ModifyTablePropertyOperationLog info
+                = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), logProperties);
         editLog.logDynamicPartition(info);
     }
 
     private void convertDynamicPartitionReplicaNumToReplicaAllocation(Map<String, String> properties) {
         if (properties.containsKey(DynamicPartitionProperty.REPLICATION_NUM)) {
-            Short repNum = Short.valueOf(properties.remove(DynamicPartitionProperty.REPLICATION_NUM));
+            short repNum = Short.parseShort(properties.remove(DynamicPartitionProperty.REPLICATION_NUM));
             ReplicaAllocation replicaAlloc = new ReplicaAllocation(repNum);
             properties.put(DynamicPartitionProperty.REPLICATION_ALLOCATION, replicaAlloc.toCreateStmt());
         }
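
Short.parseShort replaces Short.valueOf above because the result is consumed as a primitive: valueOf produces a boxed Short that is immediately auto-unboxed, while parseShort returns the primitive directly and skips the detour. A sketch:

    public class ParseDemo {
        public static void main(String[] args) {
            short viaParse = Short.parseShort("3");  // primitive result, no boxing
            short viaValueOf = Short.valueOf("3");   // boxed Short, auto-unboxed on assignment
            System.out.println(viaParse == viaValueOf); // true; same value either way
        }
    }
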
@@ -4007,7 +4033,8 @@ public class Catalog {
      * @throws DdlException
      */
     // The caller need to hold the table write lock
-    public void modifyTableReplicaAllocation(Database db, OlapTable table, Map<String, String> properties) throws UserException {
+    public void modifyTableReplicaAllocation(Database db, OlapTable table,
+            Map<String, String> properties) throws UserException {
         Preconditions.checkArgument(table.isWriteLockHeldByCurrentThread());
         String defaultReplicationNumName = "default." + PropertyAnalyzer.PROPERTIES_REPLICATION_NUM;
         PartitionInfo partitionInfo = table.getPartitionInfo();
@@ -4058,7 +4085,8 @@ public class Catalog {
         tableProperty.buildReplicaAllocation();
 
         // log
-        ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
+        ModifyTablePropertyOperationLog info
+                = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
         editLog.logModifyReplicationNum(info);
         LOG.debug("modify table[{}] replication num to {}", table.getName(),
                 properties.get(PropertyAnalyzer.PROPERTIES_REPLICATION_NUM));
@@ -4080,11 +4108,13 @@ public class Catalog {
             table.getPartitionInfo().setIsInMemory(partition.getId(), tableProperty.isInMemory());
         }
 
-        ModifyTablePropertyOperationLog info = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
+        ModifyTablePropertyOperationLog info
+                = new ModifyTablePropertyOperationLog(db.getId(), table.getId(), properties);
         editLog.logModifyInMemory(info);
     }
 
-    public void replayModifyTableProperty(short opCode, ModifyTablePropertyOperationLog info) throws MetaNotFoundException {
+    public void replayModifyTableProperty(short opCode,
+            ModifyTablePropertyOperationLog info) throws MetaNotFoundException {
         long dbId = info.getDbId();
         long tableId = info.getTableId();
         Map<String, String> properties = info.getProperties();
@@ -4113,7 +4143,8 @@ public class Catalog {
         }
     }
 
-    public void modifyDefaultDistributionBucketNum(Database db, OlapTable olapTable, ModifyDistributionClause modifyDistributionClause) throws DdlException {
+    public void modifyDefaultDistributionBucketNum(Database db, OlapTable olapTable,
+            ModifyDistributionClause modifyDistributionClause) throws DdlException {
         olapTable.writeLockOrDdlException();
         try {
             if (olapTable.isColocateTable()) {
@@ -4131,12 +4162,14 @@ public class Catalog {
                 DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema);
                 // for now. we only support modify distribution's bucket num
                 if (distributionInfo.getType() != defaultDistributionInfo.getType()) {
-                    throw new DdlException("Cannot change distribution type when modify default distribution bucket num");
+                    throw new DdlException("Cannot change distribution type when modify"
+                            + " default distribution bucket num");
                 }
                 if (distributionInfo.getType() == DistributionInfoType.HASH) {
                     HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo;
                     List<Column> newDistriCols = hashDistributionInfo.getDistributionColumns();
-                    List<Column> defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo).getDistributionColumns();
+                    List<Column> defaultDistriCols = ((HashDistributionInfo) defaultDistributionInfo)
+                            .getDistributionColumns();
                     if (!newDistriCols.equals(defaultDistriCols)) {
                         throw new DdlException("Cannot assign hash distribution with different distribution cols. "
                                 + "default is: " + defaultDistriCols);
@@ -4150,7 +4183,9 @@ public class Catalog {
 
                 defaultDistributionInfo.setBucketNum(bucketNum);
 
-                ModifyTableDefaultDistributionBucketNumOperationLog info = new ModifyTableDefaultDistributionBucketNumOperationLog(db.getId(), olapTable.getId(), bucketNum);
+                ModifyTableDefaultDistributionBucketNumOperationLog info
+                        = new ModifyTableDefaultDistributionBucketNumOperationLog(
+                                db.getId(), olapTable.getId(), bucketNum);
                 editLog.logModifyDefaultDistributionBucketNum(info);
                 LOG.info("modify table[{}] default bucket num to {}", olapTable.getName(), bucketNum);
             }
@@ -4159,7 +4194,8 @@ public class Catalog {
         }
     }
 
-    public void replayModifyTableDefaultDistributionBucketNum(ModifyTableDefaultDistributionBucketNumOperationLog info) throws MetaNotFoundException {
+    public void replayModifyTableDefaultDistributionBucketNum(ModifyTableDefaultDistributionBucketNumOperationLog info)
+            throws MetaNotFoundException {
         long dbId = info.getDbId();
         long tableId = info.getTableId();
         int bucketNum = info.getBucketNum();
@@ -4576,9 +4612,10 @@ public class Catalog {
             // but it is wrong because we can not get replica from `tabletInvertedIndex` when doing checkpoint,
             // because when doing checkpoint, the tabletInvertedIndex is not initialized at all.
             //
-            // So we can only discard this information, in this case, it is equivalent to losing the record of these operations.
-            // But it doesn't matter, these records are currently only used to record whether a replica is in a bad state.
-            // This state has little effect on the system, and it can be restored after the system has processed the bad state replica.
+            // So we can only discard this information, in this case, it is equivalent to losing the record of these
+            // operations. But it doesn't matter, these records are currently only used to record whether a replica is
+            // in a bad state. This state has little effect on the system, and it can be restored after the system
+            // has processed the bad state replica.
             for (Pair<Long, Integer> tabletInfo : tabletsWithSchemaHash) {
                 LOG.warn("find an old backendTabletsInfo for tablet {}, ignore it", tabletInfo.first);
             }
@@ -4653,7 +4690,8 @@ public class Catalog {
     /*
      * The entry of replacing partitions with temp partitions.
      */
-    public void replaceTempPartition(Database db, OlapTable olapTable, ReplacePartitionClause clause) throws DdlException {
+    public void replaceTempPartition(Database db, OlapTable olapTable, ReplacePartitionClause clause)
+            throws DdlException {
         Preconditions.checkState(olapTable.isWriteLockHeldByCurrentThread());
         List<String> partitionNames = clause.getPartitionNames();
         List<String> tempPartitionNames = clause.getTempPartitionNames();
@@ -4680,7 +4718,8 @@ public class Catalog {
                 clause.getPartitionNames(), clause.getTempPartitionNames(), olapTable.getName());
     }
 
-    public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog) throws MetaNotFoundException {
+    public void replayReplaceTempPartition(ReplacePartitionOperationLog replaceTempPartitionLog)
+            throws MetaNotFoundException {
         long dbId = replaceTempPartitionLog.getDbId();
         long tableId = replaceTempPartitionLog.getTblId();
         Database db = this.getDbOrMetaException(dbId);
@@ -4765,7 +4804,8 @@ public class Catalog {
         setReplicaStatusInternal(log.getTabletId(), log.getBackendId(), log.getReplicaStatus(), true);
     }
 
-    private void setReplicaStatusInternal(long tabletId, long backendId, ReplicaStatus status, boolean isReplay) throws MetaNotFoundException {
+    private void setReplicaStatusInternal(long tabletId, long backendId,
+            ReplicaStatus status, boolean isReplay) throws MetaNotFoundException {
         try {
             TabletMeta meta = tabletInvertedIndex.getTabletMeta(tabletId);
             if (meta == null) {
@@ -4782,7 +4822,8 @@ public class Catalog {
                 if (status == ReplicaStatus.BAD || status == ReplicaStatus.OK) {
                     if (replica.setBad(status == ReplicaStatus.BAD)) {
                         if (!isReplay) {
-                            SetReplicaStatusOperationLog log = new SetReplicaStatusOperationLog(backendId, tabletId, status);
+                            SetReplicaStatusOperationLog log
+                                    = new SetReplicaStatusOperationLog(backendId, tabletId, status);
                             getEditLog().logSetReplicaStatus(log);
                         }
                         LOG.info("set replica {} of tablet {} on backend {} as {}. is replay: {}",
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java
index ea2d8ca20f..617c08bc4d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColocateGroupSchema.java
@@ -45,7 +45,8 @@ public class ColocateGroupSchema implements Writable {
 
     }
 
-    public ColocateGroupSchema(GroupId groupId, List<Column> distributionCols, int bucketsNum, ReplicaAllocation replicaAlloc) {
+    public ColocateGroupSchema(GroupId groupId, List<Column> distributionCols,
+            int bucketsNum, ReplicaAllocation replicaAlloc) {
         this.groupId = groupId;
         this.distributionColTypes = distributionCols.stream().map(c -> c.getType()).collect(Collectors.toList());
         this.bucketsNum = bucketsNum;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java
index 7f75392090..02892573bc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Column.java
@@ -83,7 +83,8 @@ public class Column implements Writable {
     @SerializedName(value = "children")
     private List<Column> children;
     // Define expr may exist in two forms, one is analyzed, and the other is not analyzed.
-    // Currently, analyzed define expr is only used when creating materialized views, so the define expr in RollupJob must be analyzed.
+    // Currently, analyzed define expr is only used when creating materialized views,
+    // so the define expr in RollupJob must be analyzed.
     // In other cases, such as define expr in `MaterializedIndexMeta`, it may not be analyzed after being replayed.
     private Expr defineExpr; // use to define column in materialize view
     @SerializedName(value = "visible")
@@ -123,6 +124,7 @@ public class Column implements Writable {
                   String defaultValue, String comment) {
         this(name, type, isKey, aggregateType, isAllowNull, defaultValue, comment, true, null);
     }
+
     public Column(String name, Type type, boolean isKey, AggregateType aggregateType, boolean isAllowNull,
                   String defaultValue, String comment, boolean visible, DefaultValueExprDef defaultValueExprDef) {
         this.name = name;
@@ -368,7 +370,8 @@ public class Column implements Writable {
         // And CreateReplicaTask does not need `defineExpr` field.
         // The `defineExpr` is only used when creating `TAlterMaterializedViewParam`, which is in `AlterReplicaTask`.
         // And when creating `TAlterMaterializedViewParam`, the `defineExpr` is certainly analyzed.
-        // If we need to use `defineExpr` and call defineExpr.treeToThrift(), make sure it is analyzed, or NPE will thrown.
+        // If we need to use `defineExpr` and call defineExpr.treeToThrift(),
+        // make sure it is analyzed, or an NPE will be thrown.
         return tColumn;
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java
index 995d1603d5..8ba694b42d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ColumnStats.java
@@ -36,7 +36,7 @@ import java.util.Objects;
  * Statistics for a single column.
  */
 public class ColumnStats implements Writable {
-    private final static Logger LOG = LogManager.getLogger(ColumnStats.class);
+    private static final Logger LOG = LogManager.getLogger(ColumnStats.class);
 
     @SerializedName(value = "avgSerializedSize")
     private float avgSerializedSize;  // in bytes; includes serialization overhead
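
final static becomes static final above; checkstyle's ModifierOrder check enforces the modifier order suggested by the Java Language Specification, in which static precedes final. A sketch using the JDK logger so it compiles standalone:

    import java.util.logging.Logger;

    public class ModifierOrderDemo {
        // "private final static" compiles, but ModifierOrder flags it.
        private static final Logger LOG = Logger.getLogger(ModifierOrderDemo.class.getName());

        public static void main(String[] args) {
            LOG.info("modifier order demo");
        }
    }
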
@@ -126,6 +126,7 @@ public class ColumnStats implements Writable {
         out.writeLong(maxSize);
         out.writeLong(numNulls);
     }
+
     public void readFields(DataInput in) throws IOException {
         numDistinctValues = in.readLong();
         avgSerializedSize = in.readFloat();
@@ -158,6 +159,7 @@ public class ColumnStats implements Writable {
                 && (maxSize == stats.maxSize)
                 && (numNulls == stats.numNulls);
     }
+
     /**
      * For fixed-length types (those which don't need additional storage besides
      * the slot they occupy), sets avgSerializedSize and maxSize to their slot size.
@@ -172,6 +174,7 @@ public class ColumnStats implements Writable {
             maxSize = colType.getSlotSize();
         }
     }
+
     /**
      * Creates ColumnStats from the given expr. Sets numDistinctValues and if the expr
      * is a SlotRef also numNulls.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java
index 5607f3f59e..a94ebc8022 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Database.java
@@ -325,7 +325,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
     }
 
     // return pair <success?, table exist?>
-    public Pair<Boolean, Boolean> createTableWithLock(Table table, boolean isReplay, boolean setIfNotExist) throws DdlException {
+    public Pair<Boolean, Boolean> createTableWithLock(
+            Table table, boolean isReplay, boolean setIfNotExist) throws DdlException {
         boolean result = true;
         // if a table is already exists, then edit log won't be executed
         // some caller of this method may need to know this message
@@ -484,7 +485,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
         return Optional.ofNullable(getTableNullable(tableId));
     }
 
-    public <E extends Exception> Table getTableOrException(String tableName, java.util.function.Function<String, E> e) throws E {
+    public <E extends Exception> Table getTableOrException(
+            String tableName, java.util.function.Function<String, E> e) throws E {
         Table table = getTableNullable(tableName);
         if (table == null) {
             throw e.apply(tableName);
@@ -492,7 +494,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
         return table;
     }
 
-    public <E extends Exception> Table getTableOrException(long tableId, java.util.function.Function<Long, E> e) throws E {
+    public <E extends Exception> Table getTableOrException(
+            long tableId, java.util.function.Function<Long, E> e) throws E {
         Table table = getTableNullable(tableId);
         if (table == null) {
             throw e.apply(tableId);
@@ -512,7 +515,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
     public Table getTableOrMetaException(String tableName, TableType tableType) throws MetaNotFoundException {
         Table table = getTableOrMetaException(tableName);
         if (table.getType() != tableType) {
-            throw new MetaNotFoundException("table type is not " + tableType + ", tableName=" + tableName + ", type=" + table.getType());
+            throw new MetaNotFoundException("table type is not "
+                    + tableType + ", tableName=" + tableName + ", type=" + table.getType());
         }
         return table;
     }
@@ -521,7 +525,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
     public Table getTableOrMetaException(long tableId, TableType tableType) throws MetaNotFoundException {
         Table table = getTableOrMetaException(tableId);
         if (table.getType() != tableType) {
-            throw new MetaNotFoundException("table type is not " + tableType + ", tableId=" + tableId + ", type=" + table.getType());
+            throw new MetaNotFoundException("table type is not " + tableType
+                    + ", tableId=" + tableId + ", type=" + table.getType());
         }
         return table;
     }
@@ -543,8 +548,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
     }
 
     public Table getTableOrAnalysisException(String tableName) throws AnalysisException {
-        return getTableOrException(tableName, t -> new AnalysisException(ErrorCode.ERR_UNKNOWN_TABLE.formatErrorMsg(t
-                , fullQualifiedName)));
+        return getTableOrException(tableName,
+                t -> new AnalysisException(ErrorCode.ERR_UNKNOWN_TABLE.formatErrorMsg(t, fullQualifiedName)));
     }
 
     public OlapTable getOlapTableOrAnalysisException(String tableName) throws AnalysisException {
@@ -556,7 +561,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
     }
 
     public Table getTableOrAnalysisException(long tableId) throws AnalysisException {
-        return getTableOrException(tableId, t -> new AnalysisException(ErrorCode.ERR_BAD_TABLE_ERROR.formatErrorMsg(t)));
+        return getTableOrException(tableId,
+                t -> new AnalysisException(ErrorCode.ERR_BAD_TABLE_ERROR.formatErrorMsg(t)));
     }
 
     public int getMaxReplicationNum() {
@@ -571,7 +577,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
                 table.readLock();
                 try {
                     for (Partition partition : olapTable.getAllPartitions()) {
-                        short replicationNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum();
+                        short replicationNum = olapTable.getPartitionInfo()
+                                .getReplicaAllocation(partition.getId()).getTotalReplicaNum();
                         if (ret < replicationNum) {
                             ret = replicationNum;
                         }
@@ -880,7 +887,8 @@ public class Database extends MetaObject implements Writable, DatabaseIf<Table>
         if (!isReplay) {
             if (existKey != null) {
                 if (existKey.isIdentical(encryptKey)) {
-                    throw new UserException("encryptKey [" + existKey.getEncryptKeyName().toString() + "] already exists");
+                    throw new UserException("encryptKey ["
+                            + existKey.getEncryptKeyName().toString() + "] already exists");
                 }
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java
index 83ba5b67a6..0756bc0e85 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/DynamicPartitionProperty.java
@@ -86,9 +86,11 @@ public class DynamicPartitionProperty {
             this.buckets = Integer.parseInt(properties.get(BUCKETS));
             this.replicaAlloc = analyzeReplicaAllocation(properties);
             this.createHistoryPartition = Boolean.parseBoolean(properties.get(CREATE_HISTORY_PARTITION));
-            this.historyPartitionNum = Integer.parseInt(properties.getOrDefault(HISTORY_PARTITION_NUM, String.valueOf(NOT_SET_HISTORY_PARTITION_NUM)));
+            this.historyPartitionNum = Integer.parseInt(properties.getOrDefault(
+                    HISTORY_PARTITION_NUM, String.valueOf(NOT_SET_HISTORY_PARTITION_NUM)));
             this.hotPartitionNum = Integer.parseInt(properties.getOrDefault(HOT_PARTITION_NUM, "0"));
-            this.reservedHistoryPeriods = properties.getOrDefault(RESERVED_HISTORY_PERIODS, NOT_SET_RESERVED_HISTORY_PERIODS);
+            this.reservedHistoryPeriods = properties.getOrDefault(
+                    RESERVED_HISTORY_PERIODS, NOT_SET_RESERVED_HISTORY_PERIODS);
             createStartOfs(properties);
         } else {
             this.exist = false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java
index 6981d62857..f34c8b69cb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/EsTable.java
@@ -93,12 +93,12 @@ public class EsTable extends Table {
     private boolean httpSslEnabled = false;
 
     // Solr doc_values vs stored_fields performance-smackdown indicate:
-    // It is possible to notice that retrieving an high number of fields leads
-    // to a sensible worsening of performance if DocValues are used.
-    // Instead,  the (almost) surprising thing is that, by returning less than 20 fields,
-    // DocValues performs better than stored fields and the difference gets little as the number of fields returned increases.
-    // Asking for 9 DocValues fields and 1 stored field takes an average query time is 6.86 (more than returning 10 stored fields)
-    // Here we have a slightly conservative value of 20, but at the same time we also provide configurable parameters for expert-using
+    // It is possible to notice that retrieving a high number of fields leads to a noticeable worsening of
+    // performance if DocValues are used. Instead, the (almost) surprising thing is that, by returning fewer than
+    // 20 fields, DocValues performs better than stored fields, and the difference shrinks as the number of
+    // fields returned increases. Asking for 9 DocValues fields and 1 stored field takes an average query time
+    // of 6.86 (more than returning 10 stored fields). Here we have a slightly conservative value of 20, but at
+    // the same time we also provide configurable parameters for expert users
     // @see `MAX_DOCVALUE_FIELDS`
     private static final int DEFAULT_MAX_DOCVALUE_FIELDS = 20;
 
@@ -230,8 +230,8 @@ public class EsTable extends Table {
                 && !Strings.isNullOrEmpty(properties.get(TRANSPORT).trim())) {
             transport = properties.get(TRANSPORT).trim();
             if (!(TRANSPORT_HTTP.equals(transport) || TRANSPORT_THRIFT.equals(transport))) {
-                throw new DdlException("transport of ES table must be http/https(recommend) or thrift(reserved inner usage),"
-                        + " but value is " + transport);
+                throw new DdlException("transport of ES table must be http/https(recommend)"
+                        + " or thrift(reserved inner usage), but value is " + transport);
             }
         }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java
index 7e609c5070..9079deaa2d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Function.java
@@ -133,7 +133,8 @@ public class Function implements Writable {
         this(0, name, args, retType, varArgs, vectorized, NullableMode.DEPEND_ON_ARGUMENT);
     }
 
-    public Function(FunctionName name, List<Type> args, Type retType, boolean varArgs, boolean vectorized, NullableMode mode) {
+    public Function(FunctionName name, List<Type> args, Type retType,
+            boolean varArgs, boolean vectorized, NullableMode mode) {
         this(0, name, args, retType, varArgs, vectorized, mode);
     }
 
@@ -606,6 +607,7 @@ public class Function implements Writable {
         FunctionType(int code) {
             this.code = code;
         }
+
         public int getCode() {
             return code;
         }
@@ -627,10 +629,11 @@ public class Function implements Writable {
         public void write(DataOutput output) throws IOException {
             output.writeInt(code);
         }
+
         public static FunctionType read(DataInput input) throws IOException {
             return fromCode(input.readInt());
         }
-    };
+    }
 
     protected void writeFields(DataOutput output) throws IOException {
         output.writeLong(id);
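
The "};" to "}" change above removes the semicolon after the nested enum's closing brace: the brace already terminates the type declaration, so the semicolon is a redundant empty declaration that the style check rejects. A sketch:

    public class SemicolonDemo {
        enum FunctionKind {
            SCALAR, AGGREGATE
        } // no ';' needed after a nested type's closing brace

        public static void main(String[] args) {
            System.out.println(FunctionKind.AGGREGATE);
        }
    }
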
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java
index 5b263589c3..9f8e978fe2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/FunctionSet.java
@@ -1312,6 +1312,7 @@ public class FunctionSet<T> {
 
     public static final String COUNT = "count";
     public static final String WINDOW_FUNNEL = "window_funnel";
+
     // Populate all the aggregate builtins in the catalog.
     // null symbols indicate the function does not need that step of the evaluation.
     // An empty symbol indicates a TODO for the BE to implement the function.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
index d5cac299f8..80aced6f0a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HashDistributionInfo.java
@@ -73,6 +73,7 @@ public class HashDistributionInfo extends DistributionInfo {
         }
         out.writeInt(bucketNum);
     }
+
     public void readFields(DataInput in) throws IOException {
         super.readFields(in);
         int columnCount = in.readInt();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
index 1b46d40e75..46f1ea0b63 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveMetaStoreClientHelper.java
@@ -37,7 +37,6 @@ import org.apache.doris.thrift.TBrokerFileStatus;
 import org.apache.doris.thrift.TExprOpcode;
 
 import com.google.common.base.Strings;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -200,7 +199,8 @@ public class HiveMetaStoreClientHelper {
                     brokerFileStatus.setIsSplitable(true);
                     brokerFileStatus.setSize(fileStatus.getLen());
                     // path = "/path/to/partition/file_name"
-                    // eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse/dae.db/customer/state=CA/city=SanJose/000000_0
+                    // eg: /home/work/dev/hive/apache-hive-2.3.7-bin/data/warehouse
+                    //     + /dae.db/customer/state=CA/city=SanJose/000000_0
                     String path = fileStatus.getPath().toUri().getPath();
                     if (onS3) {
                         // Backend need full s3 path (with s3://bucket at the beginning) to read the data on s3.
@@ -305,7 +305,7 @@ public class HiveMetaStoreClientHelper {
                 configuration.set(entry.getKey(), entry.getValue());
             }
             if (entry.getKey().equals(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION)
-                && entry.getValue().equals(AuthType.KERBEROS.getDesc())) {
+                    && entry.getValue().equals(AuthType.KERBEROS.getDesc())) {
                 isSecurityEnabled = true;
             }
         }
@@ -319,7 +319,7 @@ public class HiveMetaStoreClientHelper {
                 UserGroupInformation.setConfiguration(configuration);
                 // login user from keytab
                 UserGroupInformation.loginUserFromKeytab(properties.get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL),
-                    properties.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB));
+                        properties.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB));
             }
             FileSystem fileSystem = path.getFileSystem(configuration);
             iterators.add(fileSystem.listLocatedStatus(path));
@@ -407,7 +407,8 @@ public class HiveMetaStoreClientHelper {
      * @throws DdlException
      * @throws SemanticException
      */
-    public static ExprNodeGenericFuncDesc convertToHivePartitionExpr(Expr dorisExpr, List<String> partitions, String tblName) throws DdlException {
+    public static ExprNodeGenericFuncDesc convertToHivePartitionExpr(Expr dorisExpr,
+            List<String> partitions, String tblName) throws DdlException {
         if (dorisExpr == null) {
             return null;
         }
@@ -416,8 +417,10 @@ public class HiveMetaStoreClientHelper {
             CompoundPredicate compoundPredicate = (CompoundPredicate) dorisExpr;
             switch (compoundPredicate.getOp()) {
                 case AND: {
-                    ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName);
-                    ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName);
+                    ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(
+                            compoundPredicate.getChild(0), partitions, tblName);
+                    ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(
+                            compoundPredicate.getChild(1), partitions, tblName);
                     if (left != null && right != null) {
                         List<ExprNodeDesc> andArgs = new ArrayList<>();
                         andArgs.add(left);
@@ -431,8 +434,10 @@ public class HiveMetaStoreClientHelper {
                     return null;
                 }
                 case OR: {
-                    ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName);
-                    ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(compoundPredicate.getChild(0), partitions, tblName);
+                    ExprNodeGenericFuncDesc left = convertToHivePartitionExpr(
+                            compoundPredicate.getChild(0), partitions, tblName);
+                    ExprNodeGenericFuncDesc right = convertToHivePartitionExpr(
+                            compoundPredicate.getChild(1), partitions, tblName);
                     if (left != null && right != null) {
                         List<ExprNodeDesc> orArgs = new ArrayList<>();
                         orArgs.add(left);
@@ -587,6 +592,7 @@ public class HiveMetaStoreClientHelper {
         }
         return null;
     }
+
     /**
      * Convert from Doris column type to Hive column type
      * @param dorisType
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
index 3736548948..b7d3ee013f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HiveTable.java
@@ -39,8 +39,11 @@ import java.util.Map;
  * Currently only support loading from hive table
  */
 public class HiveTable extends Table {
-    private static final String PROPERTY_MISSING_MSG = "Hive %s is null. Please add properties('%s'='xxx') when create table";
-    private static final String PROPERTY_ERROR_MSG = "Hive table properties('%s'='%s') is illegal or not supported. Please check it";
+
+    private static final String PROPERTY_MISSING_MSG = "Hive %s is null. Please add properties('%s'='xxx')"
+            + " when create table";
+    private static final String PROPERTY_ERROR_MSG = "Hive table properties('%s'='%s')"
+            + " is illegal or not supported. Please check it";
 
     private String hiveDb;
     private String hiveTable;
@@ -100,7 +103,7 @@ public class HiveTable extends Table {
         copiedProps.remove(HIVE_TABLE);
 
         // check hive properties
-        // hive.metastore.uris 
+        // hive.metastore.uris
         String hiveMetaStoreUris = copiedProps.get(HIVE_METASTORE_URIS);
         if (Strings.isNullOrEmpty(hiveMetaStoreUris)) {
             throw new DdlException(String.format(PROPERTY_MISSING_MSG, HIVE_METASTORE_URIS, HIVE_METASTORE_URIS));
@@ -114,7 +117,8 @@ public class HiveTable extends Table {
             authType = AuthType.SIMPLE.getDesc();
         }
         if (!AuthType.isSupportedAuthType(authType)) {
-            throw new DdlException(String.format(PROPERTY_ERROR_MSG, BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType));
+            throw new DdlException(String.format(PROPERTY_ERROR_MSG,
+                    BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType));
         }
         copiedProps.remove(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION);
         hiveProperties.put(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION, authType);
@@ -123,23 +127,25 @@ public class HiveTable extends Table {
             // check principal
             String principal = copiedProps.get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL);
             if (Strings.isNullOrEmpty(principal)) {
-                throw new DdlException(String.format(PROPERTY_MISSING_MSG, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL));
+                throw new DdlException(String.format(PROPERTY_MISSING_MSG,
+                        BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, BrokerUtil.HADOOP_KERBEROS_PRINCIPAL));
             }
             hiveProperties.put(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL, principal);
             copiedProps.remove(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL);
             // check keytab
             String keytabPath = copiedProps.get(BrokerUtil.HADOOP_KERBEROS_KEYTAB);
             if (Strings.isNullOrEmpty(keytabPath)) {
-                throw new DdlException(String.format(PROPERTY_MISSING_MSG, BrokerUtil.HADOOP_KERBEROS_KEYTAB, BrokerUtil.HADOOP_KERBEROS_KEYTAB));
+                throw new DdlException(String.format(PROPERTY_MISSING_MSG,
+                        BrokerUtil.HADOOP_KERBEROS_KEYTAB, BrokerUtil.HADOOP_KERBEROS_KEYTAB));
             }
             if (!Strings.isNullOrEmpty(keytabPath)) {
                 hiveProperties.put(BrokerUtil.HADOOP_KERBEROS_KEYTAB, keytabPath);
                 copiedProps.remove(BrokerUtil.HADOOP_KERBEROS_KEYTAB);
             }
         }
-        String HDFSUserName = copiedProps.get(BrokerUtil.HADOOP_USER_NAME);
-        if (!Strings.isNullOrEmpty(HDFSUserName)) {
-            hiveProperties.put(BrokerUtil.HADOOP_USER_NAME, HDFSUserName);
+        String hdfsUserName = copiedProps.get(BrokerUtil.HADOOP_USER_NAME);
+        if (!Strings.isNullOrEmpty(hdfsUserName)) {
+            hiveProperties.put(BrokerUtil.HADOOP_USER_NAME, hdfsUserName);
             copiedProps.remove(BrokerUtil.HADOOP_USER_NAME);
         }
         if (!copiedProps.isEmpty()) {
@@ -189,7 +195,7 @@ public class HiveTable extends Table {
     public TTableDescriptor toThrift() {
         THiveTable tHiveTable = new THiveTable(getHiveDb(), getHiveTable(), getHiveProperties());
         TTableDescriptor tTableDescriptor = new TTableDescriptor(getId(), TTableType.HIVE_TABLE,
-            fullSchema.size(), 0, getName(), "");
+                fullSchema.size(), 0, getName(), "");
         tTableDescriptor.setHiveTable(tHiveTable);
         return tTableDescriptor;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java
index afa6562f86..8198fbe760 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ListPartitionInfo.java
@@ -69,8 +69,8 @@ public class ListPartitionInfo extends PartitionInfo {
                 PartitionKey partitionKey = PartitionKey.createListPartitionKey(values, partitionColumns);
                 checkNewPartitionKey(partitionKey, partitionKeyDesc, isTemp);
                 if (partitionKeys.contains(partitionKey)) {
-                    throw new AnalysisException("The partition key[" + partitionKeyDesc.toSql() + "] has duplicate item ["
-                            + partitionKey.toSql() + "].");
+                    throw new AnalysisException("The partition key["
+                            + partitionKeyDesc.toSql() + "] has duplicate item [" + partitionKey.toSql() + "].");
                 }
                 partitionKeys.add(partitionKey);
             }
@@ -80,7 +80,8 @@ public class ListPartitionInfo extends PartitionInfo {
         return new ListPartitionItem(partitionKeys);
     }
 
-    private void checkNewPartitionKey(PartitionKey newKey, PartitionKeyDesc keyDesc, boolean isTemp) throws AnalysisException {
+    private void checkNewPartitionKey(PartitionKey newKey, PartitionKeyDesc keyDesc,
+            boolean isTemp) throws AnalysisException {
         Map<Long, PartitionItem> id2Item = idToItem;
         if (isTemp) {
             id2Item = idToTempItem;
@@ -103,7 +104,8 @@ public class ListPartitionInfo extends PartitionInfo {
     }
 
     @Override
-    public void checkPartitionItemListsConflict(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
+    public void checkPartitionItemListsConflict(List<PartitionItem> list1,
+            List<PartitionItem> list2) throws DdlException {
         ListUtil.checkListsConflict(list1, list2);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java
index 4547539bb0..1a746a2374 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MapType.java
@@ -30,10 +30,12 @@ import com.google.common.base.Strings;
 public class MapType extends Type {
     private final Type keyType;
     private final Type valueType;
+
     public MapType() {
         this.keyType = NULL;
         this.valueType = NULL;
     }
+
     public MapType(Type keyType, Type valueType) {
         Preconditions.checkNotNull(keyType);
         Preconditions.checkNotNull(valueType);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java
index 887af28a74..f8babe93b9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MetadataViewer.java
@@ -71,7 +71,8 @@ public class MetadataViewer {
             for (String partName : partitions) {
                 Partition partition = olapTable.getPartition(partName);
                 long visibleVersion = partition.getVisibleVersion();
-                short replicationNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum();
+                short replicationNum = olapTable.getPartitionInfo()
+                        .getReplicaAllocation(partition.getId()).getTotalReplicaNum();
 
                 for (MaterializedIndex index : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                     int schemaHash = olapTable.getSchemaHashByIndexId(index.getId());
@@ -158,7 +159,8 @@ public class MetadataViewer {
         return getTabletDistribution(stmt.getDbName(), stmt.getTblName(), stmt.getPartitionNames());
     }
 
-    private static List<List<String>> getTabletDistribution(String dbName, String tblName, PartitionNames partitionNames)
+    private static List<List<String>> getTabletDistribution(
+            String dbName, String tblName, PartitionNames partitionNames)
             throws DdlException {
         DecimalFormat df = new DecimalFormat("00.00 %");
 
@@ -209,7 +211,8 @@ public class MetadataViewer {
                                 continue;
                             }
                             countMap.put(replica.getBackendId(), countMap.get(replica.getBackendId()) + 1);
-                            sizeMap.put(replica.getBackendId(), sizeMap.get(replica.getBackendId()) + replica.getDataSize());
+                            sizeMap.put(replica.getBackendId(),
+                                    sizeMap.get(replica.getBackendId()) + replica.getDataSize());
                             totalReplicaNum++;
                             totalReplicaSize += replica.getDataSize();
                         }
@@ -225,9 +228,11 @@ public class MetadataViewer {
                 row.add(String.valueOf(countMap.get(beId)));
                 row.add(String.valueOf(sizeMap.get(beId)));
                 row.add(graph(countMap.get(beId), totalReplicaNum));
-                row.add(totalReplicaNum == countMap.get(beId) ? "100.00%" : df.format((double) countMap.get(beId) / totalReplicaNum));
+                row.add(totalReplicaNum == countMap.get(beId)
+                        ? "100.00%" : df.format((double) countMap.get(beId) / totalReplicaNum));
                 row.add(graph(sizeMap.get(beId), totalReplicaSize));
-                row.add(totalReplicaSize == sizeMap.get(beId) ? "100.00%" : df.format((double) sizeMap.get(beId) / totalReplicaSize));
+                row.add(totalReplicaSize == sizeMap.get(beId)
+                        ? "100.00%" : df.format((double) sizeMap.get(beId) / totalReplicaSize));
                 result.add(row);
             }
 
@@ -299,7 +304,8 @@ public class MetadataViewer {
                 row.add(String.valueOf(i));
                 row.add(tabletInfos.get(i).toString());
                 row.add(graph(tabletInfos.get(i), totalSize));
-                row.add(totalSize == tabletInfos.get(i) ? "100.00%" : df.format((double) tabletInfos.get(i) / totalSize));
+                row.add(totalSize == tabletInfos.get(i)
+                        ? "100.00%" : df.format((double) tabletInfos.get(i) / totalSize));
                 result.add(row);
             }
         } finally {
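The percentage columns above rely on java.text.DecimalFormat's '%' pattern
character, which multiplies the value by 100 while formatting. A minimal
standalone sketch of the formatter used here (illustrative only; the printed
results assume an English locale with '.' as decimal separator):

    import java.text.DecimalFormat;

    public class PercentFormatDemo {
        public static void main(String[] args) {
            // Same pattern as MetadataViewer above.
            DecimalFormat df = new DecimalFormat("00.00 %");
            System.out.println(df.format(7 / 16.0)); // prints "43.75 %"
            System.out.println(df.format(1.0));      // prints "100.00 %"
        }
    }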
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java
index d1332d3c33..eacef3de26 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/MysqlTable.java
@@ -158,7 +158,8 @@ public class MysqlTable extends Table {
 
         String property = odbcCatalogResource.getProperty(propertyName);
         if (property == null) {
-            throw new RuntimeException("The property:" + propertyName + " do not set in resource " + odbcCatalogResourceName);
+            throw new RuntimeException("The property:" + propertyName
+                    + " do not set in resource " + odbcCatalogResourceName);
         }
         return property;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java
index 0fa55265d5..a482553595 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OdbcTable.java
@@ -232,10 +232,12 @@ public class OdbcTable extends Table {
 
         String property = odbcCatalogResource.getProperty(propertyName);
         if (property == null) {
-            throw new RuntimeException("The property:" + propertyName + " do not set in resource " + odbcCatalogResourceName);
+            throw new RuntimeException("The property:" + propertyName
+                    + " do not set in resource " + odbcCatalogResourceName);
         }
         return property;
     }
+
     public String getExtraParameter(Map<String, String> extraMap) {
         if (extraMap == null || extraMap.isEmpty()) {
             return "";
@@ -252,6 +254,7 @@ public class OdbcTable extends Table {
         }
         return getExtraParameter(resourceProperties);
     }
+
     public String getOdbcCatalogResourceName() {
         return odbcCatalogResourceName;
     }
@@ -336,7 +339,8 @@ public class OdbcTable extends Table {
                         getCharset());
                 break;
             case POSTGRESQL:
-                connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;Uid=%s;Pwd=%s;charset=%s;UseDeclareFetch=1;Fetch=4096",
+                connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;"
+                                + "Uid=%s;Pwd=%s;charset=%s;UseDeclareFetch=1;Fetch=4096",
                         getOdbcDriver(),
                         getHost(),
                         getPort(),
@@ -346,7 +350,8 @@ public class OdbcTable extends Table {
                         getCharset());
                 break;
             case MYSQL:
-                connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;Uid=%s;Pwd=%s;charset=%s;forward_cursor=1;no_cache=1",
+                connectString = String.format("Driver=%s;Server=%s;Port=%s;DataBase=%s;"
+                                + "Uid=%s;Pwd=%s;charset=%s;forward_cursor=1;no_cache=1",
                         getOdbcDriver(),
                         getHost(),
                         getPort(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index 25eb7f65db..a117762274 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -340,8 +340,8 @@ public class OlapTable extends Table {
         this.indexIdToMeta.remove(indexId);
         // Some column of deleted index should be removed during `deleteIndexInfo` such as `mv_bitmap_union_c1`
         // If deleted index id == base index id, the schema will not be rebuilt.
-        // The reason is that the base index has been removed from indexIdToMeta while the new base index hasn't changed.
-        // The schema could not be rebuild in here with error base index id.
+        // The reason is that the base index has been removed from indexIdToMeta while the new base index
+        // hasn't changed. The schema could not be rebuilt here with a wrong base index id.
         if (indexId != baseIndexId) {
             rebuildFullSchema();
         }
@@ -643,10 +643,12 @@ public class OlapTable extends Table {
             return partitionColumnNames;
         } else if (partitionInfo instanceof RangePartitionInfo) {
             RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo;
-            return rangePartitionInfo.getPartitionColumns().stream().map(c -> c.getName().toLowerCase()).collect(Collectors.toSet());
+            return rangePartitionInfo.getPartitionColumns().stream()
+                    .map(c -> c.getName().toLowerCase()).collect(Collectors.toSet());
         } else if (partitionInfo instanceof ListPartitionInfo) {
             ListPartitionInfo listPartitionInfo = (ListPartitionInfo) partitionInfo;
-            return listPartitionInfo.getPartitionColumns().stream().map(c -> c.getName().toLowerCase()).collect(Collectors.toSet());
+            return listPartitionInfo.getPartitionColumns().stream()
+                    .map(c -> c.getName().toLowerCase()).collect(Collectors.toSet());
         } else {
             throw new DdlException("Unknown partition info type: " + partitionInfo.getType().name());
         }
@@ -1255,7 +1257,8 @@ public class OlapTable extends Table {
         }
 
         // remove shadow index from copied table
-        List<MaterializedIndex> shadowIndex = copied.getPartitions().stream().findFirst().get().getMaterializedIndices(IndexExtState.SHADOW);
+        List<MaterializedIndex> shadowIndex = copied.getPartitions().stream().findFirst()
+                .get().getMaterializedIndices(IndexExtState.SHADOW);
         for (MaterializedIndex deleteIndex : shadowIndex) {
             LOG.debug("copied table delete shadow index : {}", deleteIndex.getId());
             copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId()));
@@ -1292,7 +1295,8 @@ public class OlapTable extends Table {
         partNames.addAll(copied.getPartitionNames());
 
         // partition name is case insensitive:
-        Set<String> lowerReservedPartitionNames = reservedPartitions.stream().map(String::toLowerCase).collect(Collectors.toSet());
+        Set<String> lowerReservedPartitionNames = reservedPartitions.stream()
+                .map(String::toLowerCase).collect(Collectors.toSet());
         for (String partName : partNames) {
             if (!lowerReservedPartitionNames.contains(partName.toLowerCase())) {
                 copied.dropPartitionAndReserveTablet(partName);
@@ -1530,7 +1534,8 @@ public class OlapTable extends Table {
         if (tableProperty == null) {
             tableProperty = new TableProperty(new HashMap<>());
         }
-        tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY, Boolean.valueOf(isInMemory).toString());
+        tableProperty.modifyTableProperties(PropertyAnalyzer.PROPERTIES_INMEMORY,
+                Boolean.valueOf(isInMemory).toString());
         tableProperty.buildInMemory();
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java
index bee4c382a0..9d95b14b61 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionInfo.java
@@ -197,7 +197,8 @@ public class PartitionInfo implements Writable {
     public void checkPartitionItemListsMatch(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
     }
 
-    public void checkPartitionItemListsConflict(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
+    public void checkPartitionItemListsConflict(List<PartitionItem> list1,
+            List<PartitionItem> list2) throws DdlException {
     }
 
     public DataProperty getDataProperty(long partitionId) {
@@ -282,8 +283,8 @@ public class PartitionInfo implements Writable {
         }
     }
 
-    public void resetPartitionIdForRestore(long newPartitionId, long oldPartitionId, ReplicaAllocation restoreReplicaAlloc,
-                                           boolean isSinglePartitioned) {
+    public void resetPartitionIdForRestore(long newPartitionId, long oldPartitionId,
+            ReplicaAllocation restoreReplicaAlloc, boolean isSinglePartitioned) {
         idToDataProperty.put(newPartitionId, idToDataProperty.remove(oldPartitionId));
         idToReplicaAllocation.remove(oldPartitionId);
         idToReplicaAllocation.put(newPartitionId, restoreReplicaAlloc);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java
index a953386630..4c5c4c4c6f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PrimitiveType.java
@@ -353,9 +353,11 @@ public enum PrimitiveType {
     public static ArrayList<PrimitiveType> getIntegerTypes() {
         return integerTypes;
     }
+
     public static ArrayList<PrimitiveType> getNumericTypes() {
         return numericTypes;
     }
+
     public static ArrayList<PrimitiveType> getSupportedTypes() {
         return supportedTypes;
     }
@@ -400,8 +402,10 @@ public enum PrimitiveType {
         compatibilityMatrix[NULL_TYPE.ordinal()][STRING.ordinal()] = STRING;
         compatibilityMatrix[NULL_TYPE.ordinal()][DECIMALV2.ordinal()] = DECIMALV2;
         compatibilityMatrix[NULL_TYPE.ordinal()][TIME.ordinal()] = TIME;
-        compatibilityMatrix[NULL_TYPE.ordinal()][BITMAP.ordinal()] = BITMAP;    //TODO(weixiang): bitmap can be null?
-        compatibilityMatrix[NULL_TYPE.ordinal()][QUANTILE_STATE.ordinal()] = QUANTILE_STATE;   //TODO(weixiang): QUANTILE_STATE can be null?
+        //TODO(weixiang): bitmap can be null?
+        compatibilityMatrix[NULL_TYPE.ordinal()][BITMAP.ordinal()] = BITMAP;
+        //TODO(weixiang): QUANTILE_STATE can be null?
+        compatibilityMatrix[NULL_TYPE.ordinal()][QUANTILE_STATE.ordinal()] = QUANTILE_STATE;
 
         compatibilityMatrix[BOOLEAN.ordinal()][BOOLEAN.ordinal()] = BOOLEAN;
         compatibilityMatrix[BOOLEAN.ordinal()][TINYINT.ordinal()] = TINYINT;
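The compatibilityMatrix assignments above use an ordinal-indexed lookup table:
row and column are the two operand types, and the cell holds the promoted type.
A tiny self-contained sketch of that pattern (names are hypothetical, not
Doris's):

    public class CompatDemo {
        enum MiniType { NULL_TYPE, BOOLEAN, TINYINT }

        static final MiniType[][] MATRIX =
                new MiniType[MiniType.values().length][MiniType.values().length];

        static {
            // NULL_TYPE promotes to the other operand's type, as in the hunk above.
            MATRIX[MiniType.NULL_TYPE.ordinal()][MiniType.BOOLEAN.ordinal()] = MiniType.BOOLEAN;
            MATRIX[MiniType.NULL_TYPE.ordinal()][MiniType.TINYINT.ordinal()] = MiniType.TINYINT;
            MATRIX[MiniType.BOOLEAN.ordinal()][MiniType.TINYINT.ordinal()] = MiniType.TINYINT;
        }

        static MiniType promote(MiniType a, MiniType b) {
            return MATRIX[a.ordinal()][b.ordinal()]; // null here means "not compatible"
        }
    }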
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java
index 5a9589b43d..168b1f9f33 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RandomDistributionInfo.java
@@ -62,6 +62,7 @@ public class RandomDistributionInfo extends DistributionInfo {
         super.write(out);
         out.writeInt(bucketNum);
     }
+
     public void readFields(DataInput in) throws IOException {
         super.readFields(in);
         bucketNum = in.readInt();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
index 090c78c4e2..91d15db09f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/RangePartitionInfo.java
@@ -79,7 +79,8 @@ public class RangePartitionInfo extends PartitionInfo {
     // create a new range and check it.
     private Range<PartitionKey> createAndCheckNewRange(PartitionKeyDesc partKeyDesc, boolean isTemp)
             throws AnalysisException, DdlException {
-        boolean isFixedPartitionKeyValueType = partKeyDesc.getPartitionType() == PartitionKeyDesc.PartitionKeyValueType.FIXED;
+        boolean isFixedPartitionKeyValueType
+                = partKeyDesc.getPartitionType() == PartitionKeyDesc.PartitionKeyValueType.FIXED;
 
         // generate partitionItemEntryList
         List<Map.Entry<Long, PartitionItem>> partitionItemEntryList = isFixedPartitionKeyValueType
@@ -122,7 +123,7 @@ public class RangePartitionInfo extends PartitionInfo {
     }
 
     private Range<PartitionKey> createNewRangeForFixedPartitionValueType(PartitionKeyDesc partKeyDesc,
-                                                                         List<Map.Entry<Long, PartitionItem>> partitionItemEntryList)
+            List<Map.Entry<Long, PartitionItem>> partitionItemEntryList)
             throws AnalysisException, DdlException {
         PartitionKey lowKey = PartitionKey.createPartitionKey(partKeyDesc.getLowerValues(), partitionColumns);
         PartitionKey upperKey =  PartitionKey.createPartitionKey(partKeyDesc.getUpperValues(), partitionColumns);
@@ -169,7 +170,8 @@ public class RangePartitionInfo extends PartitionInfo {
     }
 
     @Override
-    public void checkPartitionItemListsConflict(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
+    public void checkPartitionItemListsConflict(List<PartitionItem> list1, List<PartitionItem> list2)
+            throws DdlException {
         RangeUtils.checkRangeConflict(list1, list2);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java
index 99c37a0fe7..665cd26482 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Replica.java
@@ -255,7 +255,8 @@ public class Replica implements Writable {
         updateReplicaInfo(newVersion, this.lastFailedVersion, this.lastSuccessVersion, newDataSize, newRowCount);
     }
 
-    public synchronized void updateVersionWithFailedInfo(long newVersion, long lastFailedVersion, long lastSuccessVersion) {
+    public synchronized void updateVersionWithFailedInfo(
+            long newVersion, long lastFailedVersion, long lastSuccessVersion) {
         updateReplicaInfo(newVersion, lastFailedVersion, lastSuccessVersion, dataSize, rowCount);
     }
 
@@ -292,11 +293,13 @@ public class Replica implements Writable {
 
         if (newVersion < this.version) {
             // This case means that replica meta version has been updated by ReportHandler before
-            // For example, the publish version daemon has already sent some publish version tasks to one be to publish version 2, 3, 4, 5, 6,
-            // and the be finish all publish version tasks, the be's replica version is 6 now, but publish version daemon need to wait
+            // For example, the publish version daemon has already sent some publish version tasks
+            // to one be to publish versions 2, 3, 4, 5, 6, and the be finishes all publish version tasks,
+            // so the be's replica version is 6 now, but the publish version daemon needs to wait
             // for other be to finish most of publish version tasks to update replica version in fe.
-            // At the moment, the replica version in fe is 4, when ReportHandler sync tablet, it find reported replica version in be is 6 and then
-            // set version to 6 for replica in fe. And then publish version daemon try to finish txn, and use visible version(5)
+            // At the moment, the replica version in fe is 4; when ReportHandler syncs the tablet,
+            // it finds the reported replica version on the be is 6 and then sets the version to 6 in fe.
+            // And then the publish version daemon tries to finish the txn, and uses visible version(5)
             // to update the replica. Finally, it finds the newer version(5) is lower than replica version(6) in fe.
             if (LOG.isDebugEnabled()) {
                 LOG.debug("replica {} on backend {}'s new version {} is lower than meta version {},"
@@ -365,8 +368,10 @@ public class Replica implements Writable {
 
     /*
     * Check whether the replica's version catches up with the expected version.
-     * If ignoreAlter is true, and state is ALTER, and replica's version is PARTITION_INIT_VERSION, just return true, ignore the version.
-     *      This is for the case that when altering table, the newly created replica's version is PARTITION_INIT_VERSION,
+     * If ignoreAlter is true, and state is ALTER, and replica's version is
+     *  PARTITION_INIT_VERSION, just return true, ignore the version.
+     *      This is for the case that when altering table,
+     *      the newly created replica's version is PARTITION_INIT_VERSION,
      *      but we need to treat it as a "normal" replica which version is supposed to be "catch-up".
      *      But if state is ALTER but version larger than PARTITION_INIT_VERSION, which means this replica
      *      is already updated by load process, so we need to consider its version.
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java
index d2c2e7f48f..fac6a30e45 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Resource.java
@@ -137,6 +137,7 @@ public abstract class Resource implements Writable {
 
 
     public abstract Map<String, String> getCopiedProperties();
+
     /**
      * Fill BaseProcResult with different properties in child resources
      * ResourceMgr.RESOURCE_PROC_NODE_TITLE_NAMES format:
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java
index 9d1b3c43e9..7c934570b8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ResourceGroup.java
@@ -75,6 +75,7 @@ public class ResourceGroup implements Writable {
     public Map<ResourceType, Integer> getQuotaMap() {
         return quotaByType;
     }
+
     public static Builder builder() {
         return new Builder();
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java
index c87fd65481..e716fb65e2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarFunction.java
@@ -70,7 +70,8 @@ public class ScalarFunction extends Function {
 
     public ScalarFunction(FunctionName fnName, List<Type> argTypes, Type retType, boolean hasVarArgs,
                           TFunctionBinaryType binaryType, boolean userVisible, boolean isVec) {
-        super(0, fnName, argTypes, retType, hasVarArgs, binaryType, userVisible, isVec, NullableMode.DEPEND_ON_ARGUMENT);
+        super(0, fnName, argTypes, retType, hasVarArgs, binaryType, userVisible, isVec,
+                NullableMode.DEPEND_ON_ARGUMENT);
     }
 
     public ScalarFunction(FunctionName fnName, List<Type> argTypes,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java
index bb9a7a9520..11202c8a56 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/ScalarType.java
@@ -360,9 +360,11 @@ public class ScalarType extends Type {
                 break;
             case DECIMALV2:
                 if (Strings.isNullOrEmpty(precisionStr)) {
-                    stringBuilder.append("decimal").append("(").append(precision).append(", ").append(scale).append(")");
+                    stringBuilder.append("decimal").append("(").append(precision)
+                            .append(", ").append(scale).append(")");
                 } else if (!Strings.isNullOrEmpty(precisionStr) && !Strings.isNullOrEmpty(scaleStr)) {
-                    stringBuilder.append("decimal").append("(`").append(precisionStr).append("`, `").append(scaleStr).append("`)");
+                    stringBuilder.append("decimal").append("(`").append(precisionStr)
+                            .append("`, `").append(scaleStr).append("`)");
                 } else {
                     stringBuilder.append("decimal").append("(`").append(precisionStr).append("`)");
                 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
index a053aa5809..ef04f1e58e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SchemaTable.java
@@ -42,6 +42,7 @@ public class SchemaTable extends Table {
     private static final int GRANTEE_len = 81;
     private static final int PRIVILEGE_TYPE_LEN = 64;
     private static final int IS_GRANTABLE_LEN = 3;
+
     // Now we just mock tables, table_privileges, referential_constraints, key_column_usage and routines table
     // Because in MySQL ODBC, these tables are used.
     // TODO(zhaochun): Review some commercial BI to check if we need support where clause in show statement
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java
index 1a95031e3e..01b1e0488b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/SparkResource.java
@@ -162,8 +162,11 @@ public class SparkResource extends Resource {
         Map<String, String> copiedProperties = Maps.newHashMap(sparkConfigs);
         return copiedProperties;
     }
-    // Each SparkResource has and only has one SparkRepository.
-    // This method get the remote archive which matches the dpp version from remote repository
+
+    /**
+     * Each SparkResource has one and only one SparkRepository.
+     * This method gets the remote archive which matches the dpp version from the remote repository.
+     */
     public synchronized SparkRepository.SparkArchive prepareArchive() throws LoadException {
         String remoteRepositoryPath = workingDir + "/" + Catalog.getCurrentCatalog().getClusterId()
                 + "/" + SparkRepository.REPOSITORY_DIR + name;
@@ -252,7 +255,8 @@ public class SparkResource extends Resource {
             throw new DdlException("Missing " + SPARK_SUBMIT_DEPLOY_MODE + " in properties");
         }
         // if deploy machines do not set HADOOP_CONF_DIR env, we should set these configs below
-        if ((!sparkConfigs.containsKey(SPARK_YARN_RESOURCE_MANAGER_ADDRESS) || !sparkConfigs.containsKey(SPARK_FS_DEFAULT_FS))
+        if ((!sparkConfigs.containsKey(SPARK_YARN_RESOURCE_MANAGER_ADDRESS)
+                || !sparkConfigs.containsKey(SPARK_FS_DEFAULT_FS))
                 && isYarnMaster()) {
             throw new DdlException("Missing (" + SPARK_YARN_RESOURCE_MANAGER_ADDRESS + " and " + SPARK_FS_DEFAULT_FS
                                            + ") in yarn master");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java
index 5d68be8f5c..e3ffa3a521 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StructType.java
@@ -114,6 +114,7 @@ public class StructType extends Type {
             field.toThrift(container, node);
         }
     }
+
     @Override
     public String toString() {
         return toSql(0);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java
index 4b4a049253..d69bbe3213 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableIf.java
@@ -42,8 +42,7 @@ public interface TableIf {
 
     boolean isWriteLockHeldByCurrentThread();
 
-    <E extends Exception>
-    void writeLockOrException(E e) throws E;
+    <E extends Exception> void writeLockOrException(E e) throws E;
 
     void writeLockOrDdlException() throws DdlException;
 
@@ -53,8 +52,7 @@ public interface TableIf {
 
     boolean tryWriteLockOrMetaException(long timeout, TimeUnit unit) throws MetaNotFoundException;
 
-    <E extends Exception>
-    boolean tryWriteLockOrException(long timeout, TimeUnit unit, E e) throws E;
+    <E extends Exception> boolean tryWriteLockOrException(long timeout, TimeUnit unit, E e) throws E;
 
     boolean tryWriteLockIfExist(long timeout, TimeUnit unit);
 
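The two generic signatures above thread a caller-chosen checked exception
through the throws clause. A minimal sketch of the idiom (assumed lock field;
not the real Doris implementation):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class LockIdiomDemo {
        private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

        // <E> flows from the argument into the throws clause, so each call site
        // declares exactly the exception type it passes in.
        <E extends Exception> boolean tryWriteLockOrException(long timeout, TimeUnit unit, E e) throws E {
            try {
                if (!rwLock.writeLock().tryLock(timeout, unit)) {
                    throw e;
                }
                return true;
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw e;
            }
        }
    }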
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
index b9c3835c96..fd70e718e9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TableProperty.java
@@ -283,9 +283,11 @@ public class TableProperty implements Writable {
         return tableProperty;
     }
 
-    // For some historical reason, both "dynamic_partition.replication_num" and "dynamic_partition.replication_allocation"
+    // For some historical reason,
+    // both "dynamic_partition.replication_num" and "dynamic_partition.replication_allocation"
     // may both exist in "properties". We need to remove the "dynamic_partition.replication_num", or it will always replace
-    // the "dynamic_partition.replication_allocation", result in unable to set "dynamic_partition.replication_allocation".
+    // the "dynamic_partition.replication_allocation",
+    // making it impossible to set "dynamic_partition.replication_allocation".
     private void removeDuplicateReplicaNumProperty() {
         if (properties.containsKey(DynamicPartitionProperty.REPLICATION_NUM)
                 && properties.containsKey(DynamicPartitionProperty.REPLICATION_ALLOCATION)) {
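The comment above boils down to: when both keys are present, delete the legacy
one so it cannot shadow the newer one. A standalone sketch of that dedup rule
(hypothetical helper, literal keys standing in for the constants):

    import java.util.Map;

    class ReplicaPropDedupDemo {
        static void removeDuplicateReplicaNumProperty(Map<String, String> properties) {
            // Keep only the newer "replication_allocation" form of the setting.
            if (properties.containsKey("dynamic_partition.replication_num")
                    && properties.containsKey("dynamic_partition.replication_allocation")) {
                properties.remove("dynamic_partition.replication_num");
            }
        }
    }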
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
index c96c59e1c7..5642455c4d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Tablet.java
@@ -53,7 +53,8 @@ import java.util.stream.LongStream;
  */
 public class Tablet extends MetaObject implements Writable {
     private static final Logger LOG = LogManager.getLogger(Tablet.class);
-    // if current version count of replica is more than QUERYABLE_TIMES_OF_MIN_VERSION_COUNT times the minimum version count,
+    // if current version count of replica is more than
+    // QUERYABLE_TIMES_OF_MIN_VERSION_COUNT times the minimum version count,
     // then the replica would not be considered as queryable.
     private static final int QUERYABLE_TIMES_OF_MIN_VERSION_COUNT = 3;
 
@@ -472,7 +473,8 @@ public class Tablet extends MetaObject implements Writable {
             // condition explain:
             // 1. alive < replicationNum: replica is missing or bad
             // 2. replicas.size() >= aliveBackendsNum: the existing replicas occupies all available backends
-            // 3. aliveBackendsNum >= replicationNum: make sure after deleting, there will be at least one backend for new replica.
+            // 3. aliveBackendsNum >= replicationNum: make sure after deleting,
+            //    there will be at least one backend for new replica.
             // 4. replicationNum > 1: if replication num is set to 1, do not delete any replica, for safety reason
             return Pair.create(TabletStatus.FORCE_REDUNDANT, TabletSchedCtx.Priority.VERY_HIGH);
         } else if (alive < (replicationNum / 2) + 1) {
@@ -507,7 +509,8 @@ public class Tablet extends MetaObject implements Writable {
                     && availableBeIds.size() >= replicationNum
                     && replicationNum > 1) { // No BE can be choose to create a new replica
                 return Pair.create(TabletStatus.FORCE_REDUNDANT,
-                        stable < (replicationNum / 2) + 1 ? TabletSchedCtx.Priority.NORMAL : TabletSchedCtx.Priority.LOW);
+                        stable < (replicationNum / 2) + 1
+                                ? TabletSchedCtx.Priority.NORMAL : TabletSchedCtx.Priority.LOW);
             }
             if (stable < (replicationNum / 2) + 1) {
                 return Pair.create(TabletStatus.REPLICA_RELOCATING, TabletSchedCtx.Priority.NORMAL);
@@ -576,7 +579,8 @@ public class Tablet extends MetaObject implements Writable {
      * No need to check if backend is available. We consider all backends in 'backendsSet' are available,
      * If not, unavailable backends will be relocated by CalocateTableBalancer first.
      */
-    public TabletStatus getColocateHealthStatus(long visibleVersion, ReplicaAllocation replicaAlloc, Set<Long> backendsSet) {
+    public TabletStatus getColocateHealthStatus(long visibleVersion,
+            ReplicaAllocation replicaAlloc, Set<Long> backendsSet) {
         // Here we don't need to care about tag. Because the replicas of the colocate table have been confirmed
         // in ColocateTableCheckerAndBalancer.
         Short totalReplicaNum = replicaAlloc.getTotalReplicaNum();
@@ -592,7 +596,8 @@ public class Tablet extends MetaObject implements Writable {
         for (Replica replica : replicas) {
             if (!backendsSet.contains(replica.getBackendId())) {
                 // We don't care about replicas that are not in backendsSet.
-                // eg:  replicaBackendIds=(1,2,3,4); backendsSet=(1,2,3), then replica 4 should be skipped here and then goto ```COLOCATE_REDUNDANT``` in step 3
+                // eg:  replicaBackendIds=(1,2,3,4); backendsSet=(1,2,3),
+                //      then replica 4 should be skipped here and then goto ```COLOCATE_REDUNDANT``` in step 3
                 continue;
             }
 
@@ -603,7 +608,8 @@ public class Tablet extends MetaObject implements Writable {
                     return TabletStatus.COLOCATE_REDUNDANT;
                 } else {
                     // maybe in replica's DECOMMISSION state
-                    // Here we return VERSION_INCOMPLETE, and the tablet scheduler will finally set it's state to NORMAL.
+                    // Here we return VERSION_INCOMPLETE,
+                    // and the tablet scheduler will finally set its state to NORMAL.
                     return TabletStatus.VERSION_INCOMPLETE;
                 }
             }
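Several of the rewrapped conditions above are built on the same strict-majority
arithmetic. A standalone illustration (not Doris code):

    class QuorumDemo {
        // "alive < (replicationNum / 2) + 1" in the hunks above is the negation
        // of this strict-majority check.
        static boolean hasMajority(int alive, int replicationNum) {
            return alive >= replicationNum / 2 + 1;
        }

        public static void main(String[] args) {
            System.out.println(hasMajority(2, 3)); // true: 2 of 3 is a majority
            System.out.println(hasMajority(1, 3)); // false: below quorum
        }
    }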
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
index 1d843226f8..21c55fb4a8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TabletInvertedIndex.java
@@ -139,9 +139,11 @@ public class TabletInvertedIndex {
                         Replica replica = entry.getValue();
                         tabletFoundInMeta.add(tabletId);
                         TTabletInfo backendTabletInfo = backendTablet.getTabletInfos().get(0);
-                        if (partitionIdInMemorySet.contains(backendTabletInfo.getPartitionId()) != backendTabletInfo.isIsInMemory()) {
+                        if (partitionIdInMemorySet.contains(
+                                backendTabletInfo.getPartitionId()) != backendTabletInfo.isIsInMemory()) {
                             synchronized (tabletToInMemory) {
-                                tabletToInMemory.add(new ImmutableTriple<>(tabletId, backendTabletInfo.getSchemaHash(), !backendTabletInfo.isIsInMemory()));
+                                tabletToInMemory.add(new ImmutableTriple<>(tabletId, backendTabletInfo.getSchemaHash(),
+                                        !backendTabletInfo.isIsInMemory()));
                             }
                         }
                         // 1. (intersection)
@@ -200,21 +202,27 @@ public class TabletInvertedIndex {
                             List<Long> transactionIds = backendTabletInfo.getTransactionIds();
                             GlobalTransactionMgr transactionMgr = Catalog.getCurrentGlobalTransactionMgr();
                             for (Long transactionId : transactionIds) {
-                                TransactionState transactionState = transactionMgr.getTransactionState(tabletMeta.getDbId(), transactionId);
-                                if (transactionState == null || transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
+                                TransactionState transactionState
+                                        = transactionMgr.getTransactionState(tabletMeta.getDbId(), transactionId);
+                                if (transactionState == null
+                                        || transactionState.getTransactionStatus() == TransactionStatus.ABORTED) {
                                     synchronized (transactionsToClear) {
                                         transactionsToClear.put(transactionId, tabletMeta.getPartitionId());
                                     }
                                     LOG.debug("transaction id [{}] is not valid any more, "
                                             + "clear it from backend [{}]", transactionId, backendId);
                                 } else if (transactionState.getTransactionStatus() == TransactionStatus.VISIBLE) {
-                                    TableCommitInfo tableCommitInfo = transactionState.getTableCommitInfo(tabletMeta.getTableId());
-                                    PartitionCommitInfo partitionCommitInfo = tableCommitInfo == null ? null : tableCommitInfo.getPartitionCommitInfo(partitionId);
+                                    TableCommitInfo tableCommitInfo
+                                            = transactionState.getTableCommitInfo(tabletMeta.getTableId());
+                                    PartitionCommitInfo partitionCommitInfo = tableCommitInfo == null
+                                            ? null : tableCommitInfo.getPartitionCommitInfo(partitionId);
                                     if (partitionCommitInfo != null) {
-                                        TPartitionVersionInfo versionInfo = new TPartitionVersionInfo(tabletMeta.getPartitionId(),
+                                        TPartitionVersionInfo versionInfo
+                                                = new TPartitionVersionInfo(tabletMeta.getPartitionId(),
                                                 partitionCommitInfo.getVersion(), 0);
                                         synchronized (transactionsToPublish) {
-                                            ListMultimap<Long, TPartitionVersionInfo> map = transactionsToPublish.get(transactionState.getDbId());
+                                            ListMultimap<Long, TPartitionVersionInfo> map
+                                                    = transactionsToPublish.get(transactionState.getDbId());
                                             if (map == null) {
                                                 map = ArrayListMultimap.create();
                                                 transactionsToPublish.put(transactionState.getDbId(), map);
@@ -246,11 +254,13 @@ public class TabletInvertedIndex {
         }
 
         long end = System.currentTimeMillis();
-        LOG.info("finished to do tablet diff with backend[{}]. sync: {}. metaDel: {}. foundInMeta: {}. migration: {}. "
+        LOG.info("finished to do tablet diff with backend[{}]. sync: {}."
+                        + " metaDel: {}. foundInMeta: {}. migration: {}. "
                         + "found invalid transactions {}. found republish transactions {}. tabletInMemorySync: {}."
-                        + " need recovery: {}. cost: {} ms", backendId, tabletSyncMap.size(), tabletDeleteFromMeta.size(),
-                tabletFoundInMeta.size(), tabletMigrationMap.size(), transactionsToClear.size(), transactionsToPublish.size(),
-                tabletToInMemory.size(), tabletRecoveryMap.size(), (end - start));
+                        + " need recovery: {}. cost: {} ms", backendId, tabletSyncMap.size(),
+                tabletDeleteFromMeta.size(), tabletFoundInMeta.size(), tabletMigrationMap.size(),
+                transactionsToClear.size(), transactionsToPublish.size(), tabletToInMemory.size(),
+                tabletRecoveryMap.size(), (end - start));
     }
 
     public Long getTabletIdByReplica(long replicaId) {
@@ -302,7 +312,8 @@ public class TabletInvertedIndex {
             // backend replica's version is larger or newer than replica in FE, sync it.
             return true;
         } else if (versionInFe == backendTabletInfo.getVersion() && replicaInFe.isBad()) {
-            // backend replica's version is equal to replica in FE, but replica in FE is bad, while backend replica is good, sync it
+            // backend replica's version is equal to replica in FE, but replica in FE is bad,
+            // while backend replica is good, sync it
             return true;
         }
 
@@ -546,7 +557,8 @@ public class TabletInvertedIndex {
     }
 
     // Only build from available bes, exclude colocate tables
-    public Map<TStorageMedium, TreeMultimap<Long, PartitionBalanceInfo>> buildPartitionInfoBySkew(List<Long> availableBeIds) {
+    public Map<TStorageMedium, TreeMultimap<Long, PartitionBalanceInfo>> buildPartitionInfoBySkew(
+            List<Long> availableBeIds) {
         readLock();
 
         // 1. gen <partitionId-indexId, <beId, replicaCount>>
@@ -567,12 +579,14 @@ public class TabletInvertedIndex {
                     Preconditions.checkState(availableBeIds.contains(beId), "dead be " + beId);
                     TabletMeta tabletMeta = tabletMetaMap.get(tabletId);
                     Preconditions.checkNotNull(tabletMeta, "invalid tablet " + tabletId);
-                    Preconditions.checkState(!Catalog.getCurrentColocateIndex().isColocateTable(tabletMeta.getTableId()),
+                    Preconditions.checkState(
+                            !Catalog.getCurrentColocateIndex().isColocateTable(tabletMeta.getTableId()),
                             "should not be the colocate table");
 
                     TStorageMedium medium = tabletMeta.getStorageMedium();
                     Table<Long, Long, Map<Long, Long>> partitionReplicasInfo = partitionReplicasInfoMaps.get(medium);
-                    Map<Long, Long> countMap = partitionReplicasInfo.get(tabletMeta.getPartitionId(), tabletMeta.getIndexId());
+                    Map<Long, Long> countMap = partitionReplicasInfo.get(
+                            tabletMeta.getPartitionId(), tabletMeta.getIndexId());
                     if (countMap == null) {
                         // If one be doesn't have any replica of one partition, it should be counted too.
                         countMap = availableBeIds.stream().collect(Collectors.toMap(i -> i, i -> 0L));
@@ -597,8 +611,10 @@ public class TabletInvertedIndex {
         //      put <max_count-min_count, TableBalanceInfo> to table_info_by_skew
         Map<TStorageMedium, TreeMultimap<Long, PartitionBalanceInfo>> skewMaps = Maps.newHashMap();
         for (TStorageMedium medium : TStorageMedium.values()) {
-            TreeMultimap<Long, PartitionBalanceInfo> partitionInfoBySkew = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary());
-            Set<Table.Cell<Long, Long, Map<Long, Long>>> mapCells = partitionReplicasInfoMaps.getOrDefault(medium, HashBasedTable.create()).cellSet();
+            TreeMultimap<Long, PartitionBalanceInfo> partitionInfoBySkew
+                    = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary());
+            Set<Table.Cell<Long, Long, Map<Long, Long>>> mapCells
+                    = partitionReplicasInfoMaps.getOrDefault(medium, HashBasedTable.create()).cellSet();
             for (Table.Cell<Long, Long, Map<Long, Long>> cell : mapCells) {
                 Map<Long, Long> countMap = cell.getValue();
                 Preconditions.checkNotNull(countMap);
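The sync rule commented earlier in this file reduces to a small predicate:
sync when the backend reports a newer version, or the same version while the
replica in FE is marked bad. A sketch under assumed names:

    class ReplicaSyncRuleDemo {
        static boolean needSync(long versionInFe, boolean feReplicaBad, long versionOnBe) {
            if (versionOnBe > versionInFe) {
                return true; // backend is ahead of FE's metadata
            }
            // same version, but FE's copy is bad while the backend's copy is good
            return versionOnBe == versionInFe && feReplicaBad;
        }
    }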
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java
index f389b73f51..21430dd71a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/TempPartitions.java
@@ -56,6 +56,7 @@ public class TempPartitions implements Writable, GsonPostProcessable {
         idToPartition.put(partition.getId(), partition);
         nameToPartition.put(partition.getName(), partition);
     }
+
     public long getUpdateTime() {
         long updateTime = -1L;
         for (Partition p : idToPartition.values()) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java
index 8a89b0ffe3..9bf27226e1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/Type.java
@@ -132,9 +132,11 @@ public abstract class Type {
     public static ArrayList<ScalarType> getIntegerTypes() {
         return integerTypes;
     }
+
     public static ArrayList<ScalarType> getNumericTypes() {
         return numericTypes;
     }
+
     public static ArrayList<ScalarType> getSupportedTypes() {
         return supportedTypes;
     }
@@ -323,6 +325,7 @@ public abstract class Type {
     public boolean isDate() {
         return isScalarType(PrimitiveType.DATE);
     }
+
     /**
      * Returns true if Impala supports this type in the metadata. It does not mean we
      * can manipulate data of this type. For tables that contain columns with these
@@ -567,6 +570,7 @@ public abstract class Type {
                 return null;
         }
     }
+
     public static List<TTypeDesc> toThrift(Type[] types) {
         return toThrift(Lists.newArrayList(types));
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java
index 8b604dd142..dd3d78ff9d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/external/ExternalDatabase.java
@@ -17,7 +17,6 @@
 
 package org.apache.doris.catalog.external;
 
-import org.apache.commons.lang.NotImplementedException;
 import org.apache.doris.catalog.DatabaseIf;
 import org.apache.doris.catalog.DatabaseProperty;
 import org.apache.doris.catalog.OlapTable;
@@ -28,6 +27,7 @@ import org.apache.doris.common.MetaNotFoundException;
 import org.apache.doris.datasource.ExternalDataSource;
 import org.apache.doris.qe.ConnectContext;
 
+import org.apache.commons.lang.NotImplementedException;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java
index 824137b2f1..85136dd17a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BackendLoadStatistic.java
@@ -181,8 +181,10 @@ public class BackendLoadStatistic {
             TStorageMedium medium = diskInfo.getStorageMedium();
             if (diskInfo.getState() == DiskState.ONLINE) {
                 // we only collect online disk's capacity
-                totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + diskInfo.getTotalCapacityB());
-                totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB()));
+                totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L)
+                        + diskInfo.getTotalCapacityB());
+                totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L)
+                        + (diskInfo.getTotalCapacityB() - diskInfo.getAvailableCapacityB()));
             }
 
             RootPathLoadStatistic pathStatistic = new RootPathLoadStatistic(beId, diskInfo.getRootPath(),
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java
index fb55091b35..1b03a7044f 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/BeLoadRebalancer.java
@@ -201,7 +201,8 @@ public class BeLoadRebalancer extends Rebalancer {
      * 2. Select a low load backend as destination. And the tablet should not have a replica on this backend.
      */
     @Override
-    public void completeSchedCtx(TabletSchedCtx tabletCtx, Map<Long, PathSlot> backendsWorkingSlots) throws SchedException {
+    public void completeSchedCtx(TabletSchedCtx tabletCtx,
+            Map<Long, PathSlot> backendsWorkingSlots) throws SchedException {
         ClusterLoadStatistic clusterStat = statisticMap.get(tabletCtx.getCluster(), tabletCtx.getTag());
         if (clusterStat == null) {
             throw new SchedException(Status.UNRECOVERABLE, "cluster does not exist");
@@ -283,8 +284,9 @@ public class BeLoadRebalancer extends Rebalancer {
                     continue;
                 }
 
-                if (!Config.be_rebalancer_fuzzy_test && !clusterStat.isMoreBalanced(tabletCtx.getSrcBackendId(), beStat.getBeId(),
-                        tabletCtx.getTabletId(), tabletCtx.getTabletSize(), tabletCtx.getStorageMedium())) {
+                if (!Config.be_rebalancer_fuzzy_test && !clusterStat.isMoreBalanced(
+                        tabletCtx.getSrcBackendId(), beStat.getBeId(), tabletCtx.getTabletId(),
+                        tabletCtx.getTabletSize(), tabletCtx.getStorageMedium())) {
                     continue;
                 }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java
index a407347de3..aee2665afc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ClusterLoadStatistic.java
@@ -45,23 +45,24 @@ import java.util.stream.Collectors;
 public class ClusterLoadStatistic {
     private static final Logger LOG = LogManager.getLogger(ClusterLoadStatistic.class);
 
-    private SystemInfoService infoService;
-    private TabletInvertedIndex invertedIndex;
-
-    private String clusterName;
-    private Tag tag;
-
-    private Map<TStorageMedium, Long> totalCapacityMap = Maps.newHashMap();
-    private Map<TStorageMedium, Long> totalUsedCapacityMap = Maps.newHashMap();
-    private Map<TStorageMedium, Long> totalReplicaNumMap = Maps.newHashMap();
-    private Map<TStorageMedium, Double> avgUsedCapacityPercentMap = Maps.newHashMap();
-    private Map<TStorageMedium, Double> avgReplicaNumPercentMap = Maps.newHashMap();
-    private Map<TStorageMedium, Double> avgLoadScoreMap = Maps.newHashMap();
+    private final SystemInfoService infoService;
+    private final TabletInvertedIndex invertedIndex;
+
+    private final String clusterName;
+    private final Tag tag;
+
+    private final Map<TStorageMedium, Long> totalCapacityMap = Maps.newHashMap();
+    private final Map<TStorageMedium, Long> totalUsedCapacityMap = Maps.newHashMap();
+    private final Map<TStorageMedium, Long> totalReplicaNumMap = Maps.newHashMap();
+    private final Map<TStorageMedium, Double> avgUsedCapacityPercentMap = Maps.newHashMap();
+    private final Map<TStorageMedium, Double> avgReplicaNumPercentMap = Maps.newHashMap();
+    private final Map<TStorageMedium, Double> avgLoadScoreMap = Maps.newHashMap();
     // storage medium -> number of backend which has this kind of medium
-    private Map<TStorageMedium, Integer> backendNumMap = Maps.newHashMap();
-    private List<BackendLoadStatistic> beLoadStatistics = Lists.newArrayList();
-    private Map<TStorageMedium, TreeMultimap<Long, Long>> beByTotalReplicaCountMaps = Maps.newHashMap();
-    private Map<TStorageMedium, TreeMultimap<Long, TabletInvertedIndex.PartitionBalanceInfo>> skewMaps = Maps.newHashMap();
+    private final Map<TStorageMedium, Integer> backendNumMap = Maps.newHashMap();
+    private final List<BackendLoadStatistic> beLoadStatistics = Lists.newArrayList();
+    private final Map<TStorageMedium, TreeMultimap<Long, Long>> beByTotalReplicaCountMaps = Maps.newHashMap();
+    private Map<TStorageMedium, TreeMultimap<Long, TabletInvertedIndex.PartitionBalanceInfo>> skewMaps
+            = Maps.newHashMap();
 
     public ClusterLoadStatistic(String clusterName, Tag tag, SystemInfoService infoService,
                                 TabletInvertedIndex invertedIndex) {
@@ -99,20 +100,24 @@ public class ClusterLoadStatistic {
             }
 
             for (TStorageMedium medium : TStorageMedium.values()) {
-                totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L) + beStatistic.getTotalCapacityB(medium));
-                totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) + beStatistic.getTotalUsedCapacityB(medium));
-                totalReplicaNumMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) + beStatistic.getReplicaNum(medium));
+                totalCapacityMap.put(medium, totalCapacityMap.getOrDefault(medium, 0L)
+                        + beStatistic.getTotalCapacityB(medium));
+                totalUsedCapacityMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L)
+                        + beStatistic.getTotalUsedCapacityB(medium));
+                totalReplicaNumMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L)
+                        + beStatistic.getReplicaNum(medium));
                 if (beStatistic.hasMedium(medium)) {
                     backendNumMap.put(medium, backendNumMap.getOrDefault(medium, 0) + 1);
                 }
             }
-
             beLoadStatistics.add(beStatistic);
         }
 
         for (TStorageMedium medium : TStorageMedium.values()) {
-            avgUsedCapacityPercentMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L) / (double) totalCapacityMap.getOrDefault(medium, 1L));
-            avgReplicaNumPercentMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L) / (double) backendNumMap.getOrDefault(medium, 1));
+            avgUsedCapacityPercentMap.put(medium, totalUsedCapacityMap.getOrDefault(medium, 0L)
+                    / (double) totalCapacityMap.getOrDefault(medium, 1L));
+            avgReplicaNumPercentMap.put(medium, totalReplicaNumMap.getOrDefault(medium, 0L)
+                    / (double) backendNumMap.getOrDefault(medium, 1));
         }
 
         for (BackendLoadStatistic beStatistic : beLoadStatistics) {
@@ -141,8 +146,10 @@ public class ClusterLoadStatistic {
             // Multimap<skew -> PartitionBalanceInfo>
             //                  PartitionBalanceInfo: <pid -> <partitionReplicaCount, beId>>
             // Only count available bes here, aligned with the beByTotalReplicaCountMaps.
-            skewMaps = invertedIndex.buildPartitionInfoBySkew(beLoadStatistics.stream().filter(BackendLoadStatistic::isAvailable).
-                    map(BackendLoadStatistic::getBeId).collect(Collectors.toList()));
+            skewMaps = invertedIndex.buildPartitionInfoBySkew(beLoadStatistics.stream()
+                    .filter(BackendLoadStatistic::isAvailable)
+                    .map(BackendLoadStatistic::getBeId)
+                    .collect(Collectors.toList()));
         }
     }
 
@@ -168,6 +175,7 @@ public class ClusterLoadStatistic {
                 continue;
             }
 
+
             if (Config.be_rebalancer_fuzzy_test) {
                 if (beStat.getLoadScore(medium) > avgLoadScore) {
                     beStat.setClazz(medium, Classification.HIGH);
@@ -252,8 +260,10 @@ public class ClusterLoadStatistic {
                 destBeStat.getTotalCapacityB(medium), destBeStat.getReplicaNum(medium) + 1,
                 avgUsedCapacityPercentMap.get(medium), avgReplicaNumPercentMap.get(medium));
 
-        double currentDiff = Math.abs(currentSrcBeScore - avgLoadScoreMap.get(medium)) + Math.abs(currentDestBeScore - avgLoadScoreMap.get(medium));
-        double newDiff = Math.abs(newSrcBeScore.score - avgLoadScoreMap.get(medium)) + Math.abs(newDestBeScore.score - avgLoadScoreMap.get(medium));
+        double currentDiff = Math.abs(currentSrcBeScore - avgLoadScoreMap.get(medium))
+                + Math.abs(currentDestBeScore - avgLoadScoreMap.get(medium));
+        double newDiff = Math.abs(newSrcBeScore.score - avgLoadScoreMap.get(medium))
+                + Math.abs(newDestBeScore.score - avgLoadScoreMap.get(medium));
 
         LOG.debug("after migrate {}(size: {}) from {} to {}, medium: {}, the load score changed."
                         + " src: {} -> {}, dest: {}->{}, average score: {}. current diff: {}, new diff: {},"
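
For illustration, the reformatted hunk above computes whether a tablet migration improves balance: a move helps only when the summed distance of the source and destination scores from the average shrinks. A minimal standalone sketch of that criterion (hypothetical names, not the committed code):

    final class BalanceCheckSketch {
        // A move is beneficial when |src - avg| + |dest - avg| is strictly
        // smaller after the move than before it.
        static boolean isMoveBeneficial(double srcBefore, double destBefore,
                double srcAfter, double destAfter, double avgScore) {
            double currentDiff = Math.abs(srcBefore - avgScore) + Math.abs(destBefore - avgScore);
            double newDiff = Math.abs(srcAfter - avgScore) + Math.abs(destAfter - avgScore);
            return newDiff < currentDiff;
        }
    }
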
@@ -294,8 +304,8 @@ public class ClusterLoadStatistic {
                 pathStat.add(pathStatistic.getStorageMedium().name());
                 pathStat.add(String.valueOf(pathStatistic.getUsedCapacityB()));
                 pathStat.add(String.valueOf(pathStatistic.getCapacityB()));
-                pathStat.add(String.valueOf(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(pathStatistic.getUsedCapacityB() * 100
-                        / (double) pathStatistic.getCapacityB())));
+                pathStat.add(String.valueOf(DebugUtil.DECIMAL_FORMAT_SCALE_3.format(
+                        pathStatistic.getUsedCapacityB() * 100 / (double) pathStatistic.getCapacityB())));
                 pathStat.add(pathStatistic.getClazz().name());
                 pathStat.add(pathStatistic.getDiskState().name());
                 statistics.add(pathStat);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java
index 111f8d0aa7..240c02ad56 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/ColocateTableCheckerAndBalancer.java
@@ -178,7 +178,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
                 }
 
                 // get all unavailable backends in the backend bucket sequence of this group
-                Set<Long> unavailableBeIdsInGroup = getUnavailableBeIdsInGroup(infoService, colocateIndex, groupId, tag);
+                Set<Long> unavailableBeIdsInGroup = getUnavailableBeIdsInGroup(
+                        infoService, colocateIndex, groupId, tag);
                 // get all available backends for this group
                 Set<Long> beIdsInOtherTag = colocateIndex.getBackendIdsExceptForTag(groupId, tag);
                 List<Long> availableBeIds = getAvailableBeIds(db.getClusterName(), tag, beIdsInOtherTag, infoService);
@@ -189,7 +190,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
                     colocateIndex.addBackendsPerBucketSeqByTag(groupId, tag, balancedBackendsPerBucketSeq);
                     Map<Tag, List<List<Long>>> balancedBackendsPerBucketSeqMap = Maps.newHashMap();
                     balancedBackendsPerBucketSeqMap.put(tag, balancedBackendsPerBucketSeq);
-                    ColocatePersistInfo info = ColocatePersistInfo.createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeqMap);
+                    ColocatePersistInfo info = ColocatePersistInfo
+                            .createForBackendsPerBucketSeq(groupId, balancedBackendsPerBucketSeqMap);
                     catalog.getEditLog().logColocateBackendsPerBucketSeq(info);
                     LOG.info("balance group {}. now backends per bucket sequence for tag {} is: {}",
                             groupId, tag, balancedBackendsPerBucketSeq);
@@ -232,7 +234,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
                 olapTable.readLock();
                 try {
                     for (Partition partition : olapTable.getPartitions()) {
-                        ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId());
+                        ReplicaAllocation replicaAlloc
+                                = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId());
                         short replicationNum = replicaAlloc.getTotalReplicaNum();
                         long visibleVersion = partition.getVisibleVersion();
                         // Here we only get VISIBLE indexes. All other indexes are not queryable.
@@ -243,11 +246,14 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
                             int idx = 0;
                             for (Long tabletId : index.getTabletIdsInOrder()) {
                                 Set<Long> bucketsSeq = backendBucketsSeq.get(idx);
-                                Preconditions.checkState(bucketsSeq.size() == replicationNum, bucketsSeq.size() + " vs. " + replicationNum);
+                                Preconditions.checkState(bucketsSeq.size() == replicationNum,
+                                        bucketsSeq.size() + " vs. " + replicationNum);
                                 Tablet tablet = index.getTablet(tabletId);
-                                TabletStatus st = tablet.getColocateHealthStatus(visibleVersion, replicaAlloc, bucketsSeq);
+                                TabletStatus st = tablet.getColocateHealthStatus(
+                                        visibleVersion, replicaAlloc, bucketsSeq);
                                 if (st != TabletStatus.HEALTHY) {
-                                    unstableReason = String.format("get unhealthy tablet %d in colocate table. status: %s", tablet.getId(), st);
+                                    unstableReason = String.format("get unhealthy tablet %d in colocate table."
+                                            + " status: %s", tablet.getId(), st);
                                     LOG.debug(unstableReason);
 
                                     if (!tablet.readyToBeRepaired(Priority.NORMAL)) {
@@ -266,7 +272,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
 
                                     AddResult res = tabletScheduler.addTablet(tabletCtx, false /* not force */);
                                     if (res == AddResult.LIMIT_EXCEED || res == AddResult.DISABLED) {
-                                        // tablet in scheduler exceed limit, or scheduler is disabled, skip this group and check next one.
+                                        // tablets in scheduler exceed the limit, or the scheduler is disabled,
+                                        // skip this group and check the next one.
                                         LOG.info("tablet scheduler return: {}. stop colocate table check", res.name());
                                         break OUT;
                                     }
@@ -347,13 +354,15 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
      *  Return false if nothing changed.
      */
     private boolean relocateAndBalance(GroupId groupId, Tag tag, Set<Long> unavailableBeIds, List<Long> availableBeIds,
-                                       ColocateTableIndex colocateIndex, SystemInfoService infoService,
-                                       ClusterLoadStatistic statistic, List<List<Long>> balancedBackendsPerBucketSeq) {
+            ColocateTableIndex colocateIndex, SystemInfoService infoService,
+            ClusterLoadStatistic statistic, List<List<Long>> balancedBackendsPerBucketSeq) {
         ColocateGroupSchema groupSchema = colocateIndex.getGroupSchema(groupId);
         short replicaNum = groupSchema.getReplicaAlloc().getReplicaNumByTag(tag);
-        List<List<Long>> backendsPerBucketSeq = Lists.newArrayList(colocateIndex.getBackendsPerBucketSeqByTag(groupId, tag));
+        List<List<Long>> backendsPerBucketSeq = Lists.newArrayList(
+                colocateIndex.getBackendsPerBucketSeqByTag(groupId, tag));
         // [[A,B,C],[B,C,D]] -> [A,B,C,B,C,D]
-        List<Long> flatBackendsPerBucketSeq = backendsPerBucketSeq.stream().flatMap(List::stream).collect(Collectors.toList());
+        List<Long> flatBackendsPerBucketSeq = backendsPerBucketSeq.stream()
+                .flatMap(List::stream).collect(Collectors.toList());
 
         boolean isChanged = false;
         OUT:
@@ -381,7 +390,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
             }
             // sort backends with replica num in desc order
             List<Map.Entry<Long, Long>> backendWithReplicaNum =
-                    getSortedBackendReplicaNumPairs(availableBeIds, unavailableBeIds, statistic, flatBackendsPerBucketSeq);
+                    getSortedBackendReplicaNumPairs(availableBeIds,
+                            unavailableBeIds, statistic, flatBackendsPerBucketSeq);
 
             // if there is only one available backend and no unavailable bucketId to relocate, end the outer loop
             if (backendWithReplicaNum.size() <= 1) {
@@ -484,8 +494,8 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
         return hostsPerBucketSeq;
     }
 
-    private List<Map.Entry<Long, Long>> getSortedBackendReplicaNumPairs(List<Long> allAvailBackendIds, Set<Long> unavailBackendIds,
-                                                                        ClusterLoadStatistic statistic, List<Long> flatBackendsPerBucketSeq) {
+    private List<Map.Entry<Long, Long>> getSortedBackendReplicaNumPairs(List<Long> allAvailBackendIds,
+            Set<Long> unavailBackendIds, ClusterLoadStatistic statistic, List<Long> flatBackendsPerBucketSeq) {
         // backend id -> replica num, and sorted by replica num, descending.
         Map<Long, Long> backendToReplicaNum = flatBackendsPerBucketSeq.stream()
                 .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
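
The grouping above counts how many bucket slots each backend holds, and the (backendId, replicaNum) pairs are then sorted by replica num in descending order. A self-contained sketch of that counting step, assuming a plain flattened list of backend ids:

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import java.util.stream.Collectors;

    final class ReplicaCountSketch {
        public static void main(String[] args) {
            // [[A,B,C],[B,C,D]] flattened -> [A,B,C,B,C,D]
            List<Long> flat = Arrays.asList(1L, 2L, 3L, 2L, 3L, 4L);
            Map<Long, Long> counts = flat.stream()
                    .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
            List<Map.Entry<Long, Long>> sortedDesc = counts.entrySet().stream()
                    .sorted(Map.Entry.<Long, Long>comparingByValue(Comparator.reverseOrder()))
                    .collect(Collectors.toList());
            System.out.println(sortedDesc); // e.g. [2=2, 3=2, 1=1, 4=1]
        }
    }
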
@@ -544,20 +554,23 @@ public class ColocateTableCheckerAndBalancer extends MasterDaemon {
         Set<Long> backends = colocateIndex.getBackendsByGroup(groupId, tag);
         Set<Long> unavailableBeIds = Sets.newHashSet();
         for (Long backendId : backends) {
-            if (!checkBackendAvailable(backendId, tag, Sets.newHashSet(), infoService, Config.colocate_group_relocate_delay_second)) {
+            if (!checkBackendAvailable(backendId, tag, Sets.newHashSet(), infoService,
+                    Config.colocate_group_relocate_delay_second)) {
                 unavailableBeIds.add(backendId);
             }
         }
         return unavailableBeIds;
     }
 
-    private List<Long> getAvailableBeIds(String cluster, Tag tag, Set<Long> excludedBeIds, SystemInfoService infoService) {
+    private List<Long> getAvailableBeIds(String cluster, Tag tag, Set<Long> excludedBeIds,
+            SystemInfoService infoService) {
         // get all backends to allBackendIds, and check be availability using checkBackendAvailable
         // a backend stopped for a short period of time is still considered available
         List<Long> allBackendIds = infoService.getClusterBackendIds(cluster, false);
         List<Long> availableBeIds = Lists.newArrayList();
         for (Long backendId : allBackendIds) {
-            if (checkBackendAvailable(backendId, tag, excludedBeIds, infoService, Config.colocate_group_relocate_delay_second)) {
+            if (checkBackendAvailable(backendId, tag, excludedBeIds, infoService,
+                    Config.colocate_group_relocate_delay_second)) {
                 availableBeIds.add(backendId);
             }
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java
index 80bac26717..493e465b40 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DiskRebalancer.java
@@ -246,7 +246,8 @@ public class DiskRebalancer extends Rebalancer {
      * 3. Select a low load path from this backend as destination.
      */
     @Override
-    public void completeSchedCtx(TabletSchedCtx tabletCtx, Map<Long, PathSlot> backendsWorkingSlots) throws SchedException {
+    public void completeSchedCtx(TabletSchedCtx tabletCtx,
+            Map<Long, PathSlot> backendsWorkingSlots) throws SchedException {
         ClusterLoadStatistic clusterStat = statisticMap.get(tabletCtx.getCluster(), tabletCtx.getTag());
         if (clusterStat == null) {
             throw new SchedException(Status.UNRECOVERABLE, "cluster does not exist");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
index 0aec8f21f0..fef969f646 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/DynamicPartitionScheduler.java
@@ -69,8 +69,8 @@ import java.util.Set;
 
 /**
 * This class is used to periodically add or drop partitions on an olapTable which specifies dynamic partition properties
- * Config.dynamic_partition_enable determine whether this feature is enable, Config.dynamic_partition_check_interval_seconds
- * determine how often the task is performed
+ * Config.dynamic_partition_enable determines whether this feature is enabled,
+ * Config.dynamic_partition_check_interval_seconds determines how often the task is performed
  */
 public class DynamicPartitionScheduler extends MasterDaemon {
     private static final Logger LOG = LogManager.getLogger(DynamicPartitionScheduler.class);
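
As the javadoc above says, the scheduler is a daemon gated by Config.dynamic_partition_enable and woken every Config.dynamic_partition_check_interval_seconds. A minimal sketch of that periodic-check pattern (standalone, hypothetical names rather than the FE MasterDaemon framework):

    final class PeriodicCheckerSketch extends Thread {
        // Stand-ins for Config.dynamic_partition_enable and
        // Config.dynamic_partition_check_interval_seconds.
        static volatile boolean featureEnabled = true;
        static volatile long checkIntervalSeconds = 600;

        @Override
        public void run() {
            while (!isInterrupted()) {
                try {
                    if (featureEnabled) {
                        runOnce(); // the per-table add/drop partition pass would go here
                    }
                    Thread.sleep(checkIntervalSeconds * 1000L);
                } catch (InterruptedException e) {
                    return; // daemon asked to stop
                }
            }
        }

        private void runOnce() {
            // placeholder: iterate tables with dynamic partition properties
        }
    }
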
@@ -163,16 +163,20 @@ public class DynamicPartitionScheduler extends MasterDaemon {
         int hotPartitionNum = dynamicPartitionProperty.getHotPartitionNum();
 
         for (; idx <= dynamicPartitionProperty.getEnd(); idx++) {
-            String prevBorder = DynamicPartitionUtil.getPartitionRangeString(dynamicPartitionProperty, now, idx, partitionFormat);
-            String nextBorder = DynamicPartitionUtil.getPartitionRangeString(dynamicPartitionProperty, now, idx + 1, partitionFormat);
+            String prevBorder = DynamicPartitionUtil.getPartitionRangeString(
+                    dynamicPartitionProperty, now, idx, partitionFormat);
+            String nextBorder = DynamicPartitionUtil.getPartitionRangeString(
+                    dynamicPartitionProperty, now, idx + 1, partitionFormat);
             PartitionValue lowerValue = new PartitionValue(prevBorder);
             PartitionValue upperValue = new PartitionValue(nextBorder);
 
             boolean isPartitionExists = false;
             Range<PartitionKey> addPartitionKeyRange;
             try {
-                PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerValue), Collections.singletonList(partitionColumn));
-                PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperValue), Collections.singletonList(partitionColumn));
+                PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerValue),
+                        Collections.singletonList(partitionColumn));
+                PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperValue),
+                        Collections.singletonList(partitionColumn));
                 addPartitionKeyRange = Range.closedOpen(lowerBound, upperBound);
             } catch (AnalysisException | IllegalArgumentException e) {
                 // AnalysisException: keys.size is always equal to column.size, cannot reach this exception
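
The hunk above builds each dynamic partition as the half-open key range [prevBorder, nextBorder) via Range.closedOpen, so adjacent partitions share a border without overlapping. A tiny sketch of that semantics with plain Guava ranges (illustrative values only):

    import com.google.common.collect.Range;

    final class RangeSketch {
        public static void main(String[] args) {
            Range<String> p1 = Range.closedOpen("2022-06-01", "2022-06-02");
            Range<String> p2 = Range.closedOpen("2022-06-02", "2022-06-03");
            System.out.println(p1.contains("2022-06-01")); // true: lower bound is closed
            System.out.println(p1.contains("2022-06-02")); // false: upper bound is open
            // adjacent ranges are connected but their intersection is empty
            System.out.println(p1.isConnected(p2) && p1.intersection(p2).isEmpty()); // true
        }
    }
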
@@ -188,10 +192,12 @@ public class DynamicPartitionScheduler extends MasterDaemon {
                 } catch (Exception e) {
                     isPartitionExists = true;
                     if (addPartitionKeyRange.equals(partitionItem.getItems())) {
-                        LOG.info("partition range {} exist in table {}, clear fail msg", addPartitionKeyRange, olapTable.getName());
+                        LOG.info("partition range {} exist in table {}, clear fail msg",
+                                addPartitionKeyRange, olapTable.getName());
                         clearCreatePartitionFailedMsg(olapTable.getId());
                     } else {
-                        recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), e.getMessage(), olapTable.getId());
+                        recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(),
+                                e.getMessage(), olapTable.getId());
                     }
                     break;
                 }
@@ -201,7 +207,8 @@ public class DynamicPartitionScheduler extends MasterDaemon {
             }
 
             // construct partition desc
-            PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed(Collections.singletonList(lowerValue), Collections.singletonList(upperValue));
+            PartitionKeyDesc partitionKeyDesc = PartitionKeyDesc.createFixed(Collections.singletonList(lowerValue),
+                    Collections.singletonList(upperValue));
             HashMap<String, String> partitionProperties = new HashMap<>(1);
             if (dynamicPartitionProperty.getReplicaAllocation().isNotSet()) {
                 partitionProperties.put(PropertyAnalyzer.PROPERTIES_REPLICATION_ALLOCATION,
@@ -216,8 +223,9 @@ public class DynamicPartitionScheduler extends MasterDaemon {
                 setStorageMediumProperty(partitionProperties, dynamicPartitionProperty, now, hotPartitionNum, idx);
             }
 
-            String partitionName = dynamicPartitionProperty.getPrefix() + DynamicPartitionUtil.getFormattedPartitionName(
-                    dynamicPartitionProperty.getTimeZone(), prevBorder, dynamicPartitionProperty.getTimeUnit());
+            String partitionName = dynamicPartitionProperty.getPrefix()
+                    + DynamicPartitionUtil.getFormattedPartitionName(dynamicPartitionProperty.getTimeZone(),
+                    prevBorder, dynamicPartitionProperty.getTimeUnit());
             SinglePartitionDesc rangePartitionDesc = new SinglePartitionDesc(true, partitionName,
                     partitionKeyDesc, partitionProperties);
 
@@ -239,8 +247,8 @@ public class DynamicPartitionScheduler extends MasterDaemon {
         return addPartitionClauses;
     }
 
-    private void setStorageMediumProperty(HashMap<String, String> partitionProperties, DynamicPartitionProperty property,
-                                          ZonedDateTime now, int hotPartitionNum, int offset) {
+    private void setStorageMediumProperty(HashMap<String, String> partitionProperties,
+            DynamicPartitionProperty property, ZonedDateTime now, int hotPartitionNum, int offset) {
         if (offset + hotPartitionNum <= 0) {
             return;
         }
@@ -250,14 +258,16 @@ public class DynamicPartitionScheduler extends MasterDaemon {
         partitionProperties.put(PropertyAnalyzer.PROPERTIES_STORAGE_COOLDOWN_TIME, cooldownTime);
     }
 
-    private Range<PartitionKey> getClosedRange(Database db, OlapTable olapTable, Column partitionColumn, String partitionFormat,
-                                               String lowerBorderOfReservedHistory, String upperBorderOfReservedHistory) {
+    private Range<PartitionKey> getClosedRange(Database db, OlapTable olapTable, Column partitionColumn,
+            String partitionFormat, String lowerBorderOfReservedHistory, String upperBorderOfReservedHistory) {
         Range<PartitionKey> reservedHistoryPartitionKeyRange = null;
         PartitionValue lowerBorderPartitionValue = new PartitionValue(lowerBorderOfReservedHistory);
         PartitionValue upperBorderPartitionValue = new PartitionValue(upperBorderOfReservedHistory);
         try {
-            PartitionKey lowerBorderBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerBorderPartitionValue), Collections.singletonList(partitionColumn));
-            PartitionKey upperBorderBound = PartitionKey.createPartitionKey(Collections.singletonList(upperBorderPartitionValue), Collections.singletonList(partitionColumn));
+            PartitionKey lowerBorderBound = PartitionKey.createPartitionKey(
+                    Collections.singletonList(lowerBorderPartitionValue), Collections.singletonList(partitionColumn));
+            PartitionKey upperBorderBound = PartitionKey.createPartitionKey(
+                    Collections.singletonList(upperBorderPartitionValue), Collections.singletonList(partitionColumn));
             reservedHistoryPartitionKeyRange = Range.closed(lowerBorderBound, upperBorderBound);
         } catch (AnalysisException e) {
             // AnalysisException: keys.size is always equal to column.size, cannot reach this exception
@@ -272,7 +282,8 @@ public class DynamicPartitionScheduler extends MasterDaemon {
      * 1. get the range of [start, 0) as a reserved range.
      * 2. get DropPartitionClause of partitions whose range is before this reserved range.
      */
-    private ArrayList<DropPartitionClause> getDropPartitionClause(Database db, OlapTable olapTable, Column partitionColumn, String partitionFormat) throws DdlException {
+    private ArrayList<DropPartitionClause> getDropPartitionClause(Database db, OlapTable olapTable,
+            Column partitionColumn, String partitionFormat) throws DdlException {
         ArrayList<DropPartitionClause> dropPartitionClauses = new ArrayList<>();
         DynamicPartitionProperty dynamicPartitionProperty = olapTable.getTableProperty().getDynamicPartitionProperty();
         if (dynamicPartitionProperty.getStart() == DynamicPartitionProperty.MIN_START_OFFSET) {
@@ -290,8 +301,10 @@ public class DynamicPartitionScheduler extends MasterDaemon {
         List<Range<PartitionKey>> reservedHistoryPartitionKeyRangeList = new ArrayList<Range<PartitionKey>>();
         Range<PartitionKey> reservePartitionKeyRange;
         try {
-            PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerPartitionValue), Collections.singletonList(partitionColumn));
-            PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperPartitionValue), Collections.singletonList(partitionColumn));
+            PartitionKey lowerBound = PartitionKey.createPartitionKey(Collections.singletonList(lowerPartitionValue),
+                    Collections.singletonList(partitionColumn));
+            PartitionKey upperBound = PartitionKey.createPartitionKey(Collections.singletonList(upperPartitionValue),
+                    Collections.singletonList(partitionColumn));
             reservePartitionKeyRange = Range.closedOpen(lowerBound, upperBound);
             reservedHistoryPartitionKeyRangeList.add(reservePartitionKeyRange);
         } catch (AnalysisException | IllegalArgumentException e) {
@@ -303,14 +316,19 @@ public class DynamicPartitionScheduler extends MasterDaemon {
         }
 
         String reservedHistoryPeriods = dynamicPartitionProperty.getReservedHistoryPeriods();
-        List<Range> ranges = DynamicPartitionUtil.convertStringToPeriodsList(reservedHistoryPeriods, dynamicPartitionProperty.getTimeUnit());
+        List<Range> ranges = DynamicPartitionUtil.convertStringToPeriodsList(reservedHistoryPeriods,
+                dynamicPartitionProperty.getTimeUnit());
 
         if (ranges.size() != 0) {
             for (Range range : ranges) {
                 try {
-                    String lowerBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(dynamicPartitionProperty, range.lowerEndpoint().toString(), partitionFormat);
-                    String upperBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(dynamicPartitionProperty, range.upperEndpoint().toString(), partitionFormat);
-                    Range<PartitionKey> reservedHistoryPartitionKeyRange = getClosedRange(db, olapTable, partitionColumn, partitionFormat, lowerBorderOfReservedHistory, upperBorderOfReservedHistory);
+                    String lowerBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(
+                            dynamicPartitionProperty, range.lowerEndpoint().toString(), partitionFormat);
+                    String upperBorderOfReservedHistory = DynamicPartitionUtil.getHistoryPartitionRangeString(
+                            dynamicPartitionProperty, range.upperEndpoint().toString(), partitionFormat);
+                    Range<PartitionKey> reservedHistoryPartitionKeyRange
+                            = getClosedRange(db, olapTable, partitionColumn, partitionFormat,
+                            lowerBorderOfReservedHistory, upperBorderOfReservedHistory);
                     reservedHistoryPartitionKeyRangeList.add(reservedHistoryPartitionKeyRange);
                 } catch (IllegalArgumentException e) {
                     return dropPartitionClauses;
@@ -395,7 +413,8 @@ public class DynamicPartitionScheduler extends MasterDaemon {
                 try {
                     partitionFormat = DynamicPartitionUtil.getPartitionFormat(partitionColumn);
                 } catch (Exception e) {
-                    recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(), e.getMessage(), olapTable.getId());
+                    recordCreatePartitionFailedMsg(db.getFullName(), olapTable.getName(),
+                            e.getMessage(), olapTable.getId());
                     continue;
                 }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java
index 51fd8bd930..856c3836e7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/MovesCacheMap.java
@@ -49,7 +49,8 @@ public class MovesCacheMap {
     private final Table<String, Tag, Map<TStorageMedium, MovesCache>> cacheMap = HashBasedTable.create();
     private long lastExpireConfig = -1L;
 
-    // TabletId -> Pair<Move, ToDeleteReplicaId>, 'ToDeleteReplicaId == -1' means this move haven't been scheduled successfully.
+    // TabletId -> Pair<Move, ToDeleteReplicaId>, 'ToDeleteReplicaId == -1'
+    // means this move hasn't been scheduled successfully.
     public static class MovesCache {
         Cache<Long, Pair<PartitionRebalancer.TabletMove, Long>> cache;
 
@@ -65,7 +66,8 @@ public class MovesCacheMap {
     // Cyclically update the cache mapping, because a cluster may be deleted and we should delete the corresponding cache too.
     public void updateMapping(Table<String, Tag, ClusterLoadStatistic> statisticMap, long expireAfterAccessSecond) {
         if (expireAfterAccessSecond > 0 && lastExpireConfig != expireAfterAccessSecond) {
-            LOG.debug("Reset expireAfterAccess, last {} s, now {} s. Moves will be cleared.", lastExpireConfig, expireAfterAccessSecond);
+            LOG.debug("Reset expireAfterAccess, last {} s, now {} s. Moves will be cleared.",
+                    lastExpireConfig, expireAfterAccessSecond);
             cacheMap.clear();
             lastExpireConfig = expireAfterAccessSecond;
         }
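
MovesCache above wraps a Guava cache whose entries expire after a configurable access timeout, and the whole mapping is rebuilt when that config changes. A minimal sketch of such an expiring cache (illustrative key/value types, not the real TabletMove pair):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    import java.util.concurrent.TimeUnit;

    final class ExpiringMovesSketch {
        public static void main(String[] args) {
            Cache<Long, String> moves = CacheBuilder.newBuilder()
                    .expireAfterAccess(600, TimeUnit.SECONDS)
                    .build();
            moves.put(10001L, "move tablet 10001 from be-1 to be-2");
            // entries not read or written for 600s are evicted automatically
            System.out.println(moves.getIfPresent(10001L));
        }
    }
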
@@ -78,7 +80,8 @@ public class MovesCacheMap {
                 .collect(Collectors.toList());
         for (Table.Cell<String, Tag, ClusterLoadStatistic> cell : toAdd) {
             Map<TStorageMedium, MovesCache> newCacheMap = Maps.newHashMap();
-            Arrays.stream(TStorageMedium.values()).forEach(m -> newCacheMap.put(m, new MovesCache(expireAfterAccessSecond, TimeUnit.SECONDS)));
+            Arrays.stream(TStorageMedium.values())
+                    .forEach(m -> newCacheMap.put(m, new MovesCache(expireAfterAccessSecond, TimeUnit.SECONDS)));
             this.cacheMap.put(cell.getRowKey(), cell.getColumnKey(), newCacheMap);
         }
     }
@@ -114,7 +117,8 @@ public class MovesCacheMap {
     }
 
     public long size() {
-        return cacheMap.values().stream().mapToLong(maps -> maps.values().stream().mapToLong(map -> map.get().size()).sum()).sum();
+        return cacheMap.values().stream().mapToLong(
+                maps -> maps.values().stream().mapToLong(map -> map.get().size()).sum()).sum();
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
index 406bf53bb5..b43af45c29 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/PartitionRebalancer.java
@@ -73,7 +73,8 @@ public class PartitionRebalancer extends Rebalancer {
             ClusterLoadStatistic clusterStat, TStorageMedium medium) {
         String clusterName = clusterStat.getClusterName();
         MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache(clusterName, clusterStat.getTag(), medium);
-        Preconditions.checkNotNull(movesInProgress, "clusterStat is got from statisticMap, movesCacheMap should have the same entry");
+        Preconditions.checkNotNull(movesInProgress,
+                "clusterStat is obtained from statisticMap, movesCacheMap should have the same entry");
 
         // Iterating through Cache.asMap().values() does not reset access time for the entries you retrieve.
         List<TabletMove> movesInProgressList = movesInProgress.get().asMap().values()
@@ -111,7 +112,8 @@ public class PartitionRebalancer extends Rebalancer {
         LOG.debug("Cluster {}-{}: peek max skew {}, assume {} in-progress moves are succeeded {}", clusterName, medium,
                 skews.isEmpty() ? 0 : skews.last(), movesInProgressList.size(), movesInProgressList);
 
-        List<TwoDimensionalGreedyRebalanceAlgo.PartitionMove> moves = algo.getNextMoves(clusterBalanceInfo, Config.partition_rebalance_max_moves_num_per_selection);
+        List<TwoDimensionalGreedyRebalanceAlgo.PartitionMove> moves
+                = algo.getNextMoves(clusterBalanceInfo, Config.partition_rebalance_max_moves_num_per_selection);
 
         List<TabletSchedCtx> alternativeTablets = Lists.newArrayList();
         List<Long> inProgressIds = movesInProgressList.stream().map(m -> m.tabletId).collect(Collectors.toList());
@@ -154,7 +156,8 @@ public class PartitionRebalancer extends Rebalancer {
             tabletCtx.setOrigPriority(TabletSchedCtx.Priority.LOW);
             alternativeTablets.add(tabletCtx);
             // Pair<Move, ToDeleteReplicaId>, ToDeleteReplicaId should be -1L before scheduled successfully
-            movesInProgress.get().put(pickedTabletId, new Pair<>(new TabletMove(pickedTabletId, move.fromBe, move.toBe), -1L));
+            movesInProgress.get().put(pickedTabletId,
+                    new Pair<>(new TabletMove(pickedTabletId, move.fromBe, move.toBe), -1L));
             counterBalanceMoveCreated.incrementAndGet();
             // Synchronize with movesInProgress
             inProgressIds.add(pickedTabletId);
@@ -172,7 +175,7 @@ public class PartitionRebalancer extends Rebalancer {
     }
 
     private boolean buildClusterInfo(ClusterLoadStatistic clusterStat, TStorageMedium medium,
-                                     List<TabletMove> movesInProgress, ClusterBalanceInfo info, List<Long> toDeleteKeys) {
+            List<TabletMove> movesInProgress, ClusterBalanceInfo info, List<Long> toDeleteKeys) {
         Preconditions.checkState(info.beByTotalReplicaCount.isEmpty() && info.partitionInfoBySkew.isEmpty(), "");
 
         // If we want to modify the PartitionBalanceInfo in info.beByTotalReplicaCount, deep-copy it
@@ -180,7 +183,8 @@ public class PartitionRebalancer extends Rebalancer {
         info.partitionInfoBySkew.putAll(clusterStat.getSkewMap(medium));
 
         // Skip the toDeleteKeys
-        List<TabletMove> filteredMoves = movesInProgress.stream().filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList());
+        List<TabletMove> filteredMoves = movesInProgress.stream()
+                .filter(m -> !toDeleteKeys.contains(m.tabletId)).collect(Collectors.toList());
 
         for (TabletMove move : filteredMoves) {
             TabletMeta meta = invertedIndex.getTabletMeta(move.tabletId);
@@ -190,8 +194,11 @@ public class PartitionRebalancer extends Rebalancer {
                 continue;
             }
 
-            TwoDimensionalGreedyRebalanceAlgo.PartitionMove partitionMove = new TwoDimensionalGreedyRebalanceAlgo.PartitionMove(meta.getPartitionId(), meta.getIndexId(), move.fromBe, move.toBe);
-            boolean st = TwoDimensionalGreedyRebalanceAlgo.applyMove(partitionMove, info.beByTotalReplicaCount, info.partitionInfoBySkew);
+            TwoDimensionalGreedyRebalanceAlgo.PartitionMove partitionMove
+                    = new TwoDimensionalGreedyRebalanceAlgo.PartitionMove(
+                            meta.getPartitionId(), meta.getIndexId(), move.fromBe, move.toBe);
+            boolean st = TwoDimensionalGreedyRebalanceAlgo.applyMove(
+                    partitionMove, info.beByTotalReplicaCount, info.partitionInfoBySkew);
             if (!st) {
                 // Can't apply this move, mark it failed, continue to apply the next.
                 toDeleteKeys.add(move.tabletId);
@@ -208,7 +215,8 @@ public class PartitionRebalancer extends Rebalancer {
             if (moveIsComplete) {
                 toDeleteKeys.add(move.tabletId);
                 LOG.debug("Move {} is completed. The cur dist: {}", move,
-                        invertedIndex.getReplicasByTabletId(move.tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList()));
+                        invertedIndex.getReplicasByTabletId(move.tabletId).stream()
+                                .map(Replica::getBackendId).collect(Collectors.toList()));
                 counterBalanceMoveSucceeded.incrementAndGet();
             }
         }
@@ -217,15 +225,18 @@ public class PartitionRebalancer extends Rebalancer {
     // Move completed: fromBe doesn't have a replica and toBe has a replica
     private boolean checkMoveCompleted(TabletMove move) {
         Long tabletId = move.tabletId;
-        List<Long> bes = invertedIndex.getReplicasByTabletId(tabletId).stream().map(Replica::getBackendId).collect(Collectors.toList());
+        List<Long> bes = invertedIndex.getReplicasByTabletId(tabletId).stream()
+                .map(Replica::getBackendId).collect(Collectors.toList());
         return !bes.contains(move.fromBe) && bes.contains(move.toBe);
     }
 
     @Override
     protected void completeSchedCtx(TabletSchedCtx tabletCtx, Map<Long, TabletScheduler.PathSlot> backendsWorkingSlots)
             throws SchedException {
-        MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache(tabletCtx.getCluster(), tabletCtx.getTag(), tabletCtx.getStorageMedium());
-        Preconditions.checkNotNull(movesInProgress, "clusterStat is got from statisticMap, movesInProgressMap should have the same entry");
+        MovesCacheMap.MovesCache movesInProgress = movesCacheMap.getCache(
+                tabletCtx.getCluster(), tabletCtx.getTag(), tabletCtx.getStorageMedium());
+        Preconditions.checkNotNull(movesInProgress,
+                "clusterStat is obtained from statisticMap, movesInProgressMap should have the same entry");
 
         try {
             Pair<TabletMove, Long> pair = movesInProgress.get().getIfPresent(tabletCtx.getTabletId());
@@ -242,7 +253,8 @@ public class PartitionRebalancer extends Rebalancer {
             if (slot.takeBalanceSlot(srcReplica.getPathHash()) != -1) {
                 tabletCtx.setSrc(srcReplica);
             } else {
-                throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "no slot for src replica " + srcReplica + ", pathHash " + srcReplica.getPathHash());
+                throw new SchedException(SchedException.Status.SCHEDULE_FAILED,
+                        "no slot for src replica " + srcReplica + ", pathHash " + srcReplica.getPathHash());
             }
 
             // Choose a path in destination
@@ -259,7 +271,8 @@ public class PartitionRebalancer extends Rebalancer {
                     .map(RootPathLoadStatistic::getPathHash).collect(Collectors.toSet());
             long pathHash = slot.takeAnAvailBalanceSlotFrom(availPath);
             if (pathHash == -1) {
-                throw new SchedException(SchedException.Status.SCHEDULE_FAILED, "paths has no available balance slot: " + availPath);
+                throw new SchedException(SchedException.Status.SCHEDULE_FAILED,
+                        "paths has no available balance slot: " + availPath);
             } else {
                 tabletCtx.setDest(beStat.getBeId(), pathHash);
             }
@@ -280,7 +293,8 @@ public class PartitionRebalancer extends Rebalancer {
     private void checkMoveValidation(TabletMove move) throws IllegalStateException {
         boolean fromAvailable = infoService.checkBackendScheduleAvailable(move.fromBe);
         boolean toAvailable = infoService.checkBackendScheduleAvailable(move.toBe);
-        Preconditions.checkState(fromAvailable && toAvailable, move + "'s bes are not all available: from " + fromAvailable + ", to " + toAvailable);
+        Preconditions.checkState(fromAvailable && toAvailable,
+                move + "'s bes are not all available: from " + fromAvailable + ", to " + toAvailable);
         // To be improved
     }
 
@@ -333,7 +347,8 @@ public class PartitionRebalancer extends Rebalancer {
     // Balance information for a cluster (one medium), excluding decommissioned/dead bes and replicas on them.
     // Natural ordering, so the last key is the max key.
     public static class ClusterBalanceInfo {
-        TreeMultimap<Long, TabletInvertedIndex.PartitionBalanceInfo> partitionInfoBySkew = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary());
+        TreeMultimap<Long, TabletInvertedIndex.PartitionBalanceInfo> partitionInfoBySkew
+                = TreeMultimap.create(Ordering.natural(), Ordering.arbitrary());
         TreeMultimap<Long, Long> beByTotalReplicaCount = TreeMultimap.create();
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java
index ef7ae27995..09aea8e68a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/Rebalancer.java
@@ -42,8 +42,9 @@ import java.util.Map;
  * 3. getToDeleteReplicaId: if the rebalance strategy wants to delete the specified replica,
  * override this func to let TabletScheduler know in handling redundant replica.
  * NOTICE:
- * 1. Adding the selected tablets by TabletScheduler may not succeed at all. And the move may be failed in some other places.
- * So the thing you need to know is, Rebalancer cannot know when the move is failed.
+ * 1. Adding the selected tablets to TabletScheduler may not succeed at all,
+ *  and the move may fail in some other places. So the thing you need to know is,
+ *  Rebalancer cannot know when a move has failed.
 * 2. If you want to make sure a move succeeded, you can assume it succeeded when getToDeleteReplicaId is called.
  */
 public abstract class Rebalancer {
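
Per NOTICE item 2 above, a strategy that needs to act on successful moves can treat the scheduler's call to getToDeleteReplicaId as the success signal. A hypothetical standalone tracker illustrating that convention (not the real Rebalancer API):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class MoveSuccessTrackerSketch {
        private final Map<Long, Long> toDeleteReplicaByTablet = new ConcurrentHashMap<>();

        void onMoveScheduled(long tabletId, long replicaId) {
            toDeleteReplicaByTablet.put(tabletId, replicaId);
        }

        // Consulted by the scheduler when handling redundant replicas; per the
        // contract, the move may be assumed successful once this is called.
        long getToDeleteReplicaId(long tabletId) {
            Long replicaId = toDeleteReplicaByTablet.remove(tabletId);
            return replicaId == null ? -1L : replicaId;
        }
    }
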
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java
index b56d9bc9a5..6bb75a8333 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletChecker.java
@@ -167,7 +167,8 @@ public class TabletChecker extends MasterDaemon {
         }
 
         // we also need to change the priority of tablets which are already in the scheduler
-        tabletScheduler.changeTabletsPriorityToVeryHigh(repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
+        tabletScheduler.changeTabletsPriorityToVeryHigh(
+                repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
     }
 
     private void removePrios(RepairTabletInfo repairTabletInfo) {
@@ -333,7 +334,7 @@ public class TabletChecker extends MasterDaemon {
     }
 
     private LoopControlStatus handlePartitionTablet(Database db, OlapTable tbl, Partition partition, boolean isInPrios,
-                                                    List<Long> aliveBeIdsInCluster, long startTime, CheckerCounter counter) {
+            List<Long> aliveBeIdsInCluster, long startTime, CheckerCounter counter) {
         if (partition.getState() != PartitionState.NORMAL) {
             // when alter job is in FINISHING state, partition state will be set to NORMAL,
             // and we can schedule the tablets in it.
@@ -478,9 +479,11 @@ public class TabletChecker extends MasterDaemon {
      * when being scheduled.
      */
     public void repairTable(AdminRepairTableStmt stmt) throws DdlException {
-        RepairTabletInfo repairTabletInfo = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions());
+        RepairTabletInfo repairTabletInfo = getRepairTabletInfo(
+                stmt.getDbName(), stmt.getTblName(), stmt.getPartitions());
         addPrios(repairTabletInfo, stmt.getTimeoutS() * 1000);
-        LOG.info("repair database: {}, table: {}, partition: {}", repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
+        LOG.info("repair database: {}, table: {}, partition: {}",
+                repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
     }
 
     /*
@@ -488,9 +491,11 @@ public class TabletChecker extends MasterDaemon {
      * This operation will remove the specified partitions from 'prios'
      */
     public void cancelRepairTable(AdminCancelRepairTableStmt stmt) throws DdlException {
-        RepairTabletInfo repairTabletInfo = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions());
+        RepairTabletInfo repairTabletInfo
+                = getRepairTabletInfo(stmt.getDbName(), stmt.getTblName(), stmt.getPartitions());
         removePrios(repairTabletInfo);
-        LOG.info("cancel repair database: {}, table: {}, partition: {}", repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
+        LOG.info("cancel repair database: {}, table: {}, partition: {}",
+                repairTabletInfo.dbId, repairTabletInfo.tblId, repairTabletInfo.partIds);
     }
 
     public int getPrioPartitionNum() {
@@ -520,7 +525,8 @@ public class TabletChecker extends MasterDaemon {
         return infos;
     }
 
-    public static RepairTabletInfo getRepairTabletInfo(String dbName, String tblName, List<String> partitions) throws DdlException {
+    public static RepairTabletInfo getRepairTabletInfo(String dbName, String tblName,
+            List<String> partitions) throws DdlException {
         Catalog catalog = Catalog.getCurrentCatalog();
         Database db = catalog.getDbOrDdlException(dbName);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java
index d6e8eeeed9..daff5f1e06 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletSchedCtx.java
@@ -685,7 +685,8 @@ public class TabletSchedCtx implements Comparable<TabletSchedCtx> {
             // So if this replica was previously set to DECOMMISSION, this state needs to be reset to NORMAL.
             // It may happen as follows:
             // 1. A tablet of colocation table is in COLOCATION_REDUNDANT state
-            // 2. The tablet is being scheduled and set one of replica as DECOMMISSION in TabletScheduler.deleteReplicaInternal()
+            // 2. The tablet is being scheduled and one of its replicas is set to
+            //    DECOMMISSION in TabletScheduler.deleteReplicaInternal()
             // 3. The tablet will then be scheduled again
             // 4. But at that time, the BE node of the replica that was
             //    set to the DECOMMISSION state in step 2 is returned to the colocation group.
@@ -735,7 +736,8 @@ public class TabletSchedCtx implements Comparable<TabletSchedCtx> {
         }
 
         if (storageMediaMigrationTask != null) {
-            AgentTaskQueue.removeTask(storageMediaMigrationTask.getBackendId(), TTaskType.STORAGE_MEDIUM_MIGRATE, storageMediaMigrationTask.getSignature());
+            AgentTaskQueue.removeTask(storageMediaMigrationTask.getBackendId(),
+                    TTaskType.STORAGE_MEDIUM_MIGRATE, storageMediaMigrationTask.getSignature());
         }
         if (cloneTask != null) {
             AgentTaskQueue.removeTask(cloneTask.getBackendId(), TTaskType.CLONE, cloneTask.getSignature());
@@ -829,11 +831,13 @@ public class TabletSchedCtx implements Comparable<TabletSchedCtx> {
         // That is, we may need to use 2 clone tasks to create a new replica. It is inefficient,
         // but there is no other way now.
 
-        // if this is a balance task, or this is a repair task with REPLICA_MISSING/REPLICA_RELOCATING or REPLICA_MISSING_IN_CLUSTER,
+        // if this is a balance task, or this is a repair task with
+        // REPLICA_MISSING/REPLICA_RELOCATING or REPLICA_MISSING_IN_CLUSTER,
         // we create a new replica with state CLONE
         if (tabletStatus == TabletStatus.REPLICA_MISSING || tabletStatus == TabletStatus.REPLICA_MISSING_IN_CLUSTER
                 || tabletStatus == TabletStatus.REPLICA_RELOCATING || type == Type.BALANCE
-                || tabletStatus == TabletStatus.COLOCATE_MISMATCH || tabletStatus == TabletStatus.REPLICA_MISSING_FOR_TAG) {
+                || tabletStatus == TabletStatus.COLOCATE_MISMATCH
+                || tabletStatus == TabletStatus.REPLICA_MISSING_FOR_TAG) {
             Replica cloneReplica = new Replica(
                     Catalog.getCurrentCatalog().getNextId(), destBackendId,
                     -1 /* version */, schemaHash,
@@ -920,9 +924,12 @@ public class TabletSchedCtx implements Comparable<TabletSchedCtx> {
         }
 
         // 1. check the tablet status first
-        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId, s -> new SchedException(Status.UNRECOVERABLE, "db " + dbId + " does not exist"));
-        OlapTable olapTable = (OlapTable) db.getTableOrException(tblId, s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletId + " does not exist"));
-        olapTable.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + olapTable.getName() + " does not exist"));
+        Database db = Catalog.getCurrentCatalog().getDbOrException(dbId,
+                s -> new SchedException(Status.UNRECOVERABLE, "db " + dbId + " does not exist"));
+        OlapTable olapTable = (OlapTable) db.getTableOrException(tblId,
+                s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletId + " does not exist"));
+        olapTable.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table "
+                + olapTable.getName() + " does not exist"));
         try {
             Partition partition = olapTable.getPartition(partitionId);
             if (partition == null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java
index 166fdc17a4..8e35c5fa23 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TabletScheduler.java
@@ -207,7 +207,8 @@ public class TabletScheduler extends MasterDaemon {
         // add new backends
         for (Backend be : backends.values()) {
             if (!backendsWorkingSlots.containsKey(be.getId())) {
-                List<Long> pathHashes = be.getDisks().values().stream().map(DiskInfo::getPathHash).collect(Collectors.toList());
+                List<Long> pathHashes = be.getDisks().values().stream()
+                        .map(DiskInfo::getPathHash).collect(Collectors.toList());
                 PathSlot slot = new PathSlot(pathHashes, Config.schedule_slot_num_per_path);
                 backendsWorkingSlots.put(be.getId(), slot);
                 LOG.info("add new backend {} with slots num: {}", be.getId(), be.getDisks().size());
@@ -287,7 +288,8 @@ public class TabletScheduler extends MasterDaemon {
      * 3. priority may be upgraded if it is not being scheduled for a long time.
      * 4. every pending task should have a max scheduled time; if scheduling fails too many times, it should be removed.
      * 5. every running task should have a timeout, to avoid running forever.
-     * 6. every running task should also has a max failure time, if clone task fails too many times, if should be removed.
+     * 6. every running task should also have a max failure time,
+     *    if the clone task fails too many times, it should be removed.
      *
      */
     @Override
@@ -489,7 +491,8 @@ public class TabletScheduler extends MasterDaemon {
                 s -> new SchedException(Status.UNRECOVERABLE, "db " + tabletCtx.getDbId() + " does not exist"));
         OlapTable tbl = (OlapTable) db.getTableOrException(tabletCtx.getTblId(),
                 s -> new SchedException(Status.UNRECOVERABLE, "tbl " + tabletCtx.getTblId() + " does not exist"));
-        tbl.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table " + tbl.getName() + " does not exist"));
+        tbl.writeLockOrException(new SchedException(Status.UNRECOVERABLE, "table "
+                + tbl.getName() + " does not exist"));
         try {
             boolean isColocateTable = colocateTableIndex.isColocateTable(tbl.getId());
 
@@ -543,7 +546,8 @@ public class TabletScheduler extends MasterDaemon {
 
             if (tabletCtx.getType() == TabletSchedCtx.Type.BALANCE) {
                 try {
-                    DatabaseTransactionMgr dbTransactionMgr = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(db.getId());
+                    DatabaseTransactionMgr dbTransactionMgr
+                            = Catalog.getCurrentGlobalTransactionMgr().getDatabaseTransactionMgr(db.getId());
                     for (TransactionState transactionState : dbTransactionMgr.getPreCommittedTxnList()) {
                         if (transactionState.getTableIdList().contains(tbl.getId())) {
                            // If the table relates to a transaction in precommitted status, do not allow balance.
@@ -623,7 +627,8 @@ public class TabletScheduler extends MasterDaemon {
                     handleReplicaMissing(tabletCtx, batchTask);
                     break;
                 case VERSION_INCOMPLETE:
-                case NEED_FURTHER_REPAIR: // same as version incomplete, it prefer to the dest replica which need further repair
+                case NEED_FURTHER_REPAIR:
+                    // same as version incomplete, it prefers the dest replica which needs further repair
                     handleReplicaVersionIncomplete(tabletCtx, batchTask);
                     break;
                 case REPLICA_RELOCATING:
@@ -743,7 +748,8 @@ public class TabletScheduler extends MasterDaemon {
             tabletCtx.chooseDestReplicaForVersionIncomplete(backendsWorkingSlots);
         } catch (SchedException e) {
             if (e.getMessage().equals("unable to choose dest replica")) {
-                // This situation may occur when the BE nodes where all replicas of a tablet are located are decommission,
+                // This situation may occur when the BE nodes
+                // where all replicas of a tablet are located are decommissioned,
                 // and this task is a VERSION_INCOMPLETE task.
                 // This will lead to failure to select a suitable dest replica.
                 // At this time, we try to convert this task to a REPLICA_MISSING task, and schedule it again.
@@ -752,7 +758,8 @@ public class TabletScheduler extends MasterDaemon {
                 tabletCtx.releaseResource(this, true);
                 tabletCtx.setTabletStatus(TabletStatus.REPLICA_MISSING);
                 handleReplicaMissing(tabletCtx, batchTask);
-                LOG.debug("succeed to find new backend for VERSION_INCOMPLETE task. tablet id: {}", tabletCtx.getTabletId());
+                LOG.debug("succeeded in finding new backend for VERSION_INCOMPLETE task. tablet id: {}",
+                        tabletCtx.getTabletId());
                 return;
             } else {
                 throw e;
@@ -1085,26 +1092,30 @@ public class TabletScheduler extends MasterDaemon {
         if (chosenReplica != null && !chosenReplica.equals(minReplica) && minReplica.isAlive() && !minReplica.tooSlow()
                 && normalReplicaCount >= 1) {
             chosenReplica.setState(ReplicaState.COMPACTION_TOO_SLOW);
-            LOG.info("set replica id :{} tablet id: {}, backend id: {} to COMPACTION_TOO_SLOW", chosenReplica.getId()
-                    , tabletCtx.getTablet()
-                    .getId(), chosenReplica.getBackendId());
+            LOG.info("set replica id: {}, tablet id: {}, backend id: {} to COMPACTION_TOO_SLOW",
+                    chosenReplica.getId(), tabletCtx.getTablet().getId(), chosenReplica.getBackendId());
             throw new SchedException(Status.FINISHED, "set replica to COMPACTION_TOO_SLOW");
         }
         throw new SchedException(Status.FINISHED, "No replica too slow");
     }
 
-    private void deleteReplicaInternal(TabletSchedCtx tabletCtx, Replica replica, String reason, boolean force) throws SchedException {
+    private void deleteReplicaInternal(TabletSchedCtx tabletCtx,
+            Replica replica, String reason, boolean force) throws SchedException {
         /*
-         * Before deleting a replica, we should make sure that there is no running txn on it and no more txns will be on it.
+         * Before deleting a replica, we should make sure that
+         * there is no running txn on it and no more txns will be on it.
          * So we do the following:
-         * 1. If replica is loadable, set a watermark txn id on it and set it state as DECOMMISSION, but not deleting it this time.
+         * 1. If the replica is loadable, set a watermark txn id on it and set its state to DECOMMISSION,
+         *      but do not delete it this time.
          *      The DECOMMISSION state will ensure that no more txns will be on this replica.
-         * 2. Wait for any txns before the watermark txn id to be finished. If all are finished, which means this replica is
+         * 2. Wait for any txns before the watermark txn id to be finished.
+         *      If all are finished, this replica is
          *      safe to be deleted.
          */
         if (!force && !Config.enable_force_drop_redundant_replica && replica.getState().canLoad()
                 && replica.getWatermarkTxnId() == -1 && !FeConstants.runningUnitTest) {
-            long nextTxnId = Catalog.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
+            long nextTxnId = Catalog.getCurrentGlobalTransactionMgr()
+                    .getTransactionIDGenerator().getNextTransactionId();
             replica.setWatermarkTxnId(nextTxnId);
             replica.setState(ReplicaState.DECOMMISSION);
             // set priority to normal because it may wait for a long time. Remaining it as VERY_HIGH may block other tasks.
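
The comment block above describes a two-step delete: first fence the replica with a watermark txn id and the DECOMMISSION state so no new txns land on it, then drop it only once every txn before the watermark has finished. A condensed sketch of that state machine (hypothetical names, not the FE replica types):

    final class ReplicaDeleteSketch {
        enum State { NORMAL, DECOMMISSION }

        static final class Replica {
            State state = State.NORMAL;
            long watermarkTxnId = -1;
        }

        // Returns true when the replica may actually be dropped.
        static boolean tryDelete(Replica replica, long nextTxnId,
                long oldestUnfinishedTxnId, boolean force) {
            if (!force && replica.watermarkTxnId == -1) {
                // step 1: fence off new txns, defer the physical delete
                replica.watermarkTxnId = nextTxnId;
                replica.state = State.DECOMMISSION;
                return false;
            }
            // step 2: safe once all txns before the watermark have finished
            return force || oldestUnfinishedTxnId >= replica.watermarkTxnId;
        }
    }
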
@@ -1176,7 +1187,8 @@ public class TabletScheduler extends MasterDaemon {
 
     /**
      * Missing for tag, which means some replicas of this tablet are allocated on backends that do not match the specified tag.
-     * Treat it as replica missing, and in handleReplicaMissing(), it will find a property backend to create new replica.
+     * Treat it as replica missing, and in handleReplicaMissing(),
+     * it will find a proper backend to create a new replica.
      */
     private void handleReplicaMissingForTag(TabletSchedCtx tabletCtx, AgentBatchTask batchTask)
             throws SchedException {
@@ -1329,7 +1341,8 @@ public class TabletScheduler extends MasterDaemon {
                 // This is to solve, when we decommission some BEs with SSD disks,
                 // if there are no SSD disks on the remaining BEs, it will be impossible to select a
                 // suitable destination path.
-                // In this case, we need to ignore the storage medium property and try to select the destination path again.
+                // In this case, we need to ignore the storage medium property
+                // and try to select the destination path again.
                 // Set `isSupplement` to true will ignore the  storage medium property.
                 st = bes.isFit(tabletCtx.getTabletSize(), tabletCtx.getStorageMedium(),
                         resultPaths, true);
@@ -1463,6 +1476,7 @@ public class TabletScheduler extends MasterDaemon {
         finalizeTabletCtx(tabletCtx, TabletSchedCtx.State.FINISHED, Status.FINISHED, "finished");
         return true;
     }
+
     /**
      * return true if we want to remove the clone task from AgentTaskQueue
      */
diff --git a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java
index 6dcb401f43..b589b3f157 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/clone/TwoDimensionalGreedyRebalanceAlgo.java
@@ -35,11 +35,13 @@ import java.util.Random;
 import java.util.Set;
 import java.util.stream.Collectors;
 
-/*
- * A two-dimensional greedy rebalancing algorithm. The two dims are cluster and partition. It'll generate multiple `PartitionMove`,
- * only decide which partition to move, fromBe, toBe. The next step is to select a tablet to move.
+/**
+ * A two-dimensional greedy rebalancing algorithm. The two dims are cluster and partition.
+ * It'll generate multiple `PartitionMove`s, deciding only which partition to move and the fromBe/toBe.
+ * The next step is to select a tablet to move.
  *
- * From among moves that decrease the skew of a most skewed partition, it prefers ones that reduce the skew of the cluster.
+ * <p>From among moves that decrease the skew of a most skewed partition,
+ * it prefers ones that reduce the skew of the cluster.
  * A cluster is considered balanced when the skew of every partition is <= 1 and the skew of the cluster is <= 1.
  * The skew of the cluster is defined as the difference between the maximum total replica count over all bes and the
  * minimum total replica count over all bes.
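
The skew definition above reduces to max minus min of per-BE replica counts. A small illustrative helper under that definition (hypothetical, not part of the algorithm class):

    import java.util.Collection;
    import java.util.Collections;
    import java.util.Map;

    // Hypothetical helper illustrating the skew definition in the javadoc above.
    class SkewSketch {
        // Cluster skew: max total replica count over all BEs minus the min.
        static long skew(Map<Long, Long> replicaCountByBe) {
            if (replicaCountByBe.isEmpty()) {
                return 0;
            }
            Collection<Long> counts = replicaCountByBe.values();
            return Collections.max(counts) - Collections.min(counts);
        }

        // Balanced per the javadoc: skew of every partition <= 1 and cluster skew <= 1.
        static boolean isBalanced(long clusterSkew, Collection<Long> partitionSkews) {
            return clusterSkew <= 1 && partitionSkews.stream().allMatch(s -> s <= 1);
        }
    }
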
@@ -194,18 +196,24 @@ public class TwoDimensionalGreedyRebalanceAlgo {
 
             Long minReplicaCount = pbi.beByReplicaCount.keySet().first();
             Long maxReplicaCount = pbi.beByReplicaCount.keySet().last();
-            LOG.debug("balancing partition {}-{} with replica count skew {} (min_replica_count: {}, max_replica_count: {})",
+            LOG.debug("balancing partition {}-{} with replica count skew {}"
+                            + " (min_replica_count: {}, max_replica_count: {})",
                     pbi.partitionId, pbi.indexId, maxPartitionSkew,
                     minReplicaCount, maxReplicaCount);
 
             // Compute the intersection of the bes most loaded for the table
             // with the bes most loaded overall, and likewise for least loaded.
             // These are our ideal candidates for moving from and to, respectively.
-            IntersectionResult maxLoaded = getIntersection(ExtremumType.MAX, pbi.beByReplicaCount, beByTotalReplicaCount);
-            IntersectionResult minLoaded = getIntersection(ExtremumType.MIN, pbi.beByReplicaCount, beByTotalReplicaCount);
-            LOG.debug("partition-wise: min_count: {}, max_count: {}", minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition);
-            LOG.debug("cluster-wise: min_count: {}, max_count: {}", minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal);
-            LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}", minLoaded.intersection.toString(), maxLoaded.intersection.toString());
+            IntersectionResult maxLoaded = getIntersection(ExtremumType.MAX,
+                    pbi.beByReplicaCount, beByTotalReplicaCount);
+            IntersectionResult minLoaded = getIntersection(ExtremumType.MIN,
+                    pbi.beByReplicaCount, beByTotalReplicaCount);
+            LOG.debug("partition-wise: min_count: {}, max_count: {}",
+                    minLoaded.replicaCountPartition, maxLoaded.replicaCountPartition);
+            LOG.debug("cluster-wise: min_count: {}, max_count: {}",
+                    minLoaded.replicaCountTotal, maxLoaded.replicaCountTotal);
+            LOG.debug("min_loaded_intersection: {}, max_loaded_intersection: {}",
+                    minLoaded.intersection.toString(), maxLoaded.intersection.toString());
 
             // Do not move replicas of a balanced table if the least (most) loaded
             // servers overall do not intersect the servers hosting the least (most)
@@ -220,8 +228,10 @@ public class TwoDimensionalGreedyRebalanceAlgo {
             Long maxLoadedBe;
             if (equalSkewOption == EqualSkewOption.PICK_FIRST) {
                 // beWithExtremumCount lists & intersection lists are natural ordering
-                minLoadedBe = minLoaded.intersection.isEmpty() ? minLoaded.beWithExtremumCount.get(0) : minLoaded.intersection.get(0);
-                maxLoadedBe = maxLoaded.intersection.isEmpty() ? maxLoaded.beWithExtremumCount.get(0) : maxLoaded.intersection.get(0);
+                minLoadedBe = minLoaded.intersection.isEmpty()
+                        ? minLoaded.beWithExtremumCount.get(0) : minLoaded.intersection.get(0);
+                maxLoadedBe = maxLoaded.intersection.isEmpty()
+                        ? maxLoaded.beWithExtremumCount.get(0) : maxLoaded.intersection.get(0);
             } else {
                 minLoadedBe = minLoaded.intersection.isEmpty() ? getRandomListElement(minLoaded.beWithExtremumCount)
                         : getRandomListElement(minLoaded.intersection);
@@ -247,8 +257,8 @@ public class TwoDimensionalGreedyRebalanceAlgo {
         return items.get(rand.nextInt(items.size()));
     }
 
-    public static IntersectionResult getIntersection(ExtremumType extremumType, TreeMultimap<Long, Long> beByReplicaCount,
-                                                     TreeMultimap<Long, Long> beByTotalReplicaCount) {
+    public static IntersectionResult getIntersection(ExtremumType extremumType,
+            TreeMultimap<Long, Long> beByReplicaCount, TreeMultimap<Long, Long> beByTotalReplicaCount) {
         Pair<Long, Set<Long>> beSelectedByPartition = getMinMaxLoadedServers(beByReplicaCount, extremumType);
         Pair<Long, Set<Long>> beSelectedByTotal = getMinMaxLoadedServers(beByTotalReplicaCount, extremumType);
         Preconditions.checkNotNull(beSelectedByPartition);
@@ -258,11 +268,13 @@ public class TwoDimensionalGreedyRebalanceAlgo {
         res.replicaCountPartition = beSelectedByPartition.first;
         res.replicaCountTotal = beSelectedByTotal.first;
         res.beWithExtremumCount = Lists.newArrayList(beSelectedByPartition.second);
-        res.intersection = Lists.newArrayList(Sets.intersection(beSelectedByPartition.second, beSelectedByTotal.second));
+        res.intersection = Lists.newArrayList(
+                Sets.intersection(beSelectedByPartition.second, beSelectedByTotal.second));
         return res;
     }
 
-    private static Pair<Long, Set<Long>> getMinMaxLoadedServers(TreeMultimap<Long, Long> multimap, ExtremumType extremumType) {
+    private static Pair<Long, Set<Long>> getMinMaxLoadedServers(
+            TreeMultimap<Long, Long> multimap, ExtremumType extremumType) {
         if (multimap.isEmpty()) {
             return null;
         }
@@ -270,8 +282,10 @@ public class TwoDimensionalGreedyRebalanceAlgo {
         return new Pair<>(count, multimap.get(count));
     }
 
-    // Update the balance state in 'ClusterBalanceInfo'(the two maps) with the outcome of the move 'move'.
-    // To support apply in-progress moves to current cluster balance info, if apply failed, the maps should not be modified.
+    /** Update the balance state in 'ClusterBalanceInfo' (the two maps) with the outcome of the move 'move'.
+     * To support applying in-progress moves to the current cluster balance info,
+     * the maps should not be modified if the apply fails.
+     */
     public static boolean applyMove(PartitionMove move, TreeMultimap<Long, Long> beByTotalReplicaCount,
                                     TreeMultimap<Long, PartitionBalanceInfo> skewMap) {
         // Update the total counts
@@ -282,8 +296,9 @@ public class TwoDimensionalGreedyRebalanceAlgo {
             Long skew = -1L;
             for (Long key : skewMap.keySet()) {
                 NavigableSet<PartitionBalanceInfo> pbiSet = skewMap.get(key);
-                List<PartitionBalanceInfo> pbis = pbiSet.stream().filter(info ->
-                        info.partitionId.equals(move.partitionId) && info.indexId.equals(move.indexId)).collect(Collectors.toList());
+                List<PartitionBalanceInfo> pbis = pbiSet.stream()
+                        .filter(info -> info.partitionId.equals(move.partitionId) && info.indexId.equals(move.indexId))
+                        .collect(Collectors.toList());
                 Preconditions.checkState(pbis.size() <= 1, "skew map has dup partition info");
                 if (pbis.size() == 1) {
                     partitionBalanceInfo = pbis.get(0);
@@ -301,7 +316,8 @@ public class TwoDimensionalGreedyRebalanceAlgo {
             long maxCount = newInfo.beByReplicaCount.keySet().last();
             skewMap.put(maxCount - minCount, newInfo);
         } catch (IllegalStateException e) {
-            // If touch IllegalState, the skew map doesn't be modified, so we should rollback the move of beByTotalReplicaCount
+            // If an IllegalState is hit, the skew map has not been modified,
+            // so we should roll back the move in beByTotalReplicaCount
             moveOneReplica(move.toBe, move.fromBe, beByTotalReplicaCount);
             LOG.info("{} apply failed, {}", move, e.getMessage());
             return false;
@@ -316,7 +332,8 @@ public class TwoDimensionalGreedyRebalanceAlgo {
     // Applies to 'm' a move of a replica from the be with id 'fromBe' to the be with id 'toBe' by decrementing
     // the count of 'fromBe' and incrementing the count of 'toBe'.
     // If the check fails, the map is not modified.
-    private static void moveOneReplica(Long fromBe, Long toBe, TreeMultimap<Long, Long> m) throws IllegalStateException {
+    private static void moveOneReplica(Long fromBe, Long toBe,
+            TreeMultimap<Long, Long> m) throws IllegalStateException {
         boolean foundSrc = false;
         boolean foundDst = false;
         Long countSrc = 0L;
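
Since beByTotalReplicaCount keys BEs by their replica count, moving one replica means re-filing the source BE under count-1 and the destination under count+1, with the membership check done before any mutation so a failure leaves the map untouched. A standalone toy mirroring (not reusing) that logic:

    import com.google.common.collect.TreeMultimap;

    // Toy version of moveOneReplica: re-key two BEs in a count -> beId multimap.
    class MoveSketch {
        static void moveOneReplica(long fromBe, long toBe, TreeMultimap<Long, Long> countToBe) {
            Long fromCount = findCount(countToBe, fromBe);
            Long toCount = findCount(countToBe, toBe);
            if (fromCount == null || toCount == null) {
                // Check first: throw before touching the map, leaving it unmodified.
                throw new IllegalStateException("be not found, map unmodified");
            }
            countToBe.remove(fromCount, fromBe);
            countToBe.put(fromCount - 1, fromBe); // source loses one replica
            countToBe.remove(toCount, toBe);
            countToBe.put(toCount + 1, toBe);     // destination gains one replica
        }

        private static Long findCount(TreeMultimap<Long, Long> countToBe, long be) {
            for (java.util.Map.Entry<Long, Long> e : countToBe.entries()) {
                if (e.getValue() == be) {
                    return e.getKey();
                }
            }
            return null;
        }
    }
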
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java
index 4cef7be025..e927b621d9 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/CIDR.java
@@ -40,6 +40,7 @@ public class CIDR {
 
     // Count the number of 1-bits in a 32-bit integer
     private static ImmutableMap<Integer, Integer> maskBitNumMap;
+
     static {
         ImmutableMap.Builder<Integer, Integer> builder = ImmutableMap.builder();
         builder.put(0, 0);
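
The static block (truncated by the hunk) presumably fills maskBitNumMap with every valid netmask and its 1-bit count; under that assumption the same table can be built in a loop, since Integer.bitCount(-1 << (32 - n)) == n:

    import com.google.common.collect.ImmutableMap;

    // Assumed intent of the elided static block: map each /N netmask to its bit count N.
    class MaskBitSketch {
        static ImmutableMap<Integer, Integer> buildMaskBitNumMap() {
            ImmutableMap.Builder<Integer, Integer> builder = ImmutableMap.builder();
            builder.put(0, 0); // the /0 mask has no bits set
            for (int prefixLen = 1; prefixLen <= 32; prefixLen++) {
                int mask = -1 << (32 - prefixLen); // high prefixLen bits set
                builder.put(mask, prefixLen);
            }
            return builder.build();
        }
    }
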
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java b/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java
index 2d92e24948..02857f42d7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/CheckedMath.java
@@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
 
 public class CheckedMath {
 
-    private final static Logger LOG = LogManager.getLogger(CheckedMath.class);
+    private static final Logger LOG = LogManager.getLogger(CheckedMath.class);
 
     /**
     * Computes and returns the product of two longs. If an overflow occurs,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
index e5f109686d..2b473d7ac0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/Config.java
@@ -156,7 +156,8 @@ public class Config extends ConfigBase {
     public static int history_job_keep_max_second = 7 * 24 * 3600; // 7 days
 
     /**
-     * the transaction will be cleaned after transaction_clean_interval_second seconds if the transaction is visible or aborted
+     * the transaction will be cleaned after transaction_clean_interval_second seconds
+     * if the transaction is visible or aborted.
      * We should make this interval as short as possible and each clean cycle as soon as possible.
      */
     @ConfField
@@ -729,7 +730,8 @@ public class Config extends ConfigBase {
     public static int max_running_txn_num_per_db = 100;
 
     /**
-     * This configuration is just for compatible with old version, this config has been replaced by async_loading_load_task_pool_size,
+     * This configuration is just for compatibility with old versions;
+     * this config has been replaced by async_loading_load_task_pool_size, and
      * it will be removed in the future.
      */
     @ConfField(mutable = false, masterOnly = true)
@@ -1071,7 +1073,8 @@ public class Config extends ConfigBase {
     public static boolean disable_load_job = false;
 
     /*
-     * One master daemon thread will update database used data quota for db txn manager every db_used_data_quota_update_interval_secs
+     * One master daemon thread will update each database's used data quota for the db txn manager
+     * every db_used_data_quota_update_interval_secs
      */
     @ConfField(mutable = false, masterOnly = true)
     public static int db_used_data_quota_update_interval_secs = 300;
@@ -1486,7 +1489,8 @@ public class Config extends ConfigBase {
     public static int default_max_query_instances = -1;
 
     /*
-     * One master daemon thread will update global partition in memory info every partition_in_memory_update_interval_secs
+     * One master daemon thread will update global partition in-memory info
+     * every partition_in_memory_update_interval_secs
      */
     @ConfField(mutable = false, masterOnly = true)
     public static int partition_in_memory_update_interval_secs = 300;
@@ -1538,7 +1542,8 @@ public class Config extends ConfigBase {
      * is to wait until the loading tasks finish before dropping them.
      * But the default strategy may take a very long time to handle these redundant replicas.
      * So we can set this config to true to avoid waiting for any loading task.
-     * Set this config to true may cause loading task failed, but will speed up the process of tablet balance and repair.
+     * Setting this config to true may cause loading tasks to fail, but will
+     * speed up the process of tablet balance and repair.
      */
     @ConfField(mutable = true, masterOnly = true)
     public static boolean enable_force_drop_redundant_replica = false;
@@ -1551,7 +1556,8 @@ public class Config extends ConfigBase {
 
     /*
      * The relocation of a colocation group may involve a large number of tablets moving within the cluster.
-     * Therefore, we should use a more conservative strategy to avoid relocation of colocation groups as much as possible.
+     * Therefore, we should use a more conservative strategy to avoid relocation
+     * of colocation groups as much as possible.
      * Relocation usually occurs after a BE node goes offline or goes down.
      * This parameter is used to delay the determination of BE node unavailability.
      * The default is 30 minutes, i.e., if a BE node recovers within 30 minutes, relocation of the colocation group
@@ -1577,8 +1583,8 @@ public class Config extends ConfigBase {
     public static int min_version_count_indicate_replica_compaction_too_slow = 300;
 
     /**
-     * The valid ratio threshold of the difference between the version count of the slowest replica and the fastest replica.
-     * If repair_slow_replica is set to true, it is used to determine whether to repair the slowest replica
+     * The valid ratio threshold of the difference between the version count of the slowest replica and the fastest
+     * replica. If repair_slow_replica is set to true, it is used to determine whether to repair the slowest replica.
      */
     @ConfField(mutable = true, masterOnly = true)
     public static double valid_version_count_delta_ratio_between_replicas = 0.5;
@@ -1649,13 +1655,13 @@ public class Config extends ConfigBase {
     public static boolean enable_multi_catalog = false; // 1 min
 
     /**
-     * If set to TRUE, FE will: 
+     * If set to TRUE, FE will:
      * 1. divide BEs into high load and low load (no mid load) to force triggering tablet scheduling;
      * 2. ignore whether the cluster can be more balanced during tablet scheduling;
      *
-     * It's used to test the reliability in single replica case when tablet scheduling are frequent. 
+     * It's used to test the reliability in the single-replica case when tablet scheduling is frequent.
      * Default is false.
-     */    
+     */
     @ConfField(mutable = false, masterOnly = true)
     public static boolean be_rebalancer_fuzzy_test = false;
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java
index 213e8f5907..4ae2e15220 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/ConfigBase.java
@@ -289,7 +289,7 @@ public class ConfigBase {
         throw new IllegalArgumentException("type mismatch");
     }
 
-    public synchronized static void setMutableConfig(String key, String value) throws DdlException {
+    public static synchronized void setMutableConfig(String key, String value) throws DdlException {
         Field field = confFields.get(key);
         if (field == null) {
             throw new DdlException("Config '" + key + "' does not exist");
@@ -312,7 +312,7 @@ public class ConfigBase {
         LOG.info("set config {} to {}", key, value);
     }
 
-    public synchronized static List<List<String>> getConfigInfo(PatternMatcher matcher) {
+    public static synchronized List<List<String>> getConfigInfo(PatternMatcher matcher) {
         return confFields.entrySet().stream().sorted(Map.Entry.comparingByKey()).flatMap(e -> {
             String confKey = e.getKey();
             Field f = e.getValue();
@@ -332,7 +332,7 @@ public class ConfigBase {
         }).collect(Collectors.toList());
     }
 
-    public synchronized static boolean checkIsMasterOnly(String key) {
+    public static synchronized boolean checkIsMasterOnly(String key) {
         Field f = confFields.get(key);
         if (f == null) {
             return false;
@@ -343,7 +343,8 @@ public class ConfigBase {
     }
 
     // use synchronized to make sure only one thread modify this file
-    public synchronized static void persistConfig(Map<String, String> customConf, boolean resetPersist) throws IOException {
+    public static synchronized void persistConfig(Map<String, String> customConf, boolean resetPersist)
+            throws IOException {
         File file = new File(customConfFile);
         if (!file.exists()) {
             file.createNewFile();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java b/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java
index eb96451cd0..2c7310ad0b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/DdlException.java
@@ -21,6 +21,7 @@ public class DdlException extends UserException {
     public DdlException(String msg) {
         super(msg);
     }
+
     public DdlException(String msg, ErrorCode mysqlErrorCode) {
         super(msg);
         setMysqlErrorCode(mysqlErrorCode);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java
index 23c65b33b5..e2f55a34ba 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/ErrorReport.java
@@ -39,7 +39,8 @@ public class ErrorReport {
 
     public static void reportAnalysisException(String pattern, Object... objs)
             throws AnalysisException {
-        throw new AnalysisException(reportCommon(pattern, ErrorCode.ERR_UNKNOWN_ERROR, objs), ErrorCode.ERR_UNKNOWN_ERROR);
+        throw new AnalysisException(reportCommon(pattern, ErrorCode.ERR_UNKNOWN_ERROR, objs),
+                ErrorCode.ERR_UNKNOWN_ERROR);
     }
 
     public static void reportAnalysisException(ErrorCode errorCode, Object... objs)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java
index 04de9901a4..ee52f10577 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/GenericPool.java
@@ -137,8 +137,9 @@ public class GenericPool<VALUE extends org.apache.thrift.TServiceClient>  {
                 LOG.debug("before create socket hostname={} key.port={} timeoutMs={}",
                         key.hostname, key.port, timeoutMs);
             }
-            TTransport transport = isNonBlockingIO ? new TFramedTransport(new TSocket(key.hostname, key.port, timeoutMs)) :
-                    new TSocket(key.hostname, key.port, timeoutMs);
+            TTransport transport = isNonBlockingIO
+                    ? new TFramedTransport(new TSocket(key.hostname, key.port, timeoutMs))
+                    : new TSocket(key.hostname, key.port, timeoutMs);
             transport.open();
             TProtocol protocol = new TBinaryProtocol(transport);
             VALUE client = (VALUE) newInstance(className, protocol);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Id.java b/fe/fe-core/src/main/java/org/apache/doris/common/Id.java
index 52187d6fb5..79372c52a0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/Id.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/Id.java
@@ -26,7 +26,7 @@ import java.util.ArrayList;
  * Integer ids that cannot accidentally be compared with ints.
  */
 public class Id<IdType extends Id<IdType>> {
-    static private int INVALID_ID = -1;
+    private static final int INVALID_ID = -1;
     protected final int id;
 
     public Id() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java b/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java
index 6f025a396c..120a9a924e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/IdGenerator.java
@@ -26,6 +26,8 @@ package org.apache.doris.common;
  */
 public abstract class IdGenerator<IdType extends Id<IdType>> {
     protected int nextId = 0;
+
     public abstract IdType getNextId();
+
     public abstract IdType getMaxId();
 }
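
A concrete generator only has to advance nextId and wrap it in its Id subtype. A minimal sketch with a hypothetical QueryId type, assuming Id exposes an int constructor alongside the no-arg one (the real subclasses live elsewhere in the codebase):

    // Hypothetical Id subtype and generator; assumes Id(int) exists alongside Id().
    class QueryId extends Id<QueryId> {
        QueryId(int id) {
            super(id);
        }
    }

    class QueryIdGenerator extends IdGenerator<QueryId> {
        @Override
        public QueryId getNextId() {
            return new QueryId(nextId++); // hand out ids in sequence
        }

        @Override
        public QueryId getMaxId() {
            return new QueryId(nextId - 1); // the last id handed out
        }
    }
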
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java
index 87105d56b9..7bf38fd786 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/Log4jConfig.java
@@ -37,6 +37,7 @@ import java.util.Map;
 public class Log4jConfig extends XmlConfiguration {
     private static final long serialVersionUID = 1L;
 
+    // CHECKSTYLE OFF
     private static String xmlConfTemplate = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"
             + "\n<!-- Auto Generated. DO NOT MODIFY IT! -->\n"
             + "<Configuration status=\"info\" packages=\"org.apache.doris.common\">\n"
@@ -104,6 +105,7 @@ public class Log4jConfig extends XmlConfiguration {
             + "    <!--REPLACED BY AUDIT AND VERBOSE MODULE NAMES-->\n"
             + "  </Loggers>\n"
             + "</Configuration>";
+    // CHECKSTYLE ON
 
     private static StrSubstitutor strSub;
     private static String sysLogLevel;
@@ -235,7 +237,7 @@ public class Log4jConfig extends XmlConfiguration {
         super(LoggerContext.getContext(), configSource);
     }
 
-    public synchronized static void initLogging(String dorisConfDir) throws IOException {
+    public static synchronized void initLogging(String dorisConfDir) throws IOException {
         sysLogLevel = Config.sys_log_level;
         verboseModules = Config.sys_log_verbose_modules;
         auditModules = Config.audit_log_modules;
@@ -244,7 +246,7 @@ public class Log4jConfig extends XmlConfiguration {
         reconfig();
     }
 
-    public synchronized static Tuple<String, String[], String[]> updateLogging(
+    public static synchronized Tuple<String, String[], String[]> updateLogging(
             String level, String[] verboseNames, String[] auditNames) throws IOException {
         boolean toReconfig = false;
         if (level != null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java b/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java
index 3894632d0b..5cb522ac7b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/Pair.java
@@ -42,7 +42,7 @@ public class Pair<F, S> {
         this.second = second;
     }
 
-    static public <F, S> Pair<F, S> create(F first, S second) {
+    public static <F, S> Pair<F, S> create(F first, S second) {
         return new Pair<F, S>(first, second);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java
index bbabf26abf..4e5ae73b29 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThreadPoolManager.java
@@ -54,7 +54,8 @@ import java.util.concurrent.TimeUnit;
  *
  *  All thread pool constructed by ThreadPoolManager will be added to the nameToThreadPoolMap,
  *  so the thread pool name in fe must be unique.
- *  when all thread pools are constructed, ThreadPoolManager will register some metrics of all thread pool to MetricRepo,
+ *  When all thread pools are constructed,
+ *  ThreadPoolManager will register some metrics of all thread pools to MetricRepo,
 *  so we can know the runtime state of all thread pools via Prometheus metrics
  */
 
@@ -64,7 +65,7 @@ public class ThreadPoolManager {
 
     private static String[] poolMetricTypes = {"pool_size", "active_thread_num", "task_in_queue"};
 
-    private final static long KEEP_ALIVE_TIME = 60L;
+    private static final long KEEP_ALIVE_TIME = 60L;
 
     public static void registerAllThreadPoolMetric() {
         for (Map.Entry<String, ThreadPoolExecutor> entry : nameToThreadPoolMap.entrySet()) {
@@ -75,7 +76,8 @@ public class ThreadPoolManager {
 
     public static void registerThreadPoolMetric(String poolName, ThreadPoolExecutor threadPool) {
         for (String poolMetricType : poolMetricTypes) {
-            GaugeMetric<Integer> gauge = new GaugeMetric<Integer>("thread_pool", MetricUnit.NOUNIT, "thread_pool statistics") {
+            GaugeMetric<Integer> gauge = new GaugeMetric<Integer>(
+                    "thread_pool", MetricUnit.NOUNIT, "thread_pool statistics") {
                 @Override
                 public Integer getValue() {
                     String metricType = this.getLabels().get(1).getValue();
@@ -97,14 +99,18 @@ public class ThreadPoolManager {
         }
     }
 
-    public static ThreadPoolExecutor newDaemonCacheThreadPool(int maxNumThread, String poolName, boolean needRegisterMetric) {
-        return newDaemonThreadPool(0, maxNumThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue(),
+    public static ThreadPoolExecutor newDaemonCacheThreadPool(int maxNumThread,
+            String poolName, boolean needRegisterMetric) {
+        return newDaemonThreadPool(0, maxNumThread, KEEP_ALIVE_TIME,
+                TimeUnit.SECONDS, new SynchronousQueue(),
                 new LogDiscardPolicy(poolName), poolName, needRegisterMetric);
     }
 
-    public static ThreadPoolExecutor newDaemonFixedThreadPool(int numThread, int queueSize, String poolName, boolean needRegisterMetric) {
-        return newDaemonThreadPool(numThread, numThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS, new LinkedBlockingQueue<>(queueSize),
-                new BlockedPolicy(poolName, 60), poolName, needRegisterMetric);
+    public static ThreadPoolExecutor newDaemonFixedThreadPool(int numThread,
+            int queueSize, String poolName, boolean needRegisterMetric) {
+        return newDaemonThreadPool(numThread, numThread, KEEP_ALIVE_TIME, TimeUnit.SECONDS,
+                new LinkedBlockingQueue<>(queueSize), new BlockedPolicy(poolName, 60),
+                poolName, needRegisterMetric);
     }
 
     public static ThreadPoolExecutor newDaemonProfileThreadPool(int numThread, int queueSize, String poolName,
@@ -123,7 +129,8 @@ public class ThreadPoolManager {
                                                          String poolName,
                                                          boolean needRegisterMetric) {
         ThreadFactory threadFactory = namedThreadFactory(poolName);
-        ThreadPoolExecutor threadPool = new ThreadPoolExecutor(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
+        ThreadPoolExecutor threadPool = new ThreadPoolExecutor(corePoolSize, maximumPoolSize,
+                keepAliveTime, unit, workQueue, threadFactory, handler);
         if (needRegisterMetric) {
             nameToThreadPoolMap.put(poolName, threadPool);
         }
@@ -133,9 +140,11 @@ public class ThreadPoolManager {
     // Now, we have no delay task num limit and thread num limit in ScheduledThreadPoolExecutor,
     // so it may cause OOM when there are too many delayed tasks or threads in ScheduledThreadPoolExecutor.
     // Please use this api only for scheduling short tasks at a fixed rate.
-    public static ScheduledThreadPoolExecutor newDaemonScheduledThreadPool(int corePoolSize, String poolName, boolean needRegisterMetric) {
+    public static ScheduledThreadPoolExecutor newDaemonScheduledThreadPool(
+            int corePoolSize, String poolName, boolean needRegisterMetric) {
         ThreadFactory threadFactory = namedThreadFactory(poolName);
-        ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory);
+        ScheduledThreadPoolExecutor scheduledThreadPoolExecutor
+                = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory);
         if (needRegisterMetric) {
             nameToThreadPoolMap.put(poolName, scheduledThreadPoolExecutor);
         }
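
Callers are expected to obtain pools only through these factories, so every pool gets a daemon thread factory, a unique name, and (optionally) registered metrics. A usage sketch against the signatures shown above; the pool names here are illustrative:

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    class PoolUsageSketch {
        public static void main(String[] args) {
            // Fixed pool: 4 threads, bounded queue of 256, metrics registered.
            ThreadPoolExecutor worker = ThreadPoolManager
                    .newDaemonFixedThreadPool(4, 256, "example-worker-pool", true);
            worker.submit(() -> System.out.println("task ran"));

            // Scheduled pool: per the note above, short tasks at a fixed rate only.
            ScheduledThreadPoolExecutor timer = ThreadPoolManager
                    .newDaemonScheduledThreadPool(1, "example-timer-pool", false);
            timer.scheduleAtFixedRate(() -> System.out.println("tick"), 0, 1, TimeUnit.SECONDS);
        }
    }
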
@@ -169,7 +178,8 @@ public class ThreadPoolManager {
     }
 
     /**
-     * A handler for rejected task that try to be blocked until the pool enqueue task succeed or timeout, used for fixed thread pool
+     * A handler for rejected tasks that blocks until the task is enqueued into the pool or a timeout is hit,
+     * used for fixed thread pools
      */
     static class BlockedPolicy implements RejectedExecutionHandler {
 
@@ -189,7 +199,8 @@ public class ThreadPoolManager {
             try {
                 boolean ret = executor.getQueue().offer(r, timeoutSeconds, TimeUnit.SECONDS);
                 if (!ret) {
-                    throw new RejectedExecutionException("submit task failed, queue size is full: " + this.threadPoolName);
+                    throw new RejectedExecutionException("submit task failed, queue size is full: "
+                            + this.threadPoolName);
                 }
             } catch (InterruptedException e) {
                 String errMsg = String.format("Task %s wait to enqueue in %s %s failed",
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java
index 40ee36606a..d101f5d227 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/ThriftServer.java
@@ -99,7 +99,8 @@ public class ThriftServer {
         TThreadedSelectorServer.Args args = new TThreadedSelectorServer.Args(
                 new TNonblockingServerSocket(port, Config.thrift_client_timeout_ms)).protocolFactory(
                         new TBinaryProtocol.Factory()).processor(processor);
-        ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true);
+        ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(
+                Config.thrift_server_max_worker_threads, "thrift-server-pool", true);
         args.executorService(threadPoolExecutor);
         server = new TThreadedSelectorServer(args);
     }
@@ -114,7 +115,8 @@ public class ThriftServer {
         TThreadPoolServer.Args serverArgs =
                 new TThreadPoolServer.Args(new TServerSocket(socketTransportArgs)).protocolFactory(
                         new TBinaryProtocol.Factory()).processor(processor);
-        ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(Config.thrift_server_max_worker_threads, "thrift-server-pool", true);
+        ThreadPoolExecutor threadPoolExecutor = ThreadPoolManager.newDaemonCacheThreadPool(
+                Config.thrift_server_max_worker_threads, "thrift-server-pool", true);
         serverArgs.executorService(threadPoolExecutor);
         server = new TThreadPoolServer(serverArgs);
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java b/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java
index e67eec7033..0ccb2b1c17 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/UserException.java
@@ -25,6 +25,7 @@ import com.google.common.base.Strings;
 public class UserException extends Exception {
     private InternalErrorCode errorCode;
     private ErrorCode mysqlErrorCode;
+
     public UserException(String msg, Throwable cause) {
         super(Strings.nullToEmpty(msg), cause);
         errorCode = InternalErrorCode.INTERNAL_ERR;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java
index 20ad46a8ee..d13c924a56 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/parquet/BrokerInputFile.java
@@ -48,7 +48,8 @@ public class BrokerInputFile implements InputFile {
     }
 
     // For test only. The given ip/port is the broker's ip/port.
-    public static BrokerInputFile create(String filePath, BrokerDesc brokerDesc, String ip, int port) throws IOException {
+    public static BrokerInputFile create(String filePath, BrokerDesc brokerDesc,
+            String ip, int port) throws IOException {
         BrokerInputFile inputFile = new BrokerInputFile(filePath, brokerDesc);
         inputFile.init(ip, port);
         return inputFile;
@@ -95,7 +96,8 @@ public class BrokerInputFile implements InputFile {
                         fill();
                     }
                     if (currentPos > bufferLimit) {
-                        LOG.warn("current pos {} is larger than buffer limit {}. should not happen.", currentPos, bufferLimit);
+                        LOG.warn("current pos {} is larger than buffer limit {}."
+                                + " should not happen.", currentPos, bufferLimit);
                         return -1;
                     }
 
@@ -146,7 +148,8 @@ public class BrokerInputFile implements InputFile {
                     }
 
                     if (currentPos > bufferLimit) {
-                        LOG.warn("current pos {} is larger than buffer limit {}. should not happen.", currentPos, bufferLimit);
+                        LOG.warn("current pos {} is larger than buffer limit {}."
+                                + " should not happen.", currentPos, bufferLimit);
                         return -1;
                     }
 
@@ -260,7 +263,8 @@ public class BrokerInputFile implements InputFile {
                         currentPos += data.length;
                     } catch (BrokerReader.EOFException e) {
                         if (byteBuffer.remaining() > 0) {
-                            throw new EOFException("Reach the end of file with " + byteBuffer.remaining() + " bytes left to read. "
+                            throw new EOFException("Reach the end of file with "
+                                    + byteBuffer.remaining() + " bytes left to read. "
                                 + "read len: " + (currentPos - markCurPos));
                         }
                     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java
index cd6a4691ab..86a5437456 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/BackendLoadStatisticProcNode.java
@@ -25,10 +25,10 @@ import com.google.common.collect.ImmutableList;
 
 public class BackendLoadStatisticProcNode implements ProcNodeInterface {
     public static final ImmutableList<String> TITLE_NAMES = new ImmutableList.Builder<String>()
-        .add("RootPath").add("PathHash").add("StorageMedium")
-        .add("DataUsedCapacity").add("TotalCapacity").add("TotalUsedPct")
-        .add("Class").add("State")
-        .build();
+            .add("RootPath").add("PathHash").add("StorageMedium")
+            .add("DataUsedCapacity").add("TotalCapacity").add("TotalUsedPct")
+            .add("Class").add("State")
+            .build();
 
     private final ClusterLoadStatistic statistic;
     private final long beId;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java
index 2d2842bdb0..aabee58fc5 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ClusterLoadStatisticProcDir.java
@@ -25,7 +25,6 @@ import org.apache.doris.system.Backend;
 import org.apache.doris.thrift.TStorageMedium;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Table;
 
 import java.util.List;
 import java.util.Map;
@@ -51,7 +50,8 @@ public class ClusterLoadStatisticProcDir implements ProcDirInterface {
         BaseProcResult result = new BaseProcResult();
         result.setNames(TITLE_NAMES);
 
-        Map<String, ClusterLoadStatistic> map = Catalog.getCurrentCatalog().getTabletScheduler().getStatisticMap().column(tag);
+        Map<String, ClusterLoadStatistic> map = Catalog.getCurrentCatalog()
+                .getTabletScheduler().getStatisticMap().column(tag);
 
         map.values().forEach(t -> {
             List<List<String>> statistics = t.getClusterStatistic(medium);
@@ -80,7 +80,8 @@ public class ClusterLoadStatisticProcDir implements ProcDirInterface {
             throw new AnalysisException("backend " + beId + " does not exist");
         }
 
-        Map<String, ClusterLoadStatistic> map = Catalog.getCurrentCatalog().getTabletScheduler().getStatisticMap().column(tag);
+        Map<String, ClusterLoadStatistic> map = Catalog.getCurrentCatalog()
+                .getTabletScheduler().getStatisticMap().column(tag);
         return new BackendLoadStatisticProcNode(map.get(be.getOwnerClusterName()), beId);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java
index 2a164cba6d..4ef502b119 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ColocationGroupBackendSeqsProcNode.java
@@ -47,7 +47,8 @@ public class ColocationGroupBackendSeqsProcNode implements ProcNodeInterface {
             if (bucketNum == 0) {
                 bucketNum = backendsSeq.get(tag).size();
             } else if (bucketNum != backendsSeq.get(tag).size()) {
-                throw new AnalysisException("Invalid bucket number: " + bucketNum + " vs. " + backendsSeq.get(tag).size());
+                throw new AnalysisException("Invalid bucket number: "
+                        + bucketNum + " vs. " + backendsSeq.get(tag).size());
             }
         }
         result.setNames(titleNames);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java
index 3fa789989a..2b6d8f6702 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/CurrentQueryInfoProvider.java
@@ -80,7 +80,8 @@ public class CurrentQueryInfoProvider {
         final Map<String, RuntimeProfile> instanceProfiles = collectInstanceProfile(item.getQueryProfile());
         final List<InstanceStatistics> instanceStatisticsList = Lists.newArrayList();
         for (QueryStatisticsItem.FragmentInstanceInfo instanceInfo : item.getFragmentInstanceInfos()) {
-            final RuntimeProfile instanceProfile = instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId()));
+            final RuntimeProfile instanceProfile
+                    = instanceProfiles.get(DebugUtil.printId(instanceInfo.getInstanceId()));
             Preconditions.checkNotNull(instanceProfile);
             final InstanceStatistics Statistics =
                     new InstanceStatistics(
@@ -102,7 +103,8 @@ public class CurrentQueryInfoProvider {
         final Map<String, RuntimeProfile> instanceProfiles = Maps.newHashMap();
         for (RuntimeProfile fragmentProfile : queryProfile.getChildMap().values()) {
             for (Map.Entry<String, RuntimeProfile> entry : fragmentProfile.getChildMap().entrySet()) {
-                Preconditions.checkState(instanceProfiles.put(parseInstanceId(entry.getKey()), entry.getValue()) == null);
+                Preconditions.checkState(instanceProfiles.put(
+                        parseInstanceId(entry.getKey()), entry.getValue()) == null);
             }
         }
         return instanceProfiles;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java
index aaee7d7e6f..5919855723 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/EsPartitionsProcDir.java
@@ -69,8 +69,10 @@ public class EsPartitionsProcDir implements ProcDirInterface {
                 rangePartitionInfo = (RangePartitionInfo) esTable.getEsTablePartitions().getPartitionInfo();
             }
             Joiner joiner = Joiner.on(", ");
-            Map<String, EsShardPartitions> unPartitionedIndices = esTable.getEsTablePartitions().getUnPartitionedIndexStates();
-            Map<String, EsShardPartitions> partitionedIndices = esTable.getEsTablePartitions().getPartitionedIndexStates();
+            Map<String, EsShardPartitions> unPartitionedIndices
+                    = esTable.getEsTablePartitions().getUnPartitionedIndexStates();
+            Map<String, EsShardPartitions> partitionedIndices
+                    = esTable.getEsTablePartitions().getPartitionedIndexStates();
             for (EsShardPartitions esShardPartitions : unPartitionedIndices.values()) {
                 List<Comparable> partitionInfo = new ArrayList<Comparable>();
                 partitionInfo.add(esShardPartitions.getIndexName());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java
index dfe272ff10..6bda28968a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JobsProcDir.java
@@ -121,10 +121,13 @@ public class JobsProcDir implements ProcDirInterface {
         // rollup
         MaterializedViewHandler materializedViewHandler = Catalog.getCurrentCatalog().getMaterializedViewHandler();
         pendingNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.PENDING, dbId);
-        runningNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.WAITING_TXN, dbId)
+        runningNum = materializedViewHandler.getAlterJobV2Num(
+                org.apache.doris.alter.AlterJobV2.JobState.WAITING_TXN, dbId)
                 + materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.RUNNING, dbId);
-        finishedNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.FINISHED, dbId);
-        cancelledNum = materializedViewHandler.getAlterJobV2Num(org.apache.doris.alter.AlterJobV2.JobState.CANCELLED, dbId);
+        finishedNum = materializedViewHandler.getAlterJobV2Num(
+                org.apache.doris.alter.AlterJobV2.JobState.FINISHED, dbId);
+        cancelledNum = materializedViewHandler.getAlterJobV2Num(
+                org.apache.doris.alter.AlterJobV2.JobState.CANCELLED, dbId);
         totalNum = pendingNum + runningNum + finishedNum + cancelledNum;
         result.addRow(Lists.newArrayList(ROLLUP, pendingNum.toString(), runningNum.toString(), finishedNum.toString(),
                                          cancelledNum.toString(), totalNum.toString()));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java
index 8ec5060573..7cacc69746 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/JvmProcDir.java
@@ -51,12 +51,12 @@ public class JvmProcDir implements ProcNodeInterface {
         JvmInfo jvmInfo = jvmService.info();
         result.addRow(genRow("jvm start time", TimeUtils.longToTimeString(jvmInfo.getStartTime())));
         result.addRow(genRow("jvm version info", Joiner.on(" ").join(jvmInfo.getVersion(),
-                                                                     jvmInfo.getVmName(),
-                                                                     jvmInfo.getVmVendor(),
-                                                                     jvmInfo.getVmVersion())));
+                jvmInfo.getVmName(), jvmInfo.getVmVendor(), jvmInfo.getVmVersion())));
 
-        result.addRow(genRow("configured init heap size", DebugUtil.printByteWithUnit(jvmInfo.getConfiguredInitialHeapSize())));
-        result.addRow(genRow("configured max heap size", DebugUtil.printByteWithUnit(jvmInfo.getConfiguredMaxHeapSize())));
+        result.addRow(genRow("configured init heap size",
+                DebugUtil.printByteWithUnit(jvmInfo.getConfiguredInitialHeapSize())));
+        result.addRow(genRow("configured max heap size",
+                DebugUtil.printByteWithUnit(jvmInfo.getConfiguredMaxHeapSize())));
         result.addRow(genRow("frontend pid", jvmInfo.getPid()));
 
         // 2. jvm stats
@@ -65,24 +65,34 @@ public class JvmProcDir implements ProcNodeInterface {
         result.addRow(genRow("classes total loaded", jvmStats.getClasses().getTotalLoadedClassCount()));
         result.addRow(genRow("classes unloaded", jvmStats.getClasses().getUnloadedClassCount()));
 
-        result.addRow(genRow("mem heap committed", DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapCommitted().getBytes())));
-        result.addRow(genRow("mem heap used", DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapUsed().getBytes())));
-        result.addRow(genRow("mem non heap committed", DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapCommitted().getBytes())));
-        result.addRow(genRow("mem non heap used", DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapUsed().getBytes())));
+        result.addRow(genRow("mem heap committed",
+                DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapCommitted().getBytes())));
+        result.addRow(genRow("mem heap used",
+                DebugUtil.printByteWithUnit(jvmStats.getMem().getHeapUsed().getBytes())));
+        result.addRow(genRow("mem non heap committed",
+                DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapCommitted().getBytes())));
+        result.addRow(genRow("mem non heap used",
+                DebugUtil.printByteWithUnit(jvmStats.getMem().getNonHeapUsed().getBytes())));
 
         Iterator<MemoryPool> memIter = jvmStats.getMem().iterator();
         while (memIter.hasNext()) {
             MemoryPool memPool = memIter.next();
-            result.addRow(genRow("mem pool " + memPool.getName() + " used", DebugUtil.printByteWithUnit(memPool.getUsed().getBytes())));
-            result.addRow(genRow("mem pool " + memPool.getName() + " max", DebugUtil.printByteWithUnit(memPool.getMax().getBytes())));
-            result.addRow(genRow("mem pool " + memPool.getName() + " peak used", DebugUtil.printByteWithUnit(memPool.getPeakUsed().getBytes())));
-            result.addRow(genRow("mem pool " + memPool.getName() + " peak max", DebugUtil.printByteWithUnit(memPool.getPeakMax().getBytes())));
+            result.addRow(genRow("mem pool " + memPool.getName() + " used",
+                    DebugUtil.printByteWithUnit(memPool.getUsed().getBytes())));
+            result.addRow(genRow("mem pool " + memPool.getName() + " max",
+                    DebugUtil.printByteWithUnit(memPool.getMax().getBytes())));
+            result.addRow(genRow("mem pool " + memPool.getName() + " peak used",
+                    DebugUtil.printByteWithUnit(memPool.getPeakUsed().getBytes())));
+            result.addRow(genRow("mem pool " + memPool.getName() + " peak max",
+                    DebugUtil.printByteWithUnit(memPool.getPeakMax().getBytes())));
         }
 
         for (BufferPool bp : jvmStats.getBufferPools()) {
             result.addRow(genRow("buffer pool " + bp.getName() + " count", bp.getCount()));
-            result.addRow(genRow("buffer pool " + bp.getName() + " used", DebugUtil.printByteWithUnit(bp.getUsed().getBytes())));
-            result.addRow(genRow("buffer pool " + bp.getName() + " capacity", DebugUtil.printByteWithUnit(bp.getTotalCapacity().getBytes())));
+            result.addRow(genRow("buffer pool " + bp.getName() + " used",
+                    DebugUtil.printByteWithUnit(bp.getUsed().getBytes())));
+            result.addRow(genRow("buffer pool " + bp.getName() + " capacity",
+                    DebugUtil.printByteWithUnit(bp.getTotalCapacity().getBytes())));
         }
 
         Iterator<GarbageCollector> gcIter = jvmStats.getGc().iterator();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
index 543755b8c5..c29a8cbfd4 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/PartitionsProcDir.java
@@ -91,7 +91,8 @@ public class PartitionsProcDir implements ProcDirInterface {
         }
         if (subExpr instanceof BinaryPredicate) {
             BinaryPredicate binaryPredicate = (BinaryPredicate) subExpr;
-            if (subExpr.getChild(1) instanceof StringLiteral && binaryPredicate.getOp() == BinaryPredicate.Operator.EQ) {
+            if (subExpr.getChild(1) instanceof StringLiteral
+                    && binaryPredicate.getOp() == BinaryPredicate.Operator.EQ) {
                 return ((StringLiteral) subExpr.getChild(1)).getValue().equals(element);
             }
             long leftVal;
@@ -135,7 +136,8 @@ public class PartitionsProcDir implements ProcDirInterface {
         return str.matches(expr);
     }
 
-    public ProcResult fetchResultByFilter(Map<String, Expr> filterMap, List<OrderByPair> orderByPairs, LimitElement limitElement) throws AnalysisException {
+    public ProcResult fetchResultByFilter(Map<String, Expr> filterMap, List<OrderByPair> orderByPairs,
+            LimitElement limitElement) throws AnalysisException {
         List<List<Comparable>> partitionInfos = getPartitionInfos();
         List<List<Comparable>> filterPartitionInfos;
         //where
@@ -216,7 +218,8 @@ public class PartitionsProcDir implements ProcDirInterface {
                 partitionIds = tblPartitionInfo.getPartitionItemEntryList(isTempPartition, true).stream()
                         .map(Map.Entry::getKey).collect(Collectors.toList());
             } else {
-                Collection<Partition> partitions = isTempPartition ? olapTable.getTempPartitions() : olapTable.getPartitions();
+                Collection<Partition> partitions = isTempPartition
+                        ? olapTable.getTempPartitions() : olapTable.getPartitions();
                 partitionIds = partitions.stream().map(Partition::getId).collect(Collectors.toList());
             }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java
index 40c66fc599..ddc4872410 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/ProcResult.java
@@ -23,5 +23,6 @@ import java.util.List;
 // TODO(zhaochun): merge proc result to show result
 public interface ProcResult {
     List<String> getColumnNames();
+
     List<List<String>> getRows();
 }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java
index 4ff5efb9b5..a585a29f76 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/StatisticProcNode.java
@@ -95,7 +95,8 @@ public class StatisticProcNode implements ProcNodeInterface {
                 try {
                     for (Partition partition : olapTable.getAllPartitions()) {
                         ++partitionNum;
-                        for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
+                        for (MaterializedIndex materializedIndex
+                                : partition.getMaterializedIndices(IndexExtState.VISIBLE)) {
                             ++indexNum;
                             List<Tablet> tablets = materializedIndex.getTablets();
                             for (int i = 0; i < tablets.size(); ++i) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java
index 403cda9120..bf847c84af 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/proc/TabletHealthProcDir.java
@@ -178,8 +178,10 @@ public class TabletHealthProcDir implements ProcDirInterface {
                 olapTable.readLock();
                 try {
                     for (Partition partition : olapTable.getAllPartitions()) {
-                        ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId());
-                        for (MaterializedIndex materializedIndex : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) {
+                        ReplicaAllocation replicaAlloc = olapTable.getPartitionInfo()
+                                .getReplicaAllocation(partition.getId());
+                        for (MaterializedIndex materializedIndex : partition
+                                .getMaterializedIndices(MaterializedIndex.IndexExtState.VISIBLE)) {
                             List<Tablet> tablets = materializedIndex.getTablets();
                             for (int i = 0; i < tablets.size(); ++i) {
                                 Tablet tablet = tablets.get(i);
@@ -187,12 +189,12 @@ public class TabletHealthProcDir implements ProcDirInterface {
                                 Tablet.TabletStatus res = null;
                                 if (groupId != null) {
                                     Set<Long> backendsSet = colocateTableIndex.getTabletBackendsByGroup(groupId, i);
-                                    res = tablet.getColocateHealthStatus(partition.getVisibleVersion(), replicaAlloc, backendsSet);
+                                    res = tablet.getColocateHealthStatus(
+                                            partition.getVisibleVersion(), replicaAlloc, backendsSet);
                                 } else {
-                                    Pair<Tablet.TabletStatus, TabletSchedCtx.Priority> pair = tablet.getHealthStatusWithPriority(
-                                            infoService, db.getClusterName(),
-                                            partition.getVisibleVersion(),
-                                            replicaAlloc, aliveBeIdsInCluster);
+                                    Pair<Tablet.TabletStatus, TabletSchedCtx.Priority> pair
+                                            = tablet.getHealthStatusWithPriority(infoService, db.getClusterName(),
+                                            partition.getVisibleVersion(), replicaAlloc, aliveBeIdsInCluster);
                                     res = pair.first;
                                 }
                                 switch (res) { // CHECKSTYLE IGNORE THIS LINE: missing switch default
@@ -256,7 +258,8 @@ public class TabletHealthProcDir implements ProcDirInterface {
                                     oversizeTabletIds.add(tablet.getId());
                                 }
                                 for (Replica replica : tablet.getReplicas()) {
-                                    if (replica.getVersionCount() > Config.min_version_count_indicate_replica_compaction_too_slow) {
+                                    if (replica.getVersionCount()
+                                            > Config.min_version_count_indicate_replica_compaction_too_slow) {
                                         replicaCompactionTooSlowNum++;
                                         replicaCompactionTooSlowTabletIds.add(tablet.getId());
                                         break;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java
index c5720d76ab..42bced258e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeBuilder.java
@@ -91,7 +91,8 @@ public class ProfileTreeBuilder {
     private static final Pattern FRAGMENT_ID_PATTERN;
 
     // Match string like:
-    // Instance e0f7390f5363419e-b416a2a7999608b6 (host=TNetworkAddress(hostname:192.168.1.1, port:9060)):(Active: 1s858ms, % non-child: 0.02%)
+    // Instance e0f7390f5363419e-b416a2a7999608b6
+    //   (host=TNetworkAddress(hostname:192.168.1.1, port:9060)):(Active: 1s858ms, % non-child: 0.02%)
     // Extract "e0f7390f5363419e-b416a2a7999608b6", "192.168.1.1", "9060"
     private static final String INSTANCE_PATTERN_STR = "^Instance (.*) \\(.*hostname:(.*), port:([0-9]+).*";
     private static final Pattern INSTANCE_PATTERN;
@@ -250,7 +251,8 @@ public class ProfileTreeBuilder {
         String extractName;
         String extractId;
         if ((!m.find() && finalSenderName == null) || m.groupCount() != 2) {
-            // DataStreamBuffer name like: "DataBufferSender (dst_fragment_instance_id=d95356f9219b4831-986b4602b41683ca):"
+            // DataStreamBuffer name like:
+            // "DataBufferSender (dst_fragment_instance_id=d95356f9219b4831-986b4602b41683ca):"
             // So it has no id.
            // Other profiles should have an id like:
             // EXCHANGE_NODE (id=3):(Active: 103.899ms, % non-child: 2.27%)
@@ -330,7 +332,8 @@ public class ProfileTreeBuilder {
             if (root != null) {
                 root.addChild(counterNode);
             }
-            counterNode.setCounter(childCounterName, RuntimeProfile.printCounter(counter.getValue(), counter.getType()));
+            counterNode.setCounter(childCounterName,
+                    RuntimeProfile.printCounter(counter.getValue(), counter.getType()));
             buildCounterNode(profile, childCounterName, counterNode);
         }
         return;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java
index aa31682904..45cf25d603 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/profile/ProfileTreeNode.java
@@ -18,6 +18,7 @@
 package org.apache.doris.common.profile;
 
 import org.apache.doris.common.TreeNode;
+
 import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java
index ff7e225937..da5acc0188 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/publish/ClusterStatePublisher.java
@@ -41,7 +41,8 @@ public class ClusterStatePublisher {
     private static final Logger LOG = LogManager.getLogger(ClusterStatePublisher.class);
     private static volatile ClusterStatePublisher INSTANCE;
 
-    private ExecutorService executor = ThreadPoolManager.newDaemonFixedThreadPool(5, 256, "cluster-state-publisher", true);
+    private ExecutorService executor = ThreadPoolManager
+            .newDaemonFixedThreadPool(5, 256, "cluster-state-publisher", true);
 
     private SystemInfoService clusterInfoService;
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java
index f5ecef57cf..60dd5a8b33 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/BrokerUtil.java
@@ -172,7 +172,7 @@ public class BrokerUtil {
             }
         } else if (brokerDesc.getStorageType() == StorageBackend.StorageType.HDFS) {
             if (!brokerDesc.getProperties().containsKey(HADOOP_FS_NAME)
-                || !brokerDesc.getProperties().containsKey(HADOOP_USER_NAME)) {
+                    || !brokerDesc.getProperties().containsKey(HADOOP_USER_NAME)) {
                 throw new UserException(String.format(
                     "The properties of hdfs is invalid. %s and %s are needed", HADOOP_FS_NAME, HADOOP_USER_NAME));
             }
@@ -183,7 +183,7 @@ public class BrokerUtil {
             for (Map.Entry<String, String> propEntry : brokerDesc.getProperties().entrySet()) {
                 conf.set(propEntry.getKey(), propEntry.getValue());
                 if (propEntry.getKey().equals(BrokerUtil.HADOOP_SECURITY_AUTHENTICATION)
-                    && propEntry.getValue().equals(AuthType.KERBEROS.getDesc())) {
+                        && propEntry.getValue().equals(AuthType.KERBEROS.getDesc())) {
                     isSecurityEnabled = true;
                 }
             }
@@ -191,15 +191,15 @@ public class BrokerUtil {
                 if (isSecurityEnabled) {
                     UserGroupInformation.setConfiguration(conf);
                     UserGroupInformation.loginUserFromKeytab(
-                        brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL),
-                        brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_KEYTAB));
+                            brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_PRINCIPAL),
+                            brokerDesc.getProperties().get(BrokerUtil.HADOOP_KERBEROS_KEYTAB));
                 }
                 FileSystem fs = FileSystem.get(new URI(fsName), conf, userName);
                 FileStatus[] statusList = fs.globStatus(new Path(path));
                 for (FileStatus status : statusList) {
                     if (status.isFile()) {
                         fileStatuses.add(new TBrokerFileStatus(status.getPath().toUri().getPath(),
-                            status.isDirectory(), status.getLen(), status.isFile()));
+                                status.isDirectory(), status.getLen(), status.isFile()));
                     }
                 }
             } catch (IOException | InterruptedException | URISyntaxException e) {
@@ -213,13 +213,15 @@ public class BrokerUtil {
         return brokerName + "[" + address.toString() + "]";
     }
 
-    public static List<String> parseColumnsFromPath(String filePath, List<String> columnsFromPath) throws UserException {
+    public static List<String> parseColumnsFromPath(String filePath, List<String> columnsFromPath)
+            throws UserException {
         if (columnsFromPath == null || columnsFromPath.isEmpty()) {
             return Collections.emptyList();
         }
         String[] strings = filePath.split("/");
         if (strings.length < 2) {
-            throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
+            throw new UserException("Fail to parse columnsFromPath, expected: "
+                    + columnsFromPath + ", filePath: " + filePath);
         }
         String[] columns = new String[columnsFromPath.size()];
         int size = 0;
@@ -229,11 +231,13 @@ public class BrokerUtil {
                 continue;
             }
             if (str == null || !str.contains("=")) {
-                throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
+                throw new UserException("Fail to parse columnsFromPath, expected: "
+                        + columnsFromPath + ", filePath: " + filePath);
             }
             String[] pair = str.split("=", 2);
             if (pair.length != 2) {
-                throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
+                throw new UserException("Fail to parse columnsFromPath, expected: "
+                        + columnsFromPath + ", filePath: " + filePath);
             }
             int index = columnsFromPath.indexOf(pair[0]);
             if (index == -1) {
@@ -246,7 +250,8 @@ public class BrokerUtil {
             }
         }
         if (size != columnsFromPath.size()) {
-            throw new UserException("Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath);
+            throw new UserException("Fail to parse columnsFromPath, expected: "
+                    + columnsFromPath + ", filePath: " + filePath);
         }
         return Lists.newArrayList(columns);
     }
@@ -503,8 +508,9 @@ public class BrokerUtil {
         }
     }
 
-    public static Pair<TPaloBrokerService.Client, TNetworkAddress> getBrokerAddressAndClient(BrokerDesc brokerDesc) throws UserException {
-        Pair<TPaloBrokerService.Client, TNetworkAddress> pair = new Pair<TPaloBrokerService.Client, TNetworkAddress>(null, null);
+    public static Pair<TPaloBrokerService.Client, TNetworkAddress> getBrokerAddressAndClient(BrokerDesc brokerDesc)
+            throws UserException {
+        Pair<TPaloBrokerService.Client, TNetworkAddress> pair = new Pair<>(null, null);
         TNetworkAddress address = getAddress(brokerDesc);
         TPaloBrokerService.Client client = borrowClient(address);
         pair.first = client;
@@ -600,7 +606,8 @@ public class BrokerUtil {
 
         public void write(ByteBuffer byteBuffer, long bufferSize) throws UserException {
             if (!isReady) {
-                throw new UserException("Broker writer is not ready. filePath=" + brokerFilePath + ", broker=" + address);
+                throw new UserException("Broker writer is not ready. filePath="
+                        + brokerFilePath + ", broker=" + address);
             }
 
             failed = true;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java
index b632fb26c7..30cd75cd52 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/DynamicPartitionUtil.java
@@ -149,14 +149,16 @@ public class DynamicPartitionUtil {
 
     private static void checkEnable(String enable) throws DdlException {
         if (Strings.isNullOrEmpty(enable)
-                || (!Boolean.TRUE.toString().equalsIgnoreCase(enable) && !Boolean.FALSE.toString().equalsIgnoreCase(enable))) {
+                || (!Boolean.TRUE.toString().equalsIgnoreCase(enable)
+                && !Boolean.FALSE.toString().equalsIgnoreCase(enable))) {
             ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_ENABLE, enable);
         }
     }
 
     private static boolean checkCreateHistoryPartition(String create) throws DdlException {
         if (Strings.isNullOrEmpty(create)
-                || (!Boolean.TRUE.toString().equalsIgnoreCase(create) && !Boolean.FALSE.toString().equalsIgnoreCase(create))) {
+                || (!Boolean.TRUE.toString().equalsIgnoreCase(create)
+                && !Boolean.FALSE.toString().equalsIgnoreCase(create))) {
             ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_CREATE_HISTORY_PARTITION, create);
         }
         return Boolean.valueOf(create);
@@ -168,7 +170,8 @@ public class DynamicPartitionUtil {
         }
         try {
             int historyPartitionNum = Integer.parseInt(val);
-            if (historyPartitionNum < 0 && historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) {
+            if (historyPartitionNum < 0
+                    && historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) {
                 ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_HISTORY_PARTITION_NUM_ZERO);
             }
         } catch (NumberFormatException e) {
@@ -238,7 +241,8 @@ public class DynamicPartitionUtil {
         }
     }
 
-    public static List<Range> convertStringToPeriodsList(String reservedHistoryPeriods, String timeUnit) throws DdlException {
+    public static List<Range> convertStringToPeriodsList(String reservedHistoryPeriods, String timeUnit)
+            throws DdlException {
         List<Range> reservedHistoryPeriodsToRangeList = new ArrayList<Range>();
         if (DynamicPartitionProperty.NOT_SET_RESERVED_HISTORY_PERIODS.equals(reservedHistoryPeriods)) {
             return reservedHistoryPeriodsToRangeList;
@@ -250,9 +254,12 @@ public class DynamicPartitionUtil {
             String lowerBorderOfReservedHistory = matcher.group(1);
             String upperBorderOfReservedHistory = matcher.group(2);
             if (lowerBorderOfReservedHistory.compareTo(upperBorderOfReservedHistory) > 0) {
-                ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_START_LARGER_THAN_ENDS, lowerBorderOfReservedHistory, upperBorderOfReservedHistory);
+                ErrorReport.reportDdlException(
+                        ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_START_LARGER_THAN_ENDS,
+                        lowerBorderOfReservedHistory, upperBorderOfReservedHistory);
             } else {
-                reservedHistoryPeriodsToRangeList.add(Range.closed(lowerBorderOfReservedHistory, upperBorderOfReservedHistory));
+                reservedHistoryPeriodsToRangeList.add(
+                        Range.closed(lowerBorderOfReservedHistory, upperBorderOfReservedHistory));
             }
         }
         return reservedHistoryPeriodsToRangeList;
@@ -260,7 +267,8 @@ public class DynamicPartitionUtil {
 
     private static Pattern getPattern(String timeUnit) {
         if (timeUnit.equalsIgnoreCase(TimeUnit.HOUR.toString())) {
-            return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}),([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})\\]");
+            return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})"
+                    + ",([0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})\\]");
         } else {
             return Pattern.compile("\\[([0-9]{4}-[0-9]{2}-[0-9]{2}),([0-9]{4}-[0-9]{2}-[0-9]{2})\\]");
         }
@@ -277,13 +285,14 @@ public class DynamicPartitionUtil {
                 return o1.lowerEndpoint().compareTo(o2.lowerEndpoint());
             }
         });
-        List<String> sortedReservedHistoryPeriods = reservedHistoryPeriodsToRangeList.stream().
-                map(e -> "[" + e.lowerEndpoint() + "," + e.upperEndpoint() + "]").collect(Collectors.toList());
+        List<String> sortedReservedHistoryPeriods = reservedHistoryPeriodsToRangeList.stream()
+                .map(e -> "[" + e.lowerEndpoint() + "," + e.upperEndpoint() + "]").collect(Collectors.toList());
 
         return String.join(",", sortedReservedHistoryPeriods);
     }
 
-    private static void checkReservedHistoryPeriodValidate(String reservedHistoryPeriods, String timeUnit) throws DdlException {
+    private static void checkReservedHistoryPeriodValidate(String reservedHistoryPeriods,
+            String timeUnit) throws DdlException {
         if (Strings.isNullOrEmpty(reservedHistoryPeriods)) {
             ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_EMPTY);
         }
@@ -297,7 +306,8 @@ public class DynamicPartitionUtil {
        // 2. "dynamic_partition.reserved_history_periods" = "[,2021-08-01]" is invalid; values must come in pairs
        // 3. "dynamic_partition.reserved_history_periods" = "[2021-07-01,2020-08-01,]" is an invalid format
         if (!reservedHistoryPeriods.startsWith("[") || !reservedHistoryPeriods.endsWith("]")) {
-            ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods);
+            ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID,
+                    DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods);
         }
 
         List<Range> reservedHistoryPeriodsToRangeList = convertStringToPeriodsList(reservedHistoryPeriods, timeUnit);
@@ -305,13 +315,15 @@ public class DynamicPartitionUtil {
         SimpleDateFormat sdf = getSimpleDateFormat(timeUnit);
 
         if (reservedHistoryPeriodsToRangeList.size() != sizeOfPeriods) {
-            ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID, DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods);
+            ErrorReport.reportDdlException(ErrorCode.ERROR_DYNAMIC_PARTITION_RESERVED_HISTORY_PERIODS_INVALID,
+                    DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods);
         } else {
             try {
                 for (Range range : reservedHistoryPeriodsToRangeList) {
                     String formattedLowerBound = sdf.format(sdf.parse(range.lowerEndpoint().toString()));
                     String formattedUpperBound = sdf.format(sdf.parse(range.upperEndpoint().toString()));
-                    if (!range.lowerEndpoint().toString().equals(formattedLowerBound) || !range.upperEndpoint().toString().equals(formattedUpperBound)) {
+                    if (!range.lowerEndpoint().toString().equals(formattedLowerBound)
+                            || !range.upperEndpoint().toString().equals(formattedUpperBound)) {
                         throw new DdlException("Invalid " + DynamicPartitionProperty.RESERVED_HISTORY_PERIODS
                                 + " value. It must be correct DATE value \"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\""
                                 + " while time_unit is DAY/WEEK/MONTH or"
@@ -350,7 +362,8 @@ public class DynamicPartitionUtil {
 
     // Check if all required properties have been set.
     // Also check all optional properties; if not set, set them to default values.
-    public static boolean checkInputDynamicPartitionProperties(Map<String, String> properties, PartitionInfo partitionInfo) throws DdlException {
+    public static boolean checkInputDynamicPartitionProperties(Map<String, String> properties,
+            PartitionInfo partitionInfo) throws DdlException {
         if (properties == null || properties.isEmpty()) {
             return false;
         }
@@ -421,18 +434,21 @@ public class DynamicPartitionUtil {
                 if (!isReplay) {
                     // execute the first-time dynamic partition creation only on the master FE,
                     // so there is no need to execute it on replay
-                    Catalog.getCurrentCatalog().getDynamicPartitionScheduler().executeDynamicPartitionFirstTime(dbId, olapTable.getId());
+                    Catalog.getCurrentCatalog().getDynamicPartitionScheduler()
+                            .executeDynamicPartitionFirstTime(dbId, olapTable.getId());
                 }
-                Catalog.getCurrentCatalog().getDynamicPartitionScheduler().registerDynamicPartitionTable(dbId, olapTable.getId());
+                Catalog.getCurrentCatalog().getDynamicPartitionScheduler()
+                        .registerDynamicPartitionTable(dbId, olapTable.getId());
             } else {
-                Catalog.getCurrentCatalog().getDynamicPartitionScheduler().removeDynamicPartitionTable(dbId, olapTable.getId());
+                Catalog.getCurrentCatalog().getDynamicPartitionScheduler()
+                        .removeDynamicPartitionTable(dbId, olapTable.getId());
             }
         }
     }
 
     // Analyze all properties to check their validity
-    public static Map<String, String> analyzeDynamicPartition(Map<String, String> properties, PartitionInfo partitionInfo)
-            throws UserException {
+    public static Map<String, String> analyzeDynamicPartition(Map<String, String> properties,
+            PartitionInfo partitionInfo) throws UserException {
         // properties should not be empty, check properties before calling this function
         Map<String, String> analyzedProperties = new HashMap<>();
         if (properties.containsKey(DynamicPartitionProperty.TIME_UNIT)) {
@@ -505,19 +521,22 @@ public class DynamicPartitionUtil {
             start = 0;
             expectCreatePartitionNum = end - start;
         } else {
-            int historyPartitionNum = Integer.valueOf(analyzedProperties.getOrDefault(DynamicPartitionProperty.HISTORY_PARTITION_NUM,
+            int historyPartitionNum = Integer.parseInt(analyzedProperties.getOrDefault(
+                    DynamicPartitionProperty.HISTORY_PARTITION_NUM,
                     String.valueOf(DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM)));
             if (historyPartitionNum != DynamicPartitionProperty.NOT_SET_HISTORY_PARTITION_NUM) {
                 expectCreatePartitionNum = end - Math.max(start, -historyPartitionNum);
             } else {
                 if (start == Integer.MIN_VALUE) {
-                    throw new DdlException("Provide start or history_partition_num property when creating history partition");
+                    throw new DdlException("Provide start or history_partition_num property"
+                            + " when creating history partition");
                 }
                 expectCreatePartitionNum = end - start;
             }
         }
         if (hasEnd && (expectCreatePartitionNum > Config.max_dynamic_partition_num)) {
-            throw new DdlException("Too many dynamic partitions: " + expectCreatePartitionNum + ". Limit: " + Config.max_dynamic_partition_num);
+            throw new DdlException("Too many dynamic partitions: "
+                    + expectCreatePartitionNum + ". Limit: " + Config.max_dynamic_partition_num);
         }
 
         if (properties.containsKey(DynamicPartitionProperty.START_DAY_OF_MONTH)) {
@@ -564,7 +583,8 @@ public class DynamicPartitionUtil {
         }
         if (properties.containsKey(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS)) {
             String reservedHistoryPeriods = properties.get(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS);
-            checkReservedHistoryPeriodValidate(reservedHistoryPeriods, analyzedProperties.get(DynamicPartitionProperty.TIME_UNIT));
+            checkReservedHistoryPeriodValidate(reservedHistoryPeriods,
+                    analyzedProperties.get(DynamicPartitionProperty.TIME_UNIT));
             properties.remove(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS);
             analyzedProperties.put(DynamicPartitionProperty.RESERVED_HISTORY_PERIODS, reservedHistoryPeriods);
         }
@@ -592,7 +612,8 @@ public class DynamicPartitionUtil {
             return false;
         }
 
-        return rangePartitionInfo.getPartitionColumns().size() == 1 && tableProperty.getDynamicPartitionProperty().getEnable();
+        return rangePartitionInfo.getPartitionColumns().size() == 1
+                && tableProperty.getDynamicPartitionProperty().getEnable();
     }
 
     /**
@@ -670,7 +691,8 @@ public class DynamicPartitionUtil {
         }
     }
 
-    public static String getHistoryPartitionRangeString(DynamicPartitionProperty dynamicPartitionProperty, String time, String format) {
+    public static String getHistoryPartitionRangeString(DynamicPartitionProperty dynamicPartitionProperty,
+            String time, String format) {
         ZoneId zoneId = dynamicPartitionProperty.getTimeZone().toZoneId();
         Date date = null;
         Timestamp timestamp = null;
@@ -681,10 +703,12 @@ public class DynamicPartitionUtil {
             date = simpleDateFormat.parse(time);
         } catch (ParseException e) {
             LOG.warn("Parse dynamic partition periods error. Error={}", e.getMessage());
-            return getFormattedTimeWithoutMinuteSecond(ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format);
+            return getFormattedTimeWithoutMinuteSecond(
+                    ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format);
         }
         timestamp = new Timestamp(date.getTime());
-        return getFormattedTimeWithoutMinuteSecond(ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format);
+        return getFormattedTimeWithoutMinuteSecond(
+                ZonedDateTime.parse(timestamp.toString(), dateTimeFormatter), format);
     }
 
     /**
@@ -725,7 +749,8 @@ public class DynamicPartitionUtil {
      * Today is 2020-05-24, offset = -1, startOf.dayOfWeek = 3
      * It will return 2020-05-20  (Wednesday of last week)
      */
-    private static String getPartitionRangeOfWeek(ZonedDateTime current, int offset, StartOfDate startOf, String format) {
+    private static String getPartitionRangeOfWeek(ZonedDateTime current, int offset,
+            StartOfDate startOf, String format) {
         Preconditions.checkArgument(startOf.isStartOfWeek());
         // 1. get the offset week
         ZonedDateTime offsetWeek = current.plusWeeks(offset);
@@ -745,7 +770,8 @@ public class DynamicPartitionUtil {
      * Today is 2020-05-24, offset = 1, startOf.month = 3
      * It will return 2020-06-03
      */
-    private static String getPartitionRangeOfMonth(ZonedDateTime current, int offset, StartOfDate startOf, String format) {
+    private static String getPartitionRangeOfMonth(ZonedDateTime current,
+            int offset, StartOfDate startOf, String format) {
         Preconditions.checkArgument(startOf.isStartOfMonth());
         // 1. Get the offset date.
         int realOffset = offset;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java
index b6cd819afc..e25ebf5a57 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/KafkaUtil.java
@@ -87,8 +87,8 @@ public class KafkaUtil {
     // The input parameter "timestampOffsets" is <partition, timestamp>
     // The return value is <partition, offset>
     public static List<Pair<Integer, Long>> getOffsetsForTimes(String brokerList, String topic,
-                                                               Map<String, String> convertedCustomProperties,
-                                                               List<Pair<Integer, Long>> timestampOffsets) throws LoadException {
+            Map<String, String> convertedCustomProperties, List<Pair<Integer, Long>> timestampOffsets)
+            throws LoadException {
         TNetworkAddress address = null;
         LOG.debug("begin to get offsets for times of topic: {}, {}", topic, timestampOffsets);
         try {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java
index e203377b98..1fb8bc999a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ListUtil.java
@@ -25,7 +25,6 @@ import org.apache.doris.common.DdlException;
 import com.google.common.base.Preconditions;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
@@ -70,16 +69,15 @@ public class ListUtil {
         return result;
     }
 
-    public static void checkPartitionKeyListsMatch(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
-        Collections.sort(list1, PARTITION_KEY_COMPARATOR);
-        Collections.sort(list2, PARTITION_KEY_COMPARATOR);
+    public static void checkPartitionKeyListsMatch(List<PartitionItem> list1,
+            List<PartitionItem> list2) throws DdlException {
+        list1.sort(PARTITION_KEY_COMPARATOR);
+        list2.sort(PARTITION_KEY_COMPARATOR);
 
         int idx1 = 0;
         int idx2 = 0;
-        List<PartitionKey> keys1 = new ArrayList<>();
-        List<PartitionKey> keys2 = new ArrayList<>();
-        keys1.addAll(list1.get(idx1).getItems());
-        keys2.addAll(list2.get(idx2).getItems());
+        List<PartitionKey> keys1 = new ArrayList<>(list1.get(idx1).getItems());
+        List<PartitionKey> keys2 = new ArrayList<>(list2.get(idx2).getItems());
 
         while (true) {
             int size = Math.min(keys1.size(), keys2.size());
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java
index 42fa8cd10b..82954dce5c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/MetaLockUtils.java
@@ -86,7 +86,8 @@ public class MetaLockUtils {
         }
     }
 
-    public static boolean tryWriteLockTablesOrMetaException(List<Table> tableList, long timeout, TimeUnit unit) throws MetaNotFoundException {
+    public static boolean tryWriteLockTablesOrMetaException(List<Table> tableList,
+            long timeout, TimeUnit unit) throws MetaNotFoundException {
         for (int i = 0; i < tableList.size(); i++) {
             try {
                 if (!tableList.get(i).tryWriteLockOrMetaException(timeout, unit)) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java
index 2eaa0a0533..5f6412bf96 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PrintableMap.java
@@ -32,6 +32,7 @@ public class PrintableMap<K, V> {
     private String entryDelimiter = ",";
 
     public static final Set<String> SENSITIVE_KEY;
+
     static {
         SENSITIVE_KEY = Sets.newTreeSet(String.CASE_INSENSITIVE_ORDER);
         SENSITIVE_KEY.add("password");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
index dfd5e2a13d..4f1a705920 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ProfileManager.java
@@ -224,7 +224,8 @@ public class ProfileManager {
         return builder.getFragmentTreeRoot(executionId);
     }
 
-    public List<Triple<String, String, Long>> getFragmentInstanceList(String queryID, String executionId, String fragmentId)
+    public List<Triple<String, String, Long>> getFragmentInstanceList(String queryID,
+            String executionId, String fragmentId)
             throws AnalysisException {
         MultiProfileTreeBuilder builder;
         readLock.lock();
@@ -242,7 +243,8 @@ public class ProfileManager {
         return builder.getInstanceList(executionId, fragmentId);
     }
 
-    public ProfileTreeNode getInstanceProfileTree(String queryID, String executionId, String fragmentId, String instanceId)
+    public ProfileTreeNode getInstanceProfileTree(String queryID, String executionId,
+            String fragmentId, String instanceId)
             throws AnalysisException {
         MultiProfileTreeBuilder builder;
         readLock.lock();
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
index 5202122410..88ac119aeb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/PropertyAnalyzer.java
@@ -207,7 +207,8 @@ public class PropertyAnalyzer {
                 throw new AnalysisException("Remote storage cool down time should later than now");
             }
             if (hasCooldown && (remoteCooldownTimeStamp <= cooldownTimeStamp)) {
-                throw new AnalysisException("`remote_storage_cooldown_time` should later than `storage_cooldown_time`.");
+                throw new AnalysisException(
+                        "`remote_storage_cooldown_time` should later than `storage_cooldown_time`.");
             }
         }
 
@@ -238,7 +239,8 @@ public class PropertyAnalyzer {
     private static Short analyzeReplicationNum(Map<String, String> properties, String prefix, short oldReplicationNum)
             throws AnalysisException {
         Short replicationNum = oldReplicationNum;
-        String propKey = Strings.isNullOrEmpty(prefix) ? PROPERTIES_REPLICATION_NUM : prefix + "." + PROPERTIES_REPLICATION_NUM;
+        String propKey = Strings.isNullOrEmpty(prefix)
+                ? PROPERTIES_REPLICATION_NUM : prefix + "." + PROPERTIES_REPLICATION_NUM;
         if (properties != null && properties.containsKey(propKey)) {
             try {
                 replicationNum = Short.valueOf(properties.get(propKey));
@@ -246,7 +248,8 @@ public class PropertyAnalyzer {
                 throw new AnalysisException(e.getMessage());
             }
 
-            if (replicationNum < Config.min_replication_num_per_tablet || replicationNum > Config.max_replication_num_per_tablet) {
+            if (replicationNum < Config.min_replication_num_per_tablet
+                    || replicationNum > Config.max_replication_num_per_tablet) {
                 throw new AnalysisException("Replication num should between " + Config.min_replication_num_per_tablet
                         + " and " + Config.max_replication_num_per_tablet);
             }
@@ -543,7 +546,8 @@ public class PropertyAnalyzer {
         return ScalarType.createType(type);
     }
 
-    public static Boolean analyzeBackendDisableProperties(Map<String, String> properties, String key, Boolean defaultValue) throws AnalysisException {
+    public static Boolean analyzeBackendDisableProperties(Map<String, String> properties,
+            String key, Boolean defaultValue) {
         if (properties.containsKey(key)) {
             String value = properties.remove(key);
             return Boolean.valueOf(value);
@@ -551,7 +555,8 @@ public class PropertyAnalyzer {
         return defaultValue;
     }
 
-    public static Tag analyzeBackendTagProperties(Map<String, String> properties, Tag defaultValue) throws AnalysisException {
+    public static Tag analyzeBackendTagProperties(Map<String, String> properties, Tag defaultValue)
+            throws AnalysisException {
         if (properties.containsKey(TAG_LOCATION)) {
             String tagVal = properties.remove(TAG_LOCATION);
             return Tag.create(Tag.TYPE_LOCATION, tagVal);
@@ -608,7 +613,8 @@ public class PropertyAnalyzer {
             replicaAlloc.put(Tag.create(Tag.TYPE_LOCATION, locationVal), replicationNum);
             totalReplicaNum += replicationNum;
         }
-        if (totalReplicaNum < Config.min_replication_num_per_tablet || totalReplicaNum > Config.max_replication_num_per_tablet) {
+        if (totalReplicaNum < Config.min_replication_num_per_tablet
+                || totalReplicaNum > Config.max_replication_num_per_tablet) {
             throw new AnalysisException("Total replication num should between " + Config.min_replication_num_per_tablet
                     + " and " + Config.max_replication_num_per_tablet);
         }
@@ -620,7 +626,7 @@ public class PropertyAnalyzer {
     }
 
     public static DataSortInfo analyzeDataSortInfo(Map<String, String> properties, KeysType keyType,
-                                                   int keyCount, TStorageFormat storageFormat) throws AnalysisException {
+            int keyCount, TStorageFormat storageFormat) throws AnalysisException {
         if (properties == null || properties.isEmpty()) {
             return new DataSortInfo(TSortType.LEXICAL, keyCount);
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java
index 0a40cec59b..d3e659ee8e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RangeUtils.java
@@ -57,7 +57,8 @@ public class RangeUtils {
         }
         return false;
     }
-    /*
+
+    /**
      * Pass only if the 2 range lists are exactly the same.
      * What does "exactly the same" mean?
      *      1. {[0, 10), [10, 20)} is exactly the same as {[0, 20)}
@@ -82,7 +83,8 @@ public class RangeUtils {
      *      4.2 upper bounds (20 and 20) are equal.
      *  5. Not more next ranges, so 2 lists are equal.
      */
-    public static void checkPartitionItemListsMatch(List<PartitionItem> list1, List<PartitionItem> list2) throws DdlException {
+    public static void checkPartitionItemListsMatch(List<PartitionItem> list1, List<PartitionItem> list2)
+            throws DdlException {
         Collections.sort(list1, RangeUtils.RANGE_COMPARATOR);
         Collections.sort(list2, RangeUtils.RANGE_COMPARATOR);
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java
index 0f341f6397..0671f04405 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/ReflectionUtils.java
@@ -58,7 +58,7 @@ public class ReflectionUtils {
         return result;
     }
 
-    static private ThreadMXBean threadBean =
+    private static ThreadMXBean threadBean =
             ManagementFactory.getThreadMXBean();
 
     public static void setContentionTracing(boolean val) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
index f44ce0b672..b81db22f52 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
@@ -398,8 +398,7 @@ public class RuntimeProfile {
     // Because the profiles of summary and child fragments are not in a real parent-child relationship
     // Each child profile needs to calculate the time proportion consumed by itself
     public void computeTimeInChildProfile() {
-        childMap.values().
-                forEach(RuntimeProfile::computeTimeInProfile);
+        childMap.values().forEach(RuntimeProfile::computeTimeInProfile);
     }
 
     public void computeTimeInProfile() {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java
index ff4a476856..e96c527906 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SmallFileMgr.java
@@ -300,7 +300,8 @@ public class SmallFileMgr implements Writable {
 
             int contentLength = urlConnection.getContentLength();
             if (contentLength == -1 || contentLength > Config.max_small_file_size_bytes) {
-                throw new DdlException("Failed to download file from url: " + url + ", invalid content length: " + contentLength);
+                throw new DdlException("Failed to download file from url: " + url
+                        + ", invalid content length: " + contentLength);
             }
 
             int bytesRead = 0;
@@ -309,7 +310,7 @@ public class SmallFileMgr implements Writable {
             if (saveContent) {
                 // download from url, and check file size
                 bytesRead = 0;
-                byte buf[] = new byte[contentLength];
+                byte[] buf = new byte[contentLength];
                 try (BufferedInputStream in = new BufferedInputStream(url.openStream())) {
                     while (bytesRead < contentLength) {
                         bytesRead += in.read(buf, bytesRead, contentLength - bytesRead);
@@ -449,7 +450,8 @@ public class SmallFileMgr implements Writable {
     }
 
     private File getAbsoluteFile(long dbId, String catalog, String fileName) {
-        return Paths.get(Config.small_file_dir, String.valueOf(dbId), catalog, fileName).normalize().toAbsolutePath().toFile();
+        return Paths.get(Config.small_file_dir, String.valueOf(dbId), catalog, fileName)
+                .normalize().toAbsolutePath().toFile();
     }
 
     public List<List<String>> getInfo(String dbName) throws DdlException {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java
index 50c60386b8..7fb2a7d8a8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/SqlBlockUtil.java
@@ -40,7 +40,7 @@ public class SqlBlockUtil {
 
     // check that (sql or sqlHash) and (limitations: partitionNum, tabletNum, cardinality) are not both set
     public static void checkSqlAndLimitationsSetBoth(String sql, String sqlHash,
-                                                     String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException {
+            String partitionNumString, String tabletNumString, String cardinalityString) throws AnalysisException {
         if ((!STRING_DEFAULT.equals(sql) || !STRING_DEFAULT.equals(sqlHash))
                 && !isSqlBlockLimitationsEmpty(partitionNumString, tabletNumString, cardinalityString)) {
             ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE);
@@ -50,7 +50,7 @@ public class SqlBlockUtil {
     // 1. check that (sql or sqlHash) and (limitations: partitionNum, tabletNum, cardinality) are not both set
     // 2. check whether any limitation is set while neither sql nor sqlHash is set
     public static void checkPropertiesValidate(String sql, String sqlHash,
-                                        String partitionNumString, String tabletNumString, String cardinalityString)  throws AnalysisException {
+            String partitionNumString, String tabletNumString, String cardinalityString)  throws AnalysisException {
         if (((!STRING_DEFAULT.equals(sql) || !STRING_DEFAULT.equals(sqlHash))
                 && !isSqlBlockLimitationsEmpty(partitionNumString, tabletNumString, cardinalityString))
                 || ((STRING_DEFAULT.equals(sql) && STRING_DEFAULT.equals(sqlHash))
@@ -60,8 +60,10 @@ public class SqlBlockUtil {
     }
 
     // check whether all of the limitations (partitionNum, tabletNum, cardinality) are empty
-    public static Boolean isSqlBlockLimitationsEmpty(String partitionNumString, String tabletNumString, String cardinalityString) {
-        return StringUtils.isEmpty(partitionNumString) && StringUtils.isEmpty(tabletNumString) && StringUtils.isEmpty(cardinalityString);
+    public static Boolean isSqlBlockLimitationsEmpty(String partitionNumString,
+            String tabletNumString, String cardinalityString) {
+        return StringUtils.isEmpty(partitionNumString)
+                && StringUtils.isEmpty(tabletNumString) && StringUtils.isEmpty(cardinalityString);
     }
 
     public static Boolean isSqlBlockLimitationsDefault(Long partitionNum, Long tabletNum, Long cardinality) {
@@ -87,11 +89,14 @@ public class SqlBlockUtil {
         } else if (!STRING_DEFAULT.equals(sqlBlockRule.getSqlHash())) {
             if (!STRING_DEFAULT.equals(sqlBlockRule.getSql()) && StringUtils.isNotEmpty(sqlBlockRule.getSql())) {
                 throw new AnalysisException("Only sql or sqlHash can be configured");
-            } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())
-                    && !isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) {
+            } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(),
+                    sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())
+                    && !isSqlBlockLimitationsNull(sqlBlockRule.getPartitionNum(),
+                    sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) {
                 ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE);
             }
-        } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(), sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) {
+        } else if (!isSqlBlockLimitationsDefault(sqlBlockRule.getPartitionNum(),
+                sqlBlockRule.getTabletNum(), sqlBlockRule.getCardinality())) {
             if (!STRING_DEFAULT.equals(sqlBlockRule.getSql()) || !STRING_DEFAULT.equals(sqlBlockRule.getSqlHash())) {
                 ErrorReport.reportAnalysisException(ErrorCode.ERROR_SQL_AND_LIMITATIONS_SET_IN_ONE_RULE);
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java
index 4f4409e2a8..ab26fe41b8 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/URI.java
@@ -63,6 +63,7 @@ public class URI {
     public String getLocation() {
         return location;
     }
+
     public String getScheme() {
         return scheme;
     }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
index 4ca18e3ce0..a63c40223e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/Util.java
@@ -51,9 +51,11 @@ public class Util {
 
     private static final long DEFAULT_EXEC_CMD_TIMEOUT_MS = 600000L;
 
-    private static final String[] ORDINAL_SUFFIX = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" };
+    private static final String[] ORDINAL_SUFFIX
+            = new String[] { "th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th" };
 
-    private static final List<String> REGEX_ESCAPES = Lists.newArrayList("\\", "$", "(", ")", "*", "+", ".", "[", "]", "?", "^", "{", "}", "|");
+    private static final List<String> REGEX_ESCAPES
+            = Lists.newArrayList("\\", "$", "(", ")", "*", "+", ".", "[", "]", "?", "^", "{", "}", "|");
 
     static {
         TYPE_STRING_MAP.put(PrimitiveType.TINYINT, "tinyint(4)");
diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java
index c78fba3925..0d0327d8b3 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/CheckConsistencyJob.java
@@ -144,7 +144,8 @@ public class CheckConsistencyJob {
             }
 
             // check partition's replication num. if it is 1, skip
-            short replicaNum = olapTable.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum();
+            short replicaNum = olapTable.getPartitionInfo()
+                    .getReplicaAllocation(partition.getId()).getTotalReplicaNum();
             if (replicaNum == (short) 1) {
                 LOG.debug("partition[{}]'s replication num is 1. skip consistency check", partition.getId());
                 return false;
diff --git a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java
index b5e261083e..8b7741f724 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/consistency/ConsistencyChecker.java
@@ -279,7 +279,8 @@ public class ConsistencyChecker extends MasterDaemon {
                                 new PriorityQueue<>(Math.max(table.getAllPartitions().size(), 1), COMPARATOR);
                         for (Partition partition : table.getPartitions()) {
                             // check partition's replication num. if it is 1, skip
-                            if (table.getPartitionInfo().getReplicaAllocation(partition.getId()).getTotalReplicaNum() == (short) 1) {
+                            if (table.getPartitionInfo().getReplicaAllocation(
+                                    partition.getId()).getTotalReplicaNum() == (short) 1) {
                                 LOG.debug("partition[{}]'s replication num is 1. ignore", partition.getId());
                                 continue;
                             }
@@ -297,15 +298,18 @@ public class ConsistencyChecker extends MasterDaemon {
                             Partition partition = (Partition) chosenOne;
 
                             // sort materializedIndices
-                            List<MaterializedIndex> visibleIndexes = partition.getMaterializedIndices(IndexExtState.VISIBLE);
-                            Queue<MetaObject> indexQueue = new PriorityQueue<>(Math.max(visibleIndexes.size(), 1), COMPARATOR);
+                            List<MaterializedIndex> visibleIndexes
+                                    = partition.getMaterializedIndices(IndexExtState.VISIBLE);
+                            Queue<MetaObject> indexQueue
+                                    = new PriorityQueue<>(Math.max(visibleIndexes.size(), 1), COMPARATOR);
                             indexQueue.addAll(visibleIndexes);
 
                             while ((chosenOne = indexQueue.poll()) != null) {
                                 MaterializedIndex index = (MaterializedIndex) chosenOne;
 
                                 // sort tablets
-                                Queue<MetaObject> tabletQueue = new PriorityQueue<>(Math.max(index.getTablets().size(), 1), COMPARATOR);
+                                Queue<MetaObject> tabletQueue
+                                        = new PriorityQueue<>(Math.max(index.getTablets().size(), 1), COMPARATOR);
                                 tabletQueue.addAll(index.getTablets());
 
                                 while ((chosenOne = tabletQueue.poll()) != null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
index 8ea01770ca..d9c4c711bb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/InternalDataSource.java
@@ -578,7 +578,8 @@ public class InternalDataSource implements DataSourceIf {
                                 if (olapTable.getState() != OlapTableState.NORMAL) {
                                     throw new DdlException("The table [" + olapTable.getState() + "]'s state is "
                                             + olapTable.getState() + ", cannot be dropped."
-                                            + " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered),"
+                                            + " please cancel the operation on olap table firstly."
+                                            + " If you want to forcibly drop(cannot be recovered),"
                                             + " please use \"DROP table FORCE\".");
                                 }
                             }
@@ -909,7 +910,8 @@ public class InternalDataSource implements DataSourceIf {
                     if ((olapTable.getState() != OlapTableState.NORMAL)) {
                         throw new DdlException("The table [" + tableName + "]'s state is " + olapTable.getState()
                                 + ", cannot be dropped."
-                                + " please cancel the operation on olap table firstly. If you want to forcibly drop(cannot be recovered),"
+                                + " please cancel the operation on olap table firstly."
+                                + " If you want to forcibly drop(cannot be recovered),"
                                 + " please use \"DROP table FORCE\".");
                     }
                 }
@@ -1839,7 +1841,8 @@ public class InternalDataSource implements DataSourceIf {
 
         if (partitionInfo.getType() == PartitionType.UNPARTITIONED) {
             // if this is an unpartitioned table, we should analyze data property and replication num here.
-            // if this is a partitioned table, there properties are already analyzed in RangePartitionDesc analyze phase.
+            // if this is a partitioned table, these properties are already analyzed
+            // in RangePartitionDesc analyze phase.
 
             // use table name as this single partition name
             long partitionId = partitionNameToId.get(tableName);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java
index 08ccf49641..66c3a81df1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/deploy/DeployManager.java
@@ -366,7 +366,8 @@ public class DeployManager extends MasterDaemon {
                 break BE_BLOCK;
             }
             LOG.debug("get remote backend hosts: {}", remoteBackendHosts);
-            List<Backend> localBackends = Catalog.getCurrentSystemInfo().getClusterBackends(SystemInfoService.DEFAULT_CLUSTER);
+            List<Backend> localBackends = Catalog.getCurrentSystemInfo()
+                    .getClusterBackends(SystemInfoService.DEFAULT_CLUSTER);
             List<Pair<String, Integer>> localBackendHosts = Lists.newArrayList();
             for (Backend backend : localBackends) {
                 localBackendHosts.add(Pair.create(backend.getHost(), backend.getHeartbeatPort()));
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java
index 1504e7cb34..1893c49734 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsNodeInfo.java
@@ -24,6 +24,7 @@ import org.apache.logging.log4j.Logger;
 
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * This class represents one node with the http and potential thrift publish address
@@ -197,8 +198,8 @@ public class EsNodeInfo {
         if (hasThrift != nodeInfo.hasThrift) {
             return false;
         }
-        return (publishAddress != null ? publishAddress.equals(nodeInfo.publishAddress) : nodeInfo.publishAddress == null)
-                && (thriftAddress != null ? thriftAddress.equals(nodeInfo.thriftAddress) : nodeInfo.thriftAddress == null);
+        return (Objects.equals(publishAddress, nodeInfo.publishAddress))
+                && (Objects.equals(thriftAddress, nodeInfo.thriftAddress));
     }
 
     @Override
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java
index e92d06ec3f..2671485272 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsRepository.java
@@ -58,7 +58,8 @@ public class EsRepository extends MasterDaemon {
         }
         esTables.put(esTable.getId(), esTable);
         esClients.put(esTable.getId(),
-                new EsRestClient(esTable.getSeeds(), esTable.getUserName(), esTable.getPasswd(), esTable.isHttpSslEnabled()));
+                new EsRestClient(esTable.getSeeds(), esTable.getUserName(), esTable.getPasswd(),
+                        esTable.isHttpSslEnabled()));
         LOG.info("register a new table [{}] to sync list", esTable);
     }
 
@@ -74,7 +75,8 @@ public class EsRepository extends MasterDaemon {
             try {
                 esTable.syncTableMetaData(esClients.get(esTable.getId()));
             } catch (Throwable e) {
-                LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster", esTable.getName(), e);
+                LOG.warn("Exception happens when fetch index [{}] meta data from remote es cluster",
+                        esTable.getName(), e);
                 esTable.setEsTablePartitions(null);
                 esTable.setLastMetaDataSyncException(e);
             }
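
The loop in this hunk follows a degrade-don't-crash pattern: a failed metadata fetch for one table must not kill the sync daemon, so the exception is logged, the cached partition state is cleared, and the error is stored for later reporting. A sketch of the same pattern with invented types (not the Doris classes):

    public class SyncLoopSketch {
        // Invented stand-in for EsTable's last-sync-exception field.
        static Throwable lastSyncException;

        static void syncOne(Runnable fetchMetaData) {
            try {
                fetchMetaData.run();
                lastSyncException = null;
            } catch (Throwable e) {
                // Remember the failure and keep the daemon alive for the next table.
                lastSyncException = e;
            }
        }

        public static void main(String[] args) {
            syncOne(() -> { throw new RuntimeException("es cluster unreachable"); });
            System.out.println("recorded: " + lastSyncException.getMessage());
        }
    }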
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java
index e2db385df2..7b967b0fcc 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsShardPartitions.java
@@ -82,7 +82,8 @@ public class EsShardPartitions {
                                         (JSONObject) jsonObject.get("nodes")));
                     } catch (Exception e) {
                         LOG.error("fetch index [{}] shard partitions failure", indexName, e);
-                        throw new DorisEsException("failed to fetch [" + indexName + "] shard partitions [" + e.getMessage() + "]");
+                        throw new DorisEsException("failed to fetch [" + indexName
+                                + "] shard partitions [" + e.getMessage() + "]");
                     }
                 }
             }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java
index dc1f6b60dc..4db3e96b0a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/EsUtil.java
@@ -92,7 +92,8 @@ public class EsUtil {
         try {
             return Boolean.parseBoolean(property);
         } catch (Exception e) {
-            throw new DdlException(String.format("fail to parse %s, %s = %s, `%s` should be like 'true' or 'false', value should be surrounded by double quotation marks", name, name, property, name));
+            throw new DdlException(String.format("fail to parse %s, %s = %s, `%s` should be like 'true' or 'false', "
+                    + "value should be surrounded by double quotation marks", name, name, property, name));
         }
     }
 }
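
Worth noting for anyone reusing this helper: Boolean.parseBoolean never throws; it returns true only for the (case-insensitive) string "true" and false for everything else, so the catch block above is effectively unreachable. A strict variant that makes the error message reachable might look like this (hypothetical helper, not part of the commit):

    public class StrictBooleanParse {
        // Hypothetical strict parser: rejects anything other than "true"/"false"
        // instead of silently mapping it to false as Boolean.parseBoolean does.
        static boolean parseStrict(String name, String property) {
            if ("true".equalsIgnoreCase(property)) {
                return true;
            }
            if ("false".equalsIgnoreCase(property)) {
                return false;
            }
            throw new IllegalArgumentException(String.format(
                    "fail to parse %s = %s, value should be 'true' or 'false'", name, property));
        }

        public static void main(String[] args) {
            System.out.println(parseStrict("enable_ssl", "true")); // true
            // parseStrict("enable_ssl", "yes") would throw, unlike Boolean.parseBoolean
        }
    }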
diff --git a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java
index f736a9ee2f..680ab2d1ce 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/external/elasticsearch/MappingPhase.java
@@ -92,7 +92,8 @@ public class MappingPhase implements SearchPhase {
... 10607 lines suppressed ...


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@doris.apache.org
For additional commands, e-mail: commits-help@doris.apache.org