Posted to commits@doris.apache.org by mo...@apache.org on 2020/03/05 10:19:26 UTC

[incubator-doris] branch master updated: [MaterializedView] Support different keys type between MVs and base table (#3036)

This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 7b30bbe  [MaterializedView] Support different keys type between MVs and base table (#3036)
7b30bbe is described below

commit 7b30bbea420b818a49607dff55afee1cb578a152
Author: Mingyu Chen <mo...@gmail.com>
AuthorDate: Thu Mar 5 18:19:18 2020 +0800

    [MaterializedView] Support different keys type between MVs and base table (#3036)
    
    Firstly, add a materialized index meta to the OLAP table.
    
    The materialized index meta includes the index name, schema, schema hash, keys type, etc.
    The per-index information that was previously scattered across separate maps is now
    encapsulated in MaterializedIndexMeta.
    
    Also, once materialized views are enabled, the keys type of an index meta may differ
    from the keys type of the base index.
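    
    As a rough sketch of the new access pattern (method names as they appear in the diff
    below; olapTable and indexId are assumed locals, and the usual org.apache.doris.catalog
    imports are assumed), callers that used to read each attribute from its own map on
    OlapTable now fetch a single meta object per index:
    
        MaterializedIndexMeta indexMeta = olapTable.getIndexMetaByIndexId(indexId);
        List<Column> schema = indexMeta.getSchema();
        int schemaHash = indexMeta.getSchemaHash();
        short shortKeyColumnCount = indexMeta.getShortKeyColumnCount();
        TStorageType storageType = indexMeta.getStorageType();
        // The keys type is now stored per index, so it may differ from
        // olapTable.getKeysType() once materialized views are involved.
        KeysType keysType = indexMeta.getKeysType();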
    
    Secondly, support the deduplicate MV.
    If there is a group by clause or an aggregation function in the create mv stmt, the keys
    type of the MV is AGG_KEYS, while the keys type of the base table remains DUP_KEYS.
    For example:
    Duplicate table (k1, k2, v1)
    MV (k1, k2) group by k1, k2
    The data should be aggregated while the MV is being built.
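    
    A minimal sketch of the keys type decision (names taken from CreateMaterializedViewStmt
    in the diff below; selectStmt is assumed to be the analyzed select stmt of the MV):
    
        // The MV defaults to DUP_KEYS and switches to AGG_KEYS as soon as the
        // select stmt carries aggregation (a group by clause or an aggregate function).
        KeysType mvKeysType = KeysType.DUP_KEYS;
        if (selectStmt.getAggInfo() != null) {
            mvKeysType = KeysType.AGG_KEYS;
        }
    
    For the example above, the base table keeps DUP_KEYS while the MV over
    "k1, k2 group by k1, k2" is created with AGG_KEYS; createMaterializedViewJob only falls
    back to the base table's keys type when no MV keys type is passed in (plain rollups).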
---
 .../doris/alter/MaterializedViewHandler.java       |  16 +-
 .../java/org/apache/doris/alter/RollupJob.java     |  11 +-
 .../java/org/apache/doris/alter/RollupJobV2.java   |   5 +-
 .../apache/doris/alter/SchemaChangeHandler.java    |  15 +-
 .../org/apache/doris/alter/SchemaChangeJob.java    |  15 +-
 .../org/apache/doris/alter/SchemaChangeJobV2.java  |   5 +-
 .../doris/analysis/CreateMaterializedViewStmt.java |  22 +-
 .../java/org/apache/doris/backup/RestoreJob.java   |  33 +--
 .../java/org/apache/doris/catalog/Catalog.java     | 113 +++-----
 .../java/org/apache/doris/catalog/KeysType.java    |  15 +
 .../doris/catalog/MaterializedIndexMeta.java       | 148 ++++++++++
 .../java/org/apache/doris/catalog/OlapTable.java   | 320 ++++++++-------------
 .../org/apache/doris/common/FeMetaVersion.java     |   5 +-
 .../apache/doris/common/proc/IndexInfoProcDir.java |  24 +-
 .../doris/http/rest/StorageTypeCheckAction.java    |   9 +-
 .../org/apache/doris/master/ReportHandler.java     |  16 +-
 .../doris/planner/MaterializedViewSelector.java    |  99 ++++---
 .../org/apache/doris/planner/OlapTableSink.java    |   8 +-
 .../org/apache/doris/planner/RollupSelector.java   |   4 +-
 .../apache/doris/task/HadoopLoadPendingTask.java   |   8 +-
 .../apache/doris/transaction/TransactionState.java |   2 +-
 .../org/apache/doris/analysis/AccessTestUtil.java  |   6 +-
 .../analysis/CreateMaterializedViewStmtTest.java   |  39 ++-
 .../org/apache/doris/backup/CatalogMocker.java     |  16 +-
 .../org/apache/doris/catalog/CatalogTestUtil.java  |   4 +-
 .../org/apache/doris/catalog/DatabaseTest.java     |  19 +-
 .../doris/catalog/MaterializedIndexMetaTest.java   |  69 +++++
 .../java/org/apache/doris/catalog/TableTest.java   |  38 +--
 .../apache/doris/catalog/TempPartitionTest.java    |   2 +-
 .../org/apache/doris/common/util/UnitTestUtil.java |   4 +-
 .../org/apache/doris/http/DorisHttpTestCase.java   |   4 +-
 .../apache/doris/persist/CreateTableInfoTest.java  |  20 +-
 .../planner/MaterializedViewSelectorTest.java      |  87 ++++--
 33 files changed, 688 insertions(+), 513 deletions(-)

diff --git a/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java b/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
index 57e8e42..65f5459 100644
--- a/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
+++ b/fe/src/main/java/org/apache/doris/alter/MaterializedViewHandler.java
@@ -194,8 +194,8 @@ public class MaterializedViewHandler extends AlterHandler {
         List<Column> mvColumns = checkAndPrepareMaterializedView(addMVClause, olapTable);
 
         // Step2: create mv job
-        RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns,
-                addMVClause.getProperties(), olapTable, db, baseIndexId);
+        RollupJobV2 rollupJobV2 = createMaterializedViewJob(mvIndexName, baseIndexName, mvColumns, addMVClause
+                .getProperties(), olapTable, db, baseIndexId, addMVClause.getMVKeysType());
 
         addAlterJobV2(rollupJobV2);
 
@@ -260,7 +260,7 @@ public class MaterializedViewHandler extends AlterHandler {
 
                 // step 3 create rollup job
                 RollupJobV2 alterJobV2 = createMaterializedViewJob(rollupIndexName, baseIndexName, rollupSchema, addRollupClause.getProperties(),
-                        olapTable, db, baseIndexId);
+                        olapTable, db, baseIndexId, olapTable.getKeysType());
 
                 rollupNameJobMap.put(addRollupClause.getRollupName(), alterJobV2);
                 logJobIdSet.add(alterJobV2.getJobId());
@@ -308,11 +308,13 @@ public class MaterializedViewHandler extends AlterHandler {
      * @throws AnalysisException
      */
     private RollupJobV2 createMaterializedViewJob(String mvName, String baseIndexName,
-                                           List<Column> mvColumns, Map<String, String> properties,
-                                           OlapTable olapTable, Database db, long baseIndexId)
+                                           List<Column> mvColumns, Map<String, String> properties, OlapTable
+            olapTable, Database db, long baseIndexId, KeysType mvKeysType)
             throws DdlException, AnalysisException {
-        // assign rollup index's key type, same as base index's
-        KeysType mvKeysType = olapTable.getKeysType();
+        if (mvKeysType == null) {
+            // assign rollup index's key type, same as base index's
+            mvKeysType = olapTable.getKeysType();
+        }
         // get rollup schema hash
         int mvSchemaHash = Util.schemaHash(0 /* init schema version */, mvColumns, olapTable.getCopiedBfColumns(),
                                            olapTable.getBfFpp());
diff --git a/fe/src/main/java/org/apache/doris/alter/RollupJob.java b/fe/src/main/java/org/apache/doris/alter/RollupJob.java
index 1bbe5e8..746bb76 100644
--- a/fe/src/main/java/org/apache/doris/alter/RollupJob.java
+++ b/fe/src/main/java/org/apache/doris/alter/RollupJob.java
@@ -20,6 +20,7 @@ package org.apache.doris.alter;
 import org.apache.doris.catalog.Catalog;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexState;
 import org.apache.doris.catalog.OlapTable;
@@ -748,9 +749,8 @@ public class RollupJob extends AlterJob {
                 } // end for partitions
 
                 // set index's info
-                olapTable.setIndexSchemaInfo(rollupIndexId, rollupIndexName, rollupSchema, 0,
-                                             rollupSchemaHash, rollupShortKeyColumnCount);
-                olapTable.setStorageTypeToIndex(rollupIndexId, rollupStorageType);
+                olapTable.setIndexMeta(rollupIndexId, rollupIndexName, rollupSchema, 0, rollupSchemaHash,
+                        rollupShortKeyColumnCount, rollupStorageType, KeysType.fromThrift(rollupKeysType));
                 Preconditions.checkState(olapTable.getState() == OlapTableState.ROLLUP);
 
                 this.state = JobState.FINISHING;
@@ -883,9 +883,8 @@ public class RollupJob extends AlterJob {
                 }
             }
 
-            olapTable.setIndexSchemaInfo(rollupIndexId, rollupIndexName, rollupSchema, 0,
-                                         rollupSchemaHash, rollupShortKeyColumnCount);
-            olapTable.setStorageTypeToIndex(rollupIndexId, rollupStorageType);
+            olapTable.setIndexMeta(rollupIndexId, rollupIndexName, rollupSchema, 0, rollupSchemaHash,
+                    rollupShortKeyColumnCount, rollupStorageType, KeysType.fromThrift(rollupKeysType));
         } finally {
             db.writeUnlock();
         }
diff --git a/fe/src/main/java/org/apache/doris/alter/RollupJobV2.java b/fe/src/main/java/org/apache/doris/alter/RollupJobV2.java
index 425119e..f1a71ea 100644
--- a/fe/src/main/java/org/apache/doris/alter/RollupJobV2.java
+++ b/fe/src/main/java/org/apache/doris/alter/RollupJobV2.java
@@ -281,9 +281,8 @@ public class RollupJobV2 extends AlterJobV2 {
             partition.createRollupIndex(rollupIndex);
         }
 
-        tbl.setIndexSchemaInfo(rollupIndexId, rollupIndexName, rollupSchema, 0 /* init schema version */,
-                rollupSchemaHash, rollupShortKeyColumnCount);
-        tbl.setStorageTypeToIndex(rollupIndexId, TStorageType.COLUMN);
+        tbl.setIndexMeta(rollupIndexId, rollupIndexName, rollupSchema, 0 /* init schema version */,
+                rollupSchemaHash, rollupShortKeyColumnCount,TStorageType.COLUMN, rollupKeysType);
     }
 
     /*
diff --git a/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java b/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index 8cf8fc6..a604d2e 100644
--- a/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -42,6 +42,7 @@ import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
 import org.apache.doris.catalog.MaterializedIndex.IndexState;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.OlapTable.OlapTableState;
 import org.apache.doris.catalog.Partition;
@@ -241,12 +242,7 @@ public class SchemaChangeHandler extends AlterHandler {
             // if not specify rollup index, column should be dropped from both base and rollup indexes.
             List<Long> indexIds = new ArrayList<Long>();
             indexIds.add(baseIndexId);
-            for (long indexId : olapTable.getIndexIdToSchema().keySet()) {
-                if (indexId == baseIndexId) {
-                    continue;
-                }
-                indexIds.add(indexId);
-            }
+            indexIds.addAll(olapTable.getIndexIdListExceptBaseIndex());
 
             // find column in base index and remove it
             List<Column> baseSchema = indexSchemaMap.get(baseIndexId);
@@ -1070,16 +1066,17 @@ public class SchemaChangeHandler extends AlterHandler {
          */
         for (Map.Entry<Long, List<Column>> entry : changedIndexIdToSchema.entrySet()) {
             long originIndexId = entry.getKey();
+            MaterializedIndexMeta currentIndexMeta = olapTable.getIndexMetaByIndexId(originIndexId);
             // 1. get new schema version/schema version hash, short key column count
-            int currentSchemaVersion = olapTable.getSchemaVersionByIndexId(originIndexId);
+            int currentSchemaVersion = currentIndexMeta.getSchemaVersion();
             int newSchemaVersion = currentSchemaVersion + 1;
             // generate schema hash for new index has to generate a new schema hash not equal to current schema hash
-            int currentSchemaHash = olapTable.getSchemaHashByIndexId(originIndexId);
+            int currentSchemaHash = currentIndexMeta.getSchemaHash();
             int newSchemaHash = Util.generateSchemaHash();
             while (currentSchemaHash == newSchemaHash) {
                 newSchemaHash = Util.generateSchemaHash();
             }
-            String newIndexName = SHADOW_NAME_PRFIX + olapTable.getIndexNameById(originIndexId);
+            String newIndexName = SHADOW_NAME_PRFIX + currentIndexMeta.getIndexName();
             short newShortKeyColumnCount = indexIdToShortKeyColumnCount.get(originIndexId);
             long shadowIndexId = catalog.getNextId();
 
diff --git a/fe/src/main/java/org/apache/doris/alter/SchemaChangeJob.java b/fe/src/main/java/org/apache/doris/alter/SchemaChangeJob.java
index d52ad61..97146b5 100644
--- a/fe/src/main/java/org/apache/doris/alter/SchemaChangeJob.java
+++ b/fe/src/main/java/org/apache/doris/alter/SchemaChangeJob.java
@@ -844,12 +844,8 @@ public class SchemaChangeJob extends AlterJob {
                     int schemaVersion = changedIndexIdToSchemaVersion.get(indexId);
                     int schemaHash = changedIndexIdToSchemaHash.get(indexId);
                     short shortKeyColumnCount = changedIndexIdToShortKeyColumnCount.get(indexId);
-                    olapTable.setIndexSchemaInfo(indexId, null, entry.getValue(), schemaVersion, schemaHash,
-                                                 shortKeyColumnCount);
-
-                    if (newStorageType != null) {
-                        olapTable.setIndexStorageType(indexId, newStorageType);
-                    }
+                    olapTable.setIndexMeta(indexId, null, entry.getValue(), schemaVersion, schemaHash,
+                            shortKeyColumnCount, newStorageType, null);
                 }
 
                 // 3. update base schema if changed
@@ -1012,12 +1008,9 @@ public class SchemaChangeJob extends AlterJob {
                 int schemaVersion = getSchemaVersionByIndexId(indexId);
                 int schemaHash = getSchemaHashByIndexId(indexId);
                 short shortKeyColumnCount = getShortKeyColumnCountByIndexId(indexId);
-                olapTable.setIndexSchemaInfo(indexId, null, entry.getValue(), schemaVersion, schemaHash,
-                                             shortKeyColumnCount);
+                olapTable.setIndexMeta(indexId, null, entry.getValue(), schemaVersion, schemaHash,
+                        shortKeyColumnCount, newStorageType, null);
 
-                if (newStorageType != null) {
-                    olapTable.setIndexStorageType(indexId, newStorageType);
-                }
                 if (indexId == olapTable.getBaseIndexId()) {
                     olapTable.setNewFullSchema(entry.getValue());
                 }
diff --git a/fe/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java b/fe/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
index a9095b1..5875fdd 100644
--- a/fe/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
+++ b/fe/src/main/java/org/apache/doris/alter/SchemaChangeJobV2.java
@@ -314,11 +314,10 @@ public class SchemaChangeJobV2 extends AlterJobV2 {
         }
 
         for (long shadowIdxId : indexIdMap.keySet()) {
-            tbl.setIndexSchemaInfo(shadowIdxId, indexIdToName.get(shadowIdxId), indexSchemaMap.get(shadowIdxId),
+            tbl.setIndexMeta(shadowIdxId, indexIdToName.get(shadowIdxId), indexSchemaMap.get(shadowIdxId),
                     indexSchemaVersionAndHashMap.get(shadowIdxId).first,
                     indexSchemaVersionAndHashMap.get(shadowIdxId).second,
-                    indexShortKeyMap.get(shadowIdxId));
-            tbl.setStorageTypeToIndex(shadowIdxId, TStorageType.COLUMN);
+                    indexShortKeyMap.get(shadowIdxId), TStorageType.COLUMN, null);
         }
 
         tbl.rebuildFullSchema();
diff --git a/fe/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java b/fe/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
index 2af26d3..698b3b7 100644
--- a/fe/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
+++ b/fe/src/main/java/org/apache/doris/analysis/CreateMaterializedViewStmt.java
@@ -18,6 +18,7 @@
 package org.apache.doris.analysis;
 
 import org.apache.doris.catalog.AggregateType;
+import org.apache.doris.catalog.KeysType;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.ErrorCode;
@@ -65,6 +66,7 @@ public class CreateMaterializedViewStmt extends DdlStmt {
     private List<MVColumnItem> mvColumnItemList = Lists.newArrayList();
     private String baseIndexName;
     private String dbName;
+    private KeysType mvKeysType = KeysType.DUP_KEYS;
 
     public CreateMaterializedViewStmt(String mvName, SelectStmt selectStmt,
                                       Map<String, String> properties) {
@@ -93,6 +95,10 @@ public class CreateMaterializedViewStmt extends DdlStmt {
         return dbName;
     }
 
+    public KeysType getMVKeysType() {
+        return mvKeysType;
+    }
+
     @Override
     public void analyze(Analyzer analyzer) throws UserException {
         // TODO(ml): remove it
@@ -103,20 +109,15 @@ public class CreateMaterializedViewStmt extends DdlStmt {
         FeNameFormat.checkTableName(mvName);
         // TODO(ml): The mv name in from clause should pass the analyze without error.
         selectStmt.analyze(analyzer);
+        if (selectStmt.getAggInfo() != null) {
+            mvKeysType = KeysType.AGG_KEYS;
+        }
         analyzeSelectClause();
         analyzeFromClause();
         if (selectStmt.getWhereClause() != null) {
             throw new AnalysisException("The where clause is not supported in add materialized view clause, expr:"
                                                 + selectStmt.getWhereClause().toSql());
         }
-        if (selectStmt.getGroupByClause() != null) {
-            // TODO(ml): The metadata in fe does not support the deduplicate mv in base table
-            //           which keys type is duplicated.
-            if (selectStmt.getGroupByClause().getGroupingExprs().size() ==
-                    selectStmt.getSelectList().getItems().size()) {
-                throw new AnalysisException("The deduplicate materialized view is not yet supported");
-            }
-        }
         if (selectStmt.getHavingPred() != null) {
             throw new AnalysisException("The having clause is not supported in add materialized view clause, expr:"
                                                 + selectStmt.getHavingPred().toSql());
@@ -230,10 +231,10 @@ public class CreateMaterializedViewStmt extends DdlStmt {
     private void analyzeOrderByClause() throws AnalysisException {
         if (selectStmt.getOrderByElements() == null) {
             /**
-             * Materialized view includes the aggregation functions.
+             * The keys type of Materialized view is aggregation.
              * All of group by columns are keys of materialized view.
              */
-            if (beginIndexOfAggregation != -1) {
+            if (mvKeysType == KeysType.AGG_KEYS) {
                 for (MVColumnItem mvColumnItem : mvColumnItemList) {
                     if (mvColumnItem.getAggregationType() != null) {
                         break;
@@ -242,6 +243,7 @@ public class CreateMaterializedViewStmt extends DdlStmt {
                 }
                 return;
             }
+
             /**
              * There is no aggregation function in materialized view.
              * Supplement key of MV columns
diff --git a/fe/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
index c48d790..da0d516 100644
--- a/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
+++ b/fe/src/main/java/org/apache/doris/backup/RestoreJob.java
@@ -24,13 +24,12 @@ import org.apache.doris.backup.BackupJobInfo.BackupTabletInfo;
 import org.apache.doris.backup.RestoreFileMapping.IdChain;
 import org.apache.doris.backup.Status.ErrCode;
 import org.apache.doris.catalog.Catalog;
-import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.DataProperty;
 import org.apache.doris.catalog.Database;
 import org.apache.doris.catalog.FsBroker;
-import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.OlapTable.OlapTableState;
 import org.apache.doris.catalog.Partition;
@@ -615,23 +614,20 @@ public class RestoreJob extends AbstractJob {
                 Set<String> bfColumns = localTbl.getCopiedBfColumns();
                 double bfFpp = localTbl.getBfFpp();
                 for (MaterializedIndex restoredIdx : restorePart.getMaterializedIndices(IndexExtState.VISIBLE)) {
-                    short shortKeyColumnCount = localTbl.getShortKeyColumnCountByIndexId(restoredIdx.getId());
-                    int schemaHash = localTbl.getSchemaHashByIndexId(restoredIdx.getId());
-                    KeysType keysType = localTbl.getKeysType();
-                    List<Column> columns = localTbl.getSchemaByIndexId(restoredIdx.getId());
+                    MaterializedIndexMeta indexMeta = localTbl.getIndexMetaByIndexId(restoredIdx.getId());
                     TabletMeta tabletMeta = new TabletMeta(db.getId(), localTbl.getId(), restorePart.getId(),
-                            restoredIdx.getId(), schemaHash, TStorageMedium.HDD);
+                            restoredIdx.getId(), indexMeta.getSchemaHash(), TStorageMedium.HDD);
                     for (Tablet restoreTablet : restoredIdx.getTablets()) {
                         Catalog.getCurrentInvertedIndex().addTablet(restoreTablet.getId(), tabletMeta);
                         for (Replica restoreReplica : restoreTablet.getReplicas()) {
                             Catalog.getCurrentInvertedIndex().addReplica(restoreTablet.getId(), restoreReplica);
                             CreateReplicaTask task = new CreateReplicaTask(restoreReplica.getBackendId(), dbId,
                                     localTbl.getId(), restorePart.getId(), restoredIdx.getId(),
-                                    restoreTablet.getId(), shortKeyColumnCount,
-                                    schemaHash, restoreReplica.getVersion(), restoreReplica.getVersionHash(),
-                                    keysType, TStorageType.COLUMN,
+                                    restoreTablet.getId(), indexMeta.getShortKeyColumnCount(),
+                                    indexMeta.getSchemaHash(), restoreReplica.getVersion(),
+                                    restoreReplica.getVersionHash(), indexMeta.getKeysType(), TStorageType.COLUMN,
                                     TStorageMedium.HDD /* all restored replicas will be saved to HDD */,
-                                    columns, bfColumns, bfFpp, null,
+                                    indexMeta.getSchema(), bfColumns, bfFpp, null,
                                     localTbl.getCopiedIndexes(),
                                     localTbl.isInMemory());
                             task.setInRestoreMode(true);
@@ -650,21 +646,20 @@ public class RestoreJob extends AbstractJob {
                     Set<String> bfColumns = restoreTbl.getCopiedBfColumns();
                     double bfFpp = restoreTbl.getBfFpp();
                     for (MaterializedIndex index : restorePart.getMaterializedIndices(IndexExtState.VISIBLE)) {
-                        short shortKeyColumnCount = restoreTbl.getShortKeyColumnCountByIndexId(index.getId());
-                        int schemaHash = restoreTbl.getSchemaHashByIndexId(index.getId());
-                        KeysType keysType = restoreTbl.getKeysType();
-                        List<Column> columns = restoreTbl.getSchemaByIndexId(index.getId());
+                        MaterializedIndexMeta indexMeta = restoreTbl.getIndexMetaByIndexId(index.getId());
                         TabletMeta tabletMeta = new TabletMeta(db.getId(), restoreTbl.getId(), restorePart.getId(),
-                                index.getId(), schemaHash, TStorageMedium.HDD);
+                                index.getId(), indexMeta.getSchemaHash(), TStorageMedium.HDD);
                         for (Tablet tablet : index.getTablets()) {
                             Catalog.getCurrentInvertedIndex().addTablet(tablet.getId(), tabletMeta);
                             for (Replica replica : tablet.getReplicas()) {
                                 Catalog.getCurrentInvertedIndex().addReplica(tablet.getId(), replica);
                                 CreateReplicaTask task = new CreateReplicaTask(replica.getBackendId(), dbId,
                                         restoreTbl.getId(), restorePart.getId(), index.getId(), tablet.getId(),
-                                        shortKeyColumnCount, schemaHash, replica.getVersion(), replica.getVersionHash(),
-                                        keysType, TStorageType.COLUMN, TStorageMedium.HDD, columns,
-                                        bfColumns, bfFpp, null, restoreTbl.getCopiedIndexes(),
+                                        indexMeta.getShortKeyColumnCount(), indexMeta.getSchemaHash(),
+                                        replica.getVersion(), replica.getVersionHash(),
+                                        indexMeta.getKeysType(), TStorageType.COLUMN, TStorageMedium.HDD,
+                                        indexMeta.getSchema(), bfColumns, bfFpp, null,
+                                        restoreTbl.getCopiedIndexes(),
                                         restoreTbl.isInMemory());
                                 task.setInRestoreMode(true);
                                 batchTask.addTask(task);
diff --git a/fe/src/main/java/org/apache/doris/catalog/Catalog.java b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
index c3557e8..bc0e371 100644
--- a/fe/src/main/java/org/apache/doris/catalog/Catalog.java
+++ b/fe/src/main/java/org/apache/doris/catalog/Catalog.java
@@ -2904,10 +2904,7 @@ public class Catalog {
         DistributionInfo distributionInfo = null;
         OlapTable olapTable = null;
 
-        Map<Long, List<Column>> indexIdToSchema = null;
-        Map<Long, Integer> indexIdToSchemaHash = null;
-        Map<Long, Short> indexIdToShortKeyColumnCount = null;
-        Map<Long, TStorageType> indexIdToStorageType = null;
+        Map<Long, MaterializedIndexMeta> indexIdToMeta;
         Set<String> bfColumns = null;
 
         String partitionName = singlePartitionDesc.getPartitionName();
@@ -3001,10 +2998,7 @@ public class Catalog {
                 groupSchema.checkReplicationNum(singlePartitionDesc.getReplicationNum());
             }
 
-            indexIdToShortKeyColumnCount = olapTable.getCopiedIndexIdToShortKeyColumnCount();
-            indexIdToSchemaHash = olapTable.getCopiedIndexIdToSchemaHash();
-            indexIdToStorageType = olapTable.getCopiedIndexIdToStorageType();
-            indexIdToSchema = olapTable.getCopiedIndexIdToSchema();
+            indexIdToMeta = olapTable.getCopiedIndexIdToMeta();
             bfColumns = olapTable.getCopiedBfColumns();
         } catch (AnalysisException e) {
             throw new DdlException(e.getMessage());
@@ -3014,10 +3008,7 @@ public class Catalog {
 
         Preconditions.checkNotNull(distributionInfo);
         Preconditions.checkNotNull(olapTable);
-        Preconditions.checkNotNull(indexIdToShortKeyColumnCount);
-        Preconditions.checkNotNull(indexIdToSchemaHash);
-        Preconditions.checkNotNull(indexIdToStorageType);
-        Preconditions.checkNotNull(indexIdToSchema);
+        Preconditions.checkNotNull(indexIdToMeta);
 
         // create partition outside db lock
         DataProperty dataProperty = singlePartitionDesc.getPartitionDataProperty();
@@ -3030,10 +3021,7 @@ public class Catalog {
                     olapTable.getId(),
                     olapTable.getBaseIndexId(),
                     partitionId, partitionName,
-                    indexIdToShortKeyColumnCount,
-                    indexIdToSchemaHash,
-                    indexIdToStorageType,
-                    indexIdToSchema,
+                    indexIdToMeta,
                     olapTable.getKeysType(),
                     distributionInfo,
                     dataProperty.getStorageMedium(),
@@ -3074,17 +3062,17 @@ public class Catalog {
                 // rollup index may be added or dropped during add partition operation.
                 // schema may be changed during add partition operation.
                 boolean metaChanged = false;
-                if (olapTable.getIndexNameToId().size() != indexIdToSchema.size()) {
+                if (olapTable.getIndexNameToId().size() != indexIdToMeta.size()) {
                     metaChanged = true;
                 } else {
                     // compare schemaHash
-                    for (Map.Entry<Long, Integer> entry : olapTable.getIndexIdToSchemaHash().entrySet()) {
+                    for (Map.Entry<Long, MaterializedIndexMeta> entry : olapTable.getIndexIdToMeta().entrySet()) {
                         long indexId = entry.getKey();
-                        if (!indexIdToSchemaHash.containsKey(indexId)) {
+                        if (!indexIdToMeta.containsKey(indexId)) {
                             metaChanged = true;
                             break;
                         }
-                        if (!indexIdToSchemaHash.get(indexId).equals(entry.getValue())) {
+                        if (indexIdToMeta.get(indexId).getSchemaHash() != entry.getValue().getSchemaHash()) {
                             metaChanged = true;
                             break;
                         }
@@ -3351,10 +3339,7 @@ public class Catalog {
 
     private Partition createPartitionWithIndices(String clusterName, long dbId, long tableId,
                                                  long baseIndexId, long partitionId, String partitionName,
-                                                 Map<Long, Short> indexIdToShortKeyColumnCount,
-                                                 Map<Long, Integer> indexIdToSchemaHash,
-                                                 Map<Long, TStorageType> indexIdToStorageType,
-                                                 Map<Long, List<Column>> indexIdToSchema,
+                                                 Map<Long, MaterializedIndexMeta> indexIdToMeta,
                                                  KeysType keysType,
                                                  DistributionInfo distributionInfo,
                                                  TStorageMedium storageMedium,
@@ -3377,7 +3362,7 @@ public class Catalog {
         indexMap.put(baseIndexId, baseIndex);
 
         // create rollup index if has
-        for (long indexId : indexIdToSchema.keySet()) {
+        for (long indexId : indexIdToMeta.keySet()) {
             if (indexId == baseIndexId) {
                 continue;
             }
@@ -3396,9 +3381,10 @@ public class Catalog {
         for (Map.Entry<Long, MaterializedIndex> entry : indexMap.entrySet()) {
             long indexId = entry.getKey();
             MaterializedIndex index = entry.getValue();
+            MaterializedIndexMeta indexMeta = indexIdToMeta.get(indexId);
 
             // create tablets
-            int schemaHash = indexIdToSchemaHash.get(indexId);
+            int schemaHash = indexMeta.getSchemaHash();
             TabletMeta tabletMeta = new TabletMeta(dbId, tableId, partitionId, indexId, schemaHash, storageMedium);
             createTablets(clusterName, index, ReplicaState.NORMAL, distributionInfo, version, versionHash,
                     replicationNum, tabletMeta, tabletIdSet);
@@ -3407,9 +3393,9 @@ public class Catalog {
             String errMsg = null;
 
             // add create replica task for olap
-            short shortKeyColumnCount = indexIdToShortKeyColumnCount.get(indexId);
-            TStorageType storageType = indexIdToStorageType.get(indexId);
-            List<Column> schema = indexIdToSchema.get(indexId);
+            short shortKeyColumnCount = indexMeta.getShortKeyColumnCount();
+            TStorageType storageType = indexMeta.getStorageType();
+            List<Column> schema = indexMeta.getSchema();
             int totalTaskNum = index.getTablets().size() * replicationNum;
             MarkedCountDownLatch<Long, Long> countDownLatch = new MarkedCountDownLatch<Long, Long>(totalTaskNum);
             AgentBatchTask batchTask = new AgentBatchTask();
@@ -3535,16 +3521,7 @@ public class Catalog {
 
         // set base index info to table
         // this should be done before create partition.
-        // get base index storage type. default is COLUMN
         Map<String, String> properties = stmt.getProperties();
-        TStorageType baseIndexStorageType = null;
-        try {
-            baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties);
-        } catch (AnalysisException e) {
-            throw new DdlException(e.getMessage());
-        }
-        Preconditions.checkNotNull(baseIndexStorageType);
-        olapTable.setStorageTypeToIndex(baseIndexId, baseIndexStorageType);
 
         // analyze bloom filter columns
         Set<String> bfColumns = null;
@@ -3624,7 +3601,15 @@ public class Catalog {
             throw new DdlException(e.getMessage());
         }
 
-        // set index schema
+        // get base index storage type. default is COLUMN
+        TStorageType baseIndexStorageType = null;
+        try {
+            baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties);
+        } catch (AnalysisException e) {
+            throw new DdlException(e.getMessage());
+        }
+        Preconditions.checkNotNull(baseIndexStorageType);
+        // set base index meta
         int schemaVersion = 0;
         try {
             schemaVersion = PropertyAnalyzer.analyzeSchemaVersion(properties);
@@ -3632,8 +3617,8 @@ public class Catalog {
             throw new DdlException(e.getMessage());
         }
         int schemaHash = Util.schemaHash(schemaVersion, baseSchema, bfColumns, bfFpp);
-        olapTable.setIndexSchemaInfo(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash,
-                shortKeyColumnCount);
+        olapTable.setIndexMeta(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash,
+                shortKeyColumnCount, baseIndexStorageType, keysType);
 
 
         for (AlterClause alterClause : stmt.getRollupAlterClauseList()) {
@@ -3641,15 +3626,7 @@ public class Catalog {
 
             Long baseRollupIndex = olapTable.getIndexIdByName(tableName);
 
-            // set rollup index schema to olap table
-            List<Column> rollupColumns = getRollupHandler().checkAndPrepareMaterializedView(addRollupClause, olapTable, baseRollupIndex, false);
-            short rollupShortKeyColumnCount = Catalog.calcShortKeyColumnCount(rollupColumns, alterClause.getProperties());
-            int rollupSchemaHash = Util.schemaHash(schemaVersion, rollupColumns, bfColumns, bfFpp);
-            long rollupIndexId = getCurrentCatalog().getNextId();
-            olapTable.setIndexSchemaInfo(rollupIndexId, addRollupClause.getRollupName(), rollupColumns, schemaVersion, rollupSchemaHash,
-                    rollupShortKeyColumnCount);
-
-            // set storage type for rollup index
+            // get storage type for rollup index
             TStorageType rollupIndexStorageType = null;
             try {
                 rollupIndexStorageType = PropertyAnalyzer.analyzeStorageType(addRollupClause.getProperties());
@@ -3657,7 +3634,14 @@ public class Catalog {
                 throw new DdlException(e.getMessage());
             }
             Preconditions.checkNotNull(rollupIndexStorageType);
-            olapTable.setStorageTypeToIndex(rollupIndexId, rollupIndexStorageType);
+            // set rollup index meta to olap table
+            List<Column> rollupColumns = getRollupHandler().checkAndPrepareMaterializedView(addRollupClause,
+                    olapTable, baseRollupIndex, false);
+            short rollupShortKeyColumnCount = Catalog.calcShortKeyColumnCount(rollupColumns, alterClause.getProperties());
+            int rollupSchemaHash = Util.schemaHash(schemaVersion, rollupColumns, bfColumns, bfFpp);
+            long rollupIndexId = getCurrentCatalog().getNextId();
+            olapTable.setIndexMeta(rollupIndexId, addRollupClause.getRollupName(), rollupColumns, schemaVersion,
+                    rollupSchemaHash, rollupShortKeyColumnCount, rollupIndexStorageType, keysType);
         }
 
         // analyze version info
@@ -3684,10 +3668,7 @@ public class Catalog {
                 Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(),
                         olapTable.getId(), olapTable.getBaseIndexId(),
                         partitionId, partitionName,
-                        olapTable.getIndexIdToShortKeyColumnCount(),
-                        olapTable.getIndexIdToSchemaHash(),
-                        olapTable.getIndexIdToStorageType(),
-                        olapTable.getIndexIdToSchema(),
+                        olapTable.getIndexIdToMeta(),
                         keysType,
                         distributionInfo,
                         partitionInfo.getDataProperty(partitionId).getStorageMedium(),
@@ -3717,10 +3698,7 @@ public class Catalog {
                     DataProperty dataProperty = rangePartitionInfo.getDataProperty(entry.getValue());
                     Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(), olapTable.getId(),
                             olapTable.getBaseIndexId(), entry.getValue(), entry.getKey(),
-                            olapTable.getIndexIdToShortKeyColumnCount(),
-                            olapTable.getIndexIdToSchemaHash(),
-                            olapTable.getIndexIdToStorageType(),
-                            olapTable.getIndexIdToSchema(),
+                            olapTable.getIndexIdToMeta(),
                             keysType, distributionInfo,
                             dataProperty.getStorageMedium(),
                             partitionInfo.getReplicationNum(entry.getValue()),
@@ -4137,19 +4115,21 @@ public class Catalog {
         // 3. rollup
         if (createRollupStmt != null && (table instanceof OlapTable)) {
             OlapTable olapTable = (OlapTable) table;
-            for (Map.Entry<Long, List<Column>> entry : olapTable.getIndexIdToSchema().entrySet()) {
+            for (Map.Entry<Long, MaterializedIndexMeta> entry : olapTable.getIndexIdToMeta().entrySet()) {
                 if (entry.getKey() == olapTable.getBaseIndexId()) {
                     continue;
                 }
+                MaterializedIndexMeta materializedIndexMeta = entry.getValue();
                 sb = new StringBuilder();
-                String indexName = olapTable.getIndexNameById(entry.getKey());
+                String indexName = materializedIndexMeta.getIndexName();
                 sb.append("ALTER TABLE ").append(table.getName()).append(" ADD ROLLUP ").append(indexName);
                 sb.append("(");
 
-                for (int i = 0; i < entry.getValue().size(); i++) {
-                    Column column = entry.getValue().get(i);
+                List<Column> indexSchema = materializedIndexMeta.getSchema();
+                for (int i = 0; i < indexSchema.size(); i++) {
+                    Column column = indexSchema.get(i);
                     sb.append(column.getName());
-                    if (i != entry.getValue().size() - 1) {
+                    if (i != indexSchema.size() - 1) {
                         sb.append(", ");
                     }
                 }
@@ -6223,10 +6203,7 @@ public class Catalog {
                 Partition newPartition = createPartitionWithIndices(db.getClusterName(),
                         db.getId(), copiedTbl.getId(), copiedTbl.getBaseIndexId(),
                         newPartitionId, entry.getKey(),
-                        copiedTbl.getIndexIdToShortKeyColumnCount(),
-                        copiedTbl.getIndexIdToSchemaHash(),
-                        copiedTbl.getIndexIdToStorageType(),
-                        copiedTbl.getIndexIdToSchema(),
+                        copiedTbl.getIndexIdToMeta(),
                         copiedTbl.getKeysType(),
                         copiedTbl.getDefaultDistributionInfo(),
                         copiedTbl.getPartitionInfo().getDataProperty(oldPartitionId).getStorageMedium(),
diff --git a/fe/src/main/java/org/apache/doris/catalog/KeysType.java b/fe/src/main/java/org/apache/doris/catalog/KeysType.java
index 8255e8a..cf22842 100644
--- a/fe/src/main/java/org/apache/doris/catalog/KeysType.java
+++ b/fe/src/main/java/org/apache/doris/catalog/KeysType.java
@@ -50,6 +50,21 @@ public enum KeysType {
         }
     }
 
+    public static KeysType fromThrift(TKeysType tKeysType) {
+        switch (tKeysType) {
+            case PRIMARY_KEYS:
+                return KeysType.PRIMARY_KEYS;
+            case DUP_KEYS:
+                return KeysType.DUP_KEYS;
+            case UNIQUE_KEYS:
+                return KeysType.UNIQUE_KEYS;
+            case AGG_KEYS:
+                return KeysType.AGG_KEYS;
+            default:
+                return null;
+        }
+    }
+
     public String toSql() {
         switch (this) {
             case PRIMARY_KEYS:
diff --git a/fe/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java b/fe/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java
new file mode 100644
index 0000000..c0276c4
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/catalog/MaterializedIndexMeta.java
@@ -0,0 +1,148 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.common.io.Text;
+import org.apache.doris.common.io.Writable;
+import org.apache.doris.persist.gson.GsonUtils;
+import org.apache.doris.thrift.TStorageType;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.gson.annotations.SerializedName;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+public class MaterializedIndexMeta implements Writable {
+    @SerializedName(value = "indexId")
+    private long indexId;
+    @SerializedName(value = "indexName")
+    private String indexName;
+    @SerializedName(value = "schema")
+    private List<Column> schema = Lists.newArrayList();
+    @SerializedName(value = "schemaVersion")
+    private int schemaVersion;
+    @SerializedName(value = "schemaHash")
+    private int schemaHash;
+    @SerializedName(value = "shortKeyColumnCount")
+    private short shortKeyColumnCount;
+    @SerializedName(value = "storageType")
+    private TStorageType storageType;
+    @SerializedName(value = "keysType")
+    private KeysType keysType;
+
+    public MaterializedIndexMeta(long indexId, String indexName, List<Column> schema, int schemaVersion, int
+            schemaHash, short shortKeyColumnCount, TStorageType storageType, KeysType keysType) {
+        this.indexId = indexId;
+        Preconditions.checkState(indexName != null);
+        this.indexName = indexName;
+        Preconditions.checkState(schema != null);
+        Preconditions.checkState(schema.size() != 0);
+        this.schema = schema;
+        this.schemaVersion = schemaVersion;
+        this.schemaHash = schemaHash;
+        this.shortKeyColumnCount = shortKeyColumnCount;
+        Preconditions.checkState(storageType != null);
+        this.storageType = storageType;
+        Preconditions.checkState(keysType != null);
+        this.keysType = keysType;
+    }
+
+    public long getIndexId() {
+        return indexId;
+    }
+
+    public String getIndexName() {
+        return indexName;
+    }
+
+    public KeysType getKeysType() {
+        return keysType;
+    }
+
+    public void setKeysType(KeysType keysType) {
+        this.keysType = keysType;
+    }
+
+    public TStorageType getStorageType() {
+        return storageType;
+    }
+
+    public List<Column> getSchema() {
+        return schema;
+    }
+
+    public int getSchemaHash() {
+        return schemaHash;
+    }
+
+    public short getShortKeyColumnCount() {
+        return shortKeyColumnCount;
+    }
+
+    public int getSchemaVersion() {
+        return schemaVersion;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof MaterializedIndexMeta)) {
+            return false;
+        }
+        MaterializedIndexMeta indexMeta = (MaterializedIndexMeta) obj;
+        if (indexMeta.indexId != this.indexId) {
+            return false;
+        }
+        if (!indexMeta.indexName.equals(this.indexName)) {
+            return false;
+        }
+        if (indexMeta.schema.size() != this.schema.size() || !indexMeta.schema.containsAll(this.schema)) {
+            return false;
+        }
+        if (indexMeta.schemaVersion != this.schemaVersion) {
+            return false;
+        }
+        if (indexMeta.schemaHash != this.schemaHash) {
+            return false;
+        }
+        if (indexMeta.shortKeyColumnCount != this.shortKeyColumnCount) {
+            return false;
+        }
+        if (indexMeta.storageType != this.storageType) {
+            return false;
+        }
+        if (indexMeta.keysType != this.keysType) {
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    public void write(DataOutput out) throws IOException {
+        Text.writeString(out, GsonUtils.GSON.toJson(this));
+    }
+
+    public static MaterializedIndexMeta read(DataInput in) throws IOException {
+        String json = Text.readString(in);
+        return GsonUtils.GSON.fromJson(json, MaterializedIndexMeta.class);
+    }
+
+}
diff --git a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
index 99aca7d..006a0b5 100644
--- a/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -17,11 +17,7 @@
 
 package org.apache.doris.catalog;
 
-import org.apache.doris.analysis.AddRollupClause;
-import org.apache.doris.analysis.AlterClause;
-import org.apache.doris.analysis.AlterTableStmt;
 import org.apache.doris.analysis.CreateTableStmt;
-import org.apache.doris.analysis.TableName;
 import org.apache.doris.backup.Status;
 import org.apache.doris.backup.Status.ErrCode;
 import org.apache.doris.catalog.DistributionInfo.DistributionInfoType;
@@ -90,23 +86,16 @@ public class OlapTable extends Table {
     }
 
     private OlapTableState state;
-    // index id -> table's schema
-    private Map<Long, List<Column>> indexIdToSchema;
-    // index id -> table's schema version
-    private Map<Long, Integer> indexIdToSchemaVersion;
-    // index id -> table's schema hash
-    private Map<Long, Integer> indexIdToSchemaHash;
-    // index id -> table's short key column count
-    private Map<Long, Short> indexIdToShortKeyColumnCount;
-    // index id -> table's storage type
-    private Map<Long, TStorageType> indexIdToStorageType;
+
+    // index id -> index meta
+    private Map<Long, MaterializedIndexMeta> indexIdToMeta = Maps.newHashMap();
     // index name -> index id
-    private Map<String, Long> indexNameToId;
+    private Map<String, Long> indexNameToId = Maps.newHashMap();
 
     private KeysType keysType;
     private PartitionInfo partitionInfo;
-    private Map<Long, Partition> idToPartition;
-    private Map<String, Partition> nameToPartition;
+    private Map<Long, Partition> idToPartition = new HashMap<>();
+    private Map<String, Partition> nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
 
     private DistributionInfo defaultDistributionInfo;
 
@@ -134,17 +123,6 @@ public class OlapTable extends Table {
     public OlapTable() {
         // for persist
         super(TableType.OLAP);
-        this.indexIdToSchema = new HashMap<Long, List<Column>>();
-        this.indexIdToSchemaHash = new HashMap<Long, Integer>();
-        this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
-
-        this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
-        this.indexIdToStorageType = new HashMap<Long, TStorageType>();
-
-        this.indexNameToId = new HashMap<String, Long>();
-
-        this.idToPartition = new HashMap<Long, Partition>();
-        this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
 
         this.bfColumns = null;
         this.bfFpp = 0;
@@ -167,18 +145,6 @@ public class OlapTable extends Table {
 
         this.state = OlapTableState.NORMAL;
 
-        this.indexIdToSchema = new HashMap<Long, List<Column>>();
-        this.indexIdToSchemaHash = new HashMap<Long, Integer>();
-        this.indexIdToSchemaVersion = new HashMap<Long, Integer>();
-
-        this.indexIdToShortKeyColumnCount = new HashMap<Long, Short>();
-        this.indexIdToStorageType = new HashMap<Long, TStorageType>();
-
-        this.indexNameToId = new HashMap<String, Long>();
-
-        this.idToPartition = new HashMap<Long, Partition>();
-        this.nameToPartition = Maps.newTreeMap(String.CASE_INSENSITIVE_ORDER);
-
         this.keysType = keysType;
         this.partitionInfo = partitionInfo;
         if (partitionInfo.getType() == PartitionType.RANGE) {
@@ -272,25 +238,34 @@ public class OlapTable extends Table {
         return indexNameToId.containsKey(indexName);
     }
 
-    /*
-     * Set index schema info for specified index.
-     */
-    public void setIndexSchemaInfo(Long indexId, String indexName, List<Column> schema, int schemaVersion,
-                                   int schemaHash, short shortKeyColumnCount) {
+    public void setIndexMeta(long indexId, String indexName, List<Column> schema, int schemaVersion, int schemaHash,
+            short shortKeyColumnCount, TStorageType storageType, KeysType keysType) {
+        // Nullable when meta comes from schema change
         if (indexName == null) {
-            Preconditions.checkState(indexNameToId.containsValue(indexId));
+            MaterializedIndexMeta oldIndexMeta = indexIdToMeta.get(indexId);
+            Preconditions.checkState(oldIndexMeta != null);
+            indexName = oldIndexMeta.getIndexName();
+            Preconditions.checkState(indexName != null);
+        }
+        // Nullable when meta is less then VERSION_74
+        if (keysType == null) {
+            keysType = this.keysType;
+        }
+        // Nullable when meta comes from schema change
+        if (storageType == null) {
+            MaterializedIndexMeta oldIndexMeta = indexIdToMeta.get(indexId);
+            Preconditions.checkState(oldIndexMeta != null);
+            storageType = oldIndexMeta.getStorageType();
+            Preconditions.checkState(storageType != null);
         } else {
-            indexNameToId.put(indexName, indexId);
+            // The new storage type must be TStorageType.COLUMN
+            Preconditions.checkState(storageType == TStorageType.COLUMN);
         }
-        indexIdToSchema.put(indexId, schema);
-        indexIdToSchemaVersion.put(indexId, schemaVersion);
-        indexIdToSchemaHash.put(indexId, schemaHash);
-        indexIdToShortKeyColumnCount.put(indexId, shortKeyColumnCount);
-    }
 
-    public void setIndexStorageType(Long indexId, TStorageType newStorageType) {
-        Preconditions.checkState(newStorageType == TStorageType.COLUMN);
-        indexIdToStorageType.put(indexId, newStorageType);
+        MaterializedIndexMeta indexMeta = new MaterializedIndexMeta(indexId, indexName, schema, schemaVersion,
+                schemaHash, shortKeyColumnCount, storageType, keysType);
+        indexIdToMeta.put(indexId, indexMeta);
+        indexNameToId.put(indexName, indexId);
     }
 
     // rebuild the full schema of table
@@ -298,12 +273,12 @@ public class OlapTable extends Table {
     public void rebuildFullSchema() {
         fullSchema.clear();
         nameToColumn.clear();
-        for (Column baseColumn : indexIdToSchema.get(baseIndexId)) {
+        for (Column baseColumn : indexIdToMeta.get(baseIndexId).getSchema()) {
             fullSchema.add(baseColumn);
             nameToColumn.put(baseColumn.getName(), baseColumn);
         }
-        for (List<Column> columns : indexIdToSchema.values()) {
-            for (Column column : columns) {
+        for (MaterializedIndexMeta indexMeta : indexIdToMeta.values()) {
+            for (Column column : indexMeta.getSchema()) {
                 if (!nameToColumn.containsKey(column.getName())) {
                     fullSchema.add(column);
                     nameToColumn.put(column.getName(), column);
@@ -319,11 +294,7 @@ public class OlapTable extends Table {
         }
 
         long indexId = this.indexNameToId.remove(indexName);
-        indexIdToSchema.remove(indexId);
-        indexIdToSchemaVersion.remove(indexId);
-        indexIdToSchemaHash.remove(indexId);
-        indexIdToShortKeyColumnCount.remove(indexId);
-        indexIdToStorageType.remove(indexId);
+        this.indexIdToMeta.remove(indexId);
         return true;
     }
 
@@ -344,11 +315,11 @@ public class OlapTable extends Table {
         return null;
     }
 
-    public Map<Long, List<Column>> getVisibleIndexIdToSchema() {
-        Map<Long, List<Column>> visibleMVs = Maps.newHashMap();
+    public Map<Long, MaterializedIndexMeta> getVisibleIndexIdToMeta() {
+        Map<Long, MaterializedIndexMeta> visibleMVs = Maps.newHashMap();
         List<MaterializedIndex> mvs = getVisibleIndex();
         for (MaterializedIndex mv : mvs) {
-            visibleMVs.put(mv.getId(), indexIdToSchema.get(mv.getId()));
+            visibleMVs.put(mv.getId(), indexIdToMeta.get(mv.getId()));
         }
         return visibleMVs;
     }
@@ -365,7 +336,7 @@ public class OlapTable extends Table {
     }
 
     public void renameColumnNamePrefix(long idxId) {
-        List<Column> columns = indexIdToSchema.get(idxId);
+        List<Column> columns = indexIdToMeta.get(idxId).getSchema();
         for (Column column : columns) {
             column.setName(Column.removeNamePrefix(column.getName()));
         }
@@ -388,11 +359,7 @@ public class OlapTable extends Table {
                 // base index
                 baseIndexId = newIdxId;
             }
-            indexIdToSchema.put(newIdxId, indexIdToSchema.remove(entry.getKey()));
-            indexIdToSchemaHash.put(newIdxId, indexIdToSchemaHash.remove(entry.getKey()));
-            indexIdToSchemaVersion.put(newIdxId, indexIdToSchemaVersion.remove(entry.getKey()));
-            indexIdToShortKeyColumnCount.put(newIdxId, indexIdToShortKeyColumnCount.remove(entry.getKey()));
-            indexIdToStorageType.put(newIdxId, indexIdToStorageType.remove(entry.getKey()));
+            indexIdToMeta.put(newIdxId, indexIdToMeta.remove(entry.getKey()));
             indexNameToId.put(entry.getValue(), newIdxId);
         }
 
@@ -436,7 +403,7 @@ public class OlapTable extends Table {
             for (Map.Entry<Long, String> entry2 : origIdxIdToName.entrySet()) {
                 MaterializedIndex idx = partition.getIndex(entry2.getKey());
                 long newIdxId = indexNameToId.get(entry2.getValue());
-                int schemaHash = indexIdToSchemaHash.get(newIdxId);
+                int schemaHash = indexIdToMeta.get(newIdxId).getSchemaHash();
                 idx.setIdForRestore(newIdxId);
                 if (newIdxId != baseIndexId) {
                     // not base table, reset
@@ -477,17 +444,39 @@ public class OlapTable extends Table {
         return Status.OK;
     }
 
-    // schema
-    public Map<Long, List<Column>> getIndexIdToSchema() {
-        return indexIdToSchema;
+    public Map<Long, MaterializedIndexMeta> getIndexIdToMeta() {
+        return indexIdToMeta;
     }
 
-    public Map<Long, List<Column>> getCopiedIndexIdToSchema() {
-        return new HashMap<>(indexIdToSchema);
+    public Map<Long, MaterializedIndexMeta> getCopiedIndexIdToMeta() {
+        return new HashMap<>(indexIdToMeta);
+    }
+
+    public MaterializedIndexMeta getIndexMetaByIndexId(long indexId) {
+        return indexIdToMeta.get(indexId);
+    }
+
+    public List<Long> getIndexIdListExceptBaseIndex() {
+        List<Long> result = Lists.newArrayList();
+        for (Long indexId : indexIdToMeta.keySet()) {
+            if (indexId != baseIndexId) {
+                result.add(indexId);
+            }
+        }
+        return result;
+    }
+
+    // schema
+    public Map<Long, List<Column>> getIndexIdToSchema() {
+        Map<Long, List<Column>> result = Maps.newHashMap();
+        for (Map.Entry<Long, MaterializedIndexMeta> entry : indexIdToMeta.entrySet()) {
+            result.put(entry.getKey(), entry.getValue().getSchema());
+        }
+        return result;
     }
 
     public List<Column> getSchemaByIndexId(Long indexId) {
-        return indexIdToSchema.get(indexId);
+        return indexIdToMeta.get(indexId).getSchema();
     }
 
     public List<Column> getKeyColumnsByIndexId(Long indexId) {
@@ -502,61 +491,29 @@ public class OlapTable extends Table {
         return keyColumns;
     }
 
-    // schema version
-    public int getSchemaVersionByIndexId(Long indexId) {
-        if (indexIdToSchemaVersion.containsKey(indexId)) {
-            return indexIdToSchemaVersion.get(indexId);
-        }
-        return -1;
-    }
-
     // schemaHash
     public Map<Long, Integer> getIndexIdToSchemaHash() {
-        return indexIdToSchemaHash;
-    }
-
-    public Map<Long, Integer> getCopiedIndexIdToSchemaHash() {
-        return new HashMap<>(indexIdToSchemaHash);
-    }
-
-    public int getSchemaHashByIndexId(Long indexId) {
-        if (indexIdToSchemaHash.containsKey(indexId)) {
-            return indexIdToSchemaHash.get(indexId);
+        Map<Long, Integer> result = Maps.newHashMap();
+        for (Map.Entry<Long, MaterializedIndexMeta> entry : indexIdToMeta.entrySet()) {
+            result.put(entry.getKey(), entry.getValue().getSchemaHash());
         }
-        return -1;
-    }
-
-    // short key
-    public Map<Long, Short> getIndexIdToShortKeyColumnCount() {
-        return indexIdToShortKeyColumnCount;
+        return result;
     }
 
-    public Map<Long, Short> getCopiedIndexIdToShortKeyColumnCount() {
-        return new HashMap<>(indexIdToShortKeyColumnCount);
-    }
-
-    public short getShortKeyColumnCountByIndexId(Long indexId) {
-        if (indexIdToShortKeyColumnCount.containsKey(indexId)) {
-            return indexIdToShortKeyColumnCount.get(indexId);
+    public int getSchemaHashByIndexId(Long indexId) {
+        MaterializedIndexMeta indexMeta = indexIdToMeta.get(indexId);
+        if (indexMeta == null) {
+            return -1;
         }
-        return (short) -1;
-    }
-
-    // storage type
-    public Map<Long, TStorageType> getIndexIdToStorageType() {
-        return indexIdToStorageType;
-    }
-
-    public Map<Long, TStorageType> getCopiedIndexIdToStorageType() {
-        return new HashMap<>(indexIdToStorageType);
-    }
-
-    public void setStorageTypeToIndex(Long indexId, TStorageType storageType) {
-        indexIdToStorageType.put(indexId, storageType);
+        return indexMeta.getSchemaHash();
     }
 
     public TStorageType getStorageTypeByIndexId(Long indexId) {
-        return indexIdToStorageType.get(indexId);
+        MaterializedIndexMeta indexMeta = indexIdToMeta.get(indexId);
+        if (indexMeta == null) {
+            return TStorageType.COLUMN;
+        }
+        return indexMeta.getStorageType();
     }
 
     public KeysType getKeysType() {
@@ -711,34 +668,6 @@ public class OlapTable extends Table {
         return rowCount;
     }
 
-    public AlterTableStmt toAddRollupStmt(String dbName, Collection<Long> indexIds) {
-        List<AlterClause> alterClauses = Lists.newArrayList();
-        for (Map.Entry<String, Long> entry : indexNameToId.entrySet()) {
-            String indexName = entry.getKey();
-            long indexId = entry.getValue();
-            if (!indexIds.contains(indexId)) {
-                continue;
-            }
-
-            // cols
-            List<String> columnNames = Lists.newArrayList();
-            for (Column column : indexIdToSchema.get(indexId)) {
-                columnNames.add(column.getName());
-            }
-            
-            // properties
-            Map<String, String> properties = Maps.newHashMap();
-            properties.put(PropertyAnalyzer.PROPERTIES_STORAGE_TYPE, indexIdToStorageType.get(indexId).name());
-            properties.put(PropertyAnalyzer.PROPERTIES_SHORT_KEY, indexIdToShortKeyColumnCount.get(indexId).toString());
-            properties.put(PropertyAnalyzer.PROPERTIES_SCHEMA_VERSION, indexIdToSchemaVersion.get(indexId).toString());
-
-            AddRollupClause addRollupClause = new AddRollupClause(indexName, columnNames, null, null, properties);
-            alterClauses.add(addRollupClause);
-        }
-
-        return new AlterTableStmt(new TableName(dbName, name), alterClauses);
-    }
-
     @Override
     public CreateTableStmt toCreateTableStmt(String dbName) {
         throw new RuntimeException("Don't support anymore");
@@ -764,15 +693,16 @@ public class OlapTable extends Table {
                 long indexId = indexNameToId.get(indexName);
                 adler32.update(indexName.getBytes(charsetName));
                 LOG.debug("signature. index name: {}", indexName);
+                MaterializedIndexMeta indexMeta = indexIdToMeta.get(indexId);
                 // schema hash
-                adler32.update(indexIdToSchemaHash.get(indexId));
-                LOG.debug("signature. index schema hash: {}", indexIdToSchemaHash.get(indexId));
+                adler32.update(indexMeta.getSchemaHash());
+                LOG.debug("signature. index schema hash: {}", indexMeta.getSchemaHash());
                 // short key column count
-                adler32.update(indexIdToShortKeyColumnCount.get(indexId));
-                LOG.debug("signature. index short key: {}", indexIdToShortKeyColumnCount.get(indexId));
+                adler32.update(indexMeta.getShortKeyColumnCount());
+                LOG.debug("signature. index short key: {}", indexMeta.getShortKeyColumnCount());
                 // storage type
-                adler32.update(indexIdToStorageType.get(indexId).name().getBytes(charsetName));
-                LOG.debug("signature. index storage type: {}", indexIdToStorageType.get(indexId));
+                adler32.update(indexMeta.getStorageType().name().getBytes(charsetName));
+                LOG.debug("signature. index storage type: {}", indexMeta.getStorageType());
             }
 
             // bloom filter
@@ -862,23 +792,7 @@ public class OlapTable extends Table {
             long indexId = entry.getValue();
             Text.writeString(out, indexName);
             out.writeLong(indexId);
-            // schema
-            out.writeInt(indexIdToSchema.get(indexId).size());
-            for (Column column : indexIdToSchema.get(indexId)) {
-                column.write(out);
-            }
-
-            // storage type
-            Text.writeString(out, indexIdToStorageType.get(indexId).name());
-
-            // indices's schema version
-            out.writeInt(indexIdToSchemaVersion.get(indexId));
-
-            // indices's schema hash
-            out.writeInt(indexIdToSchemaHash.get(indexId));
-
-            // indices's short key column count
-            out.writeShort(indexIdToShortKeyColumnCount.get(indexId));
+            indexIdToMeta.get(indexId).write(out);
         }
 
         Text.writeString(out, keysType.name());
@@ -942,32 +856,42 @@ public class OlapTable extends Table {
 
         // indices's schema
         int counter = in.readInt();
+        // tmp index meta list
+        List<MaterializedIndexMeta> tmpIndexMetaList = Lists.newArrayList();
         for (int i = 0; i < counter; i++) {
             String indexName = Text.readString(in);
             long indexId = in.readLong();
             this.indexNameToId.put(indexName, indexId);
 
-            // schema
-            int colCount = in.readInt();
-            List<Column> schema = new LinkedList<Column>();
-            for (int j = 0; j < colCount; j++) {
-                Column column = Column.read(in);
-                schema.add(column);
-            }
-            this.indexIdToSchema.put(indexId, schema);
+            if (Catalog.getCurrentCatalogJournalVersion() < FeMetaVersion.VERSION_75) {
+                // schema
+                int colCount = in.readInt();
+                List<Column> schema = new LinkedList<Column>();
+                for (int j = 0; j < colCount; j++) {
+                    Column column = Column.read(in);
+                    schema.add(column);
+                }
+
+                // storage type
+                TStorageType storageType = TStorageType.valueOf(Text.readString(in));
 
-            // storage type
-            TStorageType type = TStorageType.valueOf(Text.readString(in));
-            this.indexIdToStorageType.put(indexId, type);
+                // indices's schema version
+                int schemaVersion = in.readInt();
 
-            // indices's schema version
-            this.indexIdToSchemaVersion.put(indexId, in.readInt());
+                // indices's schema hash
+                int schemaHash = in.readInt();
 
-            // indices's schema hash
-            this.indexIdToSchemaHash.put(indexId, in.readInt());
+                // indices's short key column count
+                short shortKeyColumnCount = in.readShort();
 
-            // indices's short key column count
-            this.indexIdToShortKeyColumnCount.put(indexId, in.readShort());
+                // The keys type set here is only a placeholder; it is corrected below once the table's keys type has been read
+                MaterializedIndexMeta indexMeta = new MaterializedIndexMeta(indexId, indexName, schema,
+                        schemaVersion, schemaHash, shortKeyColumnCount, storageType, KeysType.AGG_KEYS);
+                tmpIndexMetaList.add(indexMeta);
+            } else {
+                MaterializedIndexMeta indexMeta = MaterializedIndexMeta.read(in);
+                indexIdToMeta.put(indexId, indexMeta);
+            }
         }
 
         // partition and distribution info
@@ -977,6 +901,12 @@ public class OlapTable extends Table {
             keysType = KeysType.AGG_KEYS;
         }
 
+        // set the correct keys type on the tmp index metas read from older meta versions
+        for (MaterializedIndexMeta indexMeta : tmpIndexMetaList) {
+            indexMeta.setKeysType(keysType);
+            indexIdToMeta.put(indexMeta.getIndexId(), indexMeta);
+        }
+
         PartitionType partType = PartitionType.valueOf(Text.readString(in));
         if (partType == PartitionType.UNPARTITIONED) {
             partitionInfo = SinglePartitionInfo.read(in);
@@ -1211,7 +1141,7 @@ public class OlapTable extends Table {
 
     @Override
     public List<Column> getBaseSchema() {
-        return indexIdToSchema.get(baseIndexId);
+        return getSchemaByIndexId(baseIndexId);
     }
 
     public int getKeysNum() {
@@ -1226,7 +1156,7 @@ public class OlapTable extends Table {
 
     public boolean convertRandomDistributionToHashDistribution() {
         boolean hasChanged = false;
-        List<Column> baseSchema = indexIdToSchema.get(baseIndexId);
+        List<Column> baseSchema = getBaseSchema();
         if (defaultDistributionInfo.getType() == DistributionInfoType.RANDOM) {
             defaultDistributionInfo = ((RandomDistributionInfo) defaultDistributionInfo).toHashDistributionInfo(baseSchema);
             hasChanged = true;
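
To make the consolidation concrete: the five per-index maps removed above (schema, schema version, schema hash, short key column count, storage type) are now reached through a single MaterializedIndexMeta per index. Below is a minimal usage sketch assembled from the constructor and setIndexMeta calls that appear in the test changes later in this patch; the class name, ids and literal values are invented for illustration and are not code from the commit.

import org.apache.doris.catalog.Column;
import org.apache.doris.catalog.KeysType;
import org.apache.doris.catalog.MaterializedIndexMeta;
import org.apache.doris.catalog.OlapTable;
import org.apache.doris.catalog.RandomDistributionInfo;
import org.apache.doris.catalog.SinglePartitionInfo;
import org.apache.doris.catalog.Type;
import org.apache.doris.thrift.TStorageType;

import com.google.common.collect.Lists;

import java.util.List;

public class IndexMetaUsageSketch {
    public static void main(String[] args) {
        // one-column schema, mirroring the new MaterializedIndexMetaTest below
        List<Column> baseSchema = Lists.newArrayList(
                new Column("k1", Type.INT, true, null, true, "1", ""));

        OlapTable table = new OlapTable(30000L, "sketch_tbl", baseSchema, KeysType.DUP_KEYS,
                new SinglePartitionInfo(), new RandomDistributionInfo(10));
        // schema version 0, schema hash 1, short key column count 1
        table.setIndexMeta(30001L, "sketch_tbl", baseSchema, 0, 1, (short) 1,
                TStorageType.COLUMN, KeysType.DUP_KEYS);
        table.setBaseIndexId(30001L);

        // everything that used to live in separate maps now hangs off one meta object
        MaterializedIndexMeta meta = table.getIndexMetaByIndexId(30001L);
        System.out.println(meta.getSchemaHash());          // 1
        System.out.println(meta.getShortKeyColumnCount()); // 1
        System.out.println(meta.getStorageType());         // COLUMN
        System.out.println(meta.getKeysType());            // DUP_KEYS
    }
}
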
diff --git a/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java b/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
index 0d260e8..aeb605f 100644
--- a/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
+++ b/fe/src/main/java/org/apache/doris/common/FeMetaVersion.java
@@ -159,7 +159,8 @@ public final class FeMetaVersion {
     public static final int VERSION_73 = 73;
     // temp partitions
     public static final int VERSION_74 = 74;
-
+    // support materialized index meta, which allows different materialized indexes to have different keys types
+    public static final int VERSION_75 = 75;
     // note: when increment meta version, should assign the latest version to VERSION_CURRENT
-    public static final int VERSION_CURRENT = VERSION_74;
+    public static final int VERSION_CURRENT = VERSION_75;
 }
diff --git a/fe/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java b/fe/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
index 9d13d9e..95d61dd 100644
--- a/fe/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
+++ b/fe/src/main/java/org/apache/doris/common/proc/IndexInfoProcDir.java
@@ -19,11 +19,11 @@ package org.apache.doris.common.proc;
 
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Table;
 import org.apache.doris.catalog.Table.TableType;
 import org.apache.doris.common.AnalysisException;
-import org.apache.doris.thrift.TStorageType;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -66,18 +66,10 @@ public class IndexInfoProcDir implements ProcDirInterface {
                 // indices order
                 List<Long> indices = Lists.newArrayList();
                 indices.add(olapTable.getBaseIndexId());
-                for (Long indexId : olapTable.getIndexIdToSchema().keySet()) {
-                    if (indexId != olapTable.getBaseIndexId()) {
-                        indices.add(indexId);
-                    }
-                }
+                indices.addAll(olapTable.getIndexIdListExceptBaseIndex());
 
                 for (long indexId : indices) {
-                    int schemaVersion = olapTable.getSchemaVersionByIndexId(indexId);
-                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
-                    short shortKeyColumnCount = olapTable.getShortKeyColumnCountByIndexId(indexId);
-                    TStorageType storageType = olapTable.getStorageTypeByIndexId(indexId);
-                    String indexName = olapTable.getIndexNameById(indexId);
+                    MaterializedIndexMeta indexMeta = olapTable.getIndexIdToMeta().get(indexId);
 
                     String type = olapTable.getKeysType().name();
                     StringBuilder builder = new StringBuilder();
@@ -92,11 +84,11 @@ public class IndexInfoProcDir implements ProcDirInterface {
                     builder.append(Joiner.on(", ").join(columnNames)).append(")");
 
                     result.addRow(Lists.newArrayList(String.valueOf(indexId),
-                            indexName,
-                            String.valueOf(schemaVersion),
-                            String.valueOf(schemaHash),
-                            String.valueOf(shortKeyColumnCount),
-                            storageType.name(),
+                            indexMeta.getIndexName(),
+                            String.valueOf(indexMeta.getSchemaVersion()),
+                            String.valueOf(indexMeta.getSchemaHash()),
+                            String.valueOf(indexMeta.getShortKeyColumnCount()),
+                            indexMeta.getStorageType().name(),
                             builder.toString()));
                 }
             } else {
diff --git a/fe/src/main/java/org/apache/doris/http/rest/StorageTypeCheckAction.java b/fe/src/main/java/org/apache/doris/http/rest/StorageTypeCheckAction.java
index cf206d5..69bb662 100644
--- a/fe/src/main/java/org/apache/doris/http/rest/StorageTypeCheckAction.java
+++ b/fe/src/main/java/org/apache/doris/http/rest/StorageTypeCheckAction.java
@@ -18,6 +18,7 @@
 package org.apache.doris.http.rest;
 
 import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Table;
 import org.apache.doris.catalog.Table.TableType;
@@ -76,10 +77,10 @@ public class StorageTypeCheckAction extends RestBaseAction {
 
                 OlapTable olapTbl = (OlapTable) tbl;
                 JSONObject indexObj = new JSONObject();
-                for (Map.Entry<Long, TStorageType> entry : olapTbl.getIndexIdToStorageType().entrySet()) {
-                    if (entry.getValue() == TStorageType.ROW) {
-                        String idxName = olapTbl.getIndexNameById(entry.getKey());
-                        indexObj.put(idxName, entry.getValue().name());
+                for (Map.Entry<Long, MaterializedIndexMeta> entry : olapTbl.getIndexIdToMeta().entrySet()) {
+                    MaterializedIndexMeta indexMeta = entry.getValue();
+                    if (indexMeta.getStorageType() == TStorageType.ROW) {
+                        indexObj.put(indexMeta.getIndexName(), indexMeta.getStorageType().name());
                     }
                 }
                 root.put(tbl.getName(), indexObj);
diff --git a/fe/src/main/java/org/apache/doris/master/ReportHandler.java b/fe/src/main/java/org/apache/doris/master/ReportHandler.java
index ca5c756..b262a73 100644
--- a/fe/src/main/java/org/apache/doris/master/ReportHandler.java
+++ b/fe/src/main/java/org/apache/doris/master/ReportHandler.java
@@ -21,11 +21,10 @@ import com.google.common.collect.Sets;
 import org.apache.commons.lang3.tuple.ImmutableTriple;
 import org.apache.commons.lang3.tuple.Triple;
 import org.apache.doris.catalog.Catalog;
-import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.Database;
-import org.apache.doris.catalog.KeysType;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexState;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.Replica;
@@ -566,18 +565,15 @@ public class ReportHandler extends Daemon {
                                     LOG.warn("tablet {} has only one replica {} on backend {}"
                                             + "and it is lost. create an empty replica to recover it",
                                             tabletId, replica.getId(), backendId);
-                                    short shortKeyColumnCount = olapTable.getShortKeyColumnCountByIndexId(indexId);
-                                    int schemaHash = olapTable.getSchemaHashByIndexId(indexId);
-                                    KeysType keysType = olapTable.getKeysType();
-                                    List<Column> columns = olapTable.getSchemaByIndexId(indexId);
+                                    MaterializedIndexMeta indexMeta = olapTable.getIndexMetaByIndexId(indexId);
                                     Set<String> bfColumns = olapTable.getCopiedBfColumns();
                                     double bfFpp = olapTable.getBfFpp();
                                     CreateReplicaTask createReplicaTask = new CreateReplicaTask(backendId, dbId,
-                                            tableId, partitionId, indexId, tabletId, shortKeyColumnCount,
-                                            schemaHash, partition.getVisibleVersion(),
-                                            partition.getVisibleVersionHash(), keysType,
+                                            tableId, partitionId, indexId, tabletId, indexMeta.getShortKeyColumnCount(),
+                                            indexMeta.getSchemaHash(), partition.getVisibleVersion(),
+                                            partition.getVisibleVersionHash(), indexMeta.getKeysType(),
                                             TStorageType.COLUMN,
-                                            TStorageMedium.HDD, columns, bfColumns, bfFpp, null,
+                                            TStorageMedium.HDD, indexMeta.getSchema(), bfColumns, bfFpp, null,
                                             olapTable.getCopiedIndexes(),
                                             olapTable.isInMemory());
                                     createReplicaBatchTask.addTask(createReplicaTask);
diff --git a/fe/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java b/fe/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
index 8572ea4..d0fbf10 100644
--- a/fe/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
+++ b/fe/src/main/java/org/apache/doris/planner/MaterializedViewSelector.java
@@ -27,6 +27,7 @@ import org.apache.doris.analysis.SlotRef;
 import org.apache.doris.analysis.TableRef;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Table;
 import org.apache.doris.common.UserException;
@@ -115,20 +116,20 @@ public class MaterializedViewSelector {
 
     private Map<Long, List<Column>> predicates(OlapScanNode scanNode) {
         // Step1: all of predicates is compensating predicates
-        Map<Long, List<Column>> candidateIndexIdToSchema = scanNode.getOlapTable().getVisibleIndexIdToSchema();
+        Map<Long, MaterializedIndexMeta> candidateIndexIdToMeta = scanNode.getOlapTable().getVisibleIndexIdToMeta();
         OlapTable table = scanNode.getOlapTable();
         Preconditions.checkState(table != null);
         String tableName = table.getName();
         // Step2: check all columns in compensating predicates are available in the view output
-        checkCompensatingPredicates(columnNamesInPredicates.get(tableName), candidateIndexIdToSchema);
+        checkCompensatingPredicates(columnNamesInPredicates.get(tableName), candidateIndexIdToMeta);
         // Step3: group by list in query is the subset of group by list in view or view contains no aggregation
-        checkGrouping(columnNamesInGrouping.get(tableName), candidateIndexIdToSchema, table.getKeysType());
+        checkGrouping(columnNamesInGrouping.get(tableName), candidateIndexIdToMeta);
         // Step4: aggregation functions are available in the view output
-        checkAggregationFunction(aggregateColumnsInQuery.get(tableName), candidateIndexIdToSchema);
+        checkAggregationFunction(aggregateColumnsInQuery.get(tableName), candidateIndexIdToMeta);
         // Step5: columns required to compute output expr are available in the view output
-        checkOutputColumns(columnNamesInQueryOutput.get(tableName), candidateIndexIdToSchema);
+        checkOutputColumns(columnNamesInQueryOutput.get(tableName), candidateIndexIdToMeta);
         // Step6: if table type is aggregate and the candidateIndexIdToSchema is empty,
-        if (table.getKeysType() == KeysType.AGG_KEYS && candidateIndexIdToSchema.size() == 0) {
+        if (table.getKeysType() == KeysType.AGG_KEYS && candidateIndexIdToMeta.size() == 0) {
             // the base index will be added in the candidateIndexIdToSchema.
             /**
              * In Doris, it is allowed that the aggregate table should be scanned directly
@@ -144,11 +145,15 @@ public class MaterializedViewSelector {
              * So, we need to compensate those kinds of index in following step.
              *
              */
-            compensateCandidateIndex(candidateIndexIdToSchema, scanNode.getOlapTable().getVisibleIndexIdToSchema(),
+            compensateCandidateIndex(candidateIndexIdToMeta, scanNode.getOlapTable().getVisibleIndexIdToMeta(),
                             table);
-            checkOutputColumns(columnNamesInQueryOutput.get(tableName), candidateIndexIdToSchema);
+            checkOutputColumns(columnNamesInQueryOutput.get(tableName), candidateIndexIdToMeta);
         }
-        return candidateIndexIdToSchema;
+        Map<Long, List<Column>> result = Maps.newHashMap();
+        for (Map.Entry<Long, MaterializedIndexMeta> entry : candidateIndexIdToMeta.entrySet()) {
+            result.put(entry.getKey(), entry.getValue().getSchema());
+        }
+        return result;
     }
 
     private long priorities(OlapScanNode scanNode, Map<Long, List<Column>> candidateIndexIdToSchema) {
@@ -217,8 +222,8 @@ public class MaterializedViewSelector {
                 selectedIndexId = indexId;
             } else if (rowCount == minRowCount) {
                 // check column number, select one minimum column number
-                int selectedColumnSize = olapTable.getIndexIdToSchema().get(selectedIndexId).size();
-                int currColumnSize = olapTable.getIndexIdToSchema().get(indexId).size();
+                int selectedColumnSize = olapTable.getSchemaByIndexId(selectedIndexId).size();
+                int currColumnSize = olapTable.getSchemaByIndexId(indexId).size();
                 if (currColumnSize < selectedColumnSize) {
                     selectedIndexId = indexId;
                 }
@@ -248,24 +253,24 @@ public class MaterializedViewSelector {
         return selectedIndexId;
     }
 
-    private void checkCompensatingPredicates(Set<String> columnsInPredicates,
-                                             Map<Long, List<Column>> candidateIndexIdToSchema) {
+    private void checkCompensatingPredicates(Set<String> columnsInPredicates, Map<Long, MaterializedIndexMeta>
+            candidateIndexIdToMeta) {
         // When the query statement does not contain any columns in predicates, all candidate index can pass this check
         if (columnsInPredicates == null) {
             return;
         }
-        Iterator<Map.Entry<Long, List<Column>>> iterator = candidateIndexIdToSchema.entrySet().iterator();
+        Iterator<Map.Entry<Long, MaterializedIndexMeta>> iterator = candidateIndexIdToMeta.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<Long, List<Column>> entry = iterator.next();
+            Map.Entry<Long, MaterializedIndexMeta> entry = iterator.next();
             Set<String> indexNonAggregatedColumnNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
-            entry.getValue().stream().filter(column -> !column.isAggregated())
+            entry.getValue().getSchema().stream().filter(column -> !column.isAggregated())
                     .forEach(column -> indexNonAggregatedColumnNames.add(column.getName()));
             if (!indexNonAggregatedColumnNames.containsAll(columnsInPredicates)) {
                 iterator.remove();
             }
         }
         LOG.debug("Those mv pass the test of compensating predicates:"
-                          + Joiner.on(",").join(candidateIndexIdToSchema.keySet()));
+                          + Joiner.on(",").join(candidateIndexIdToMeta.keySet()));
     }
 
     /**
@@ -277,35 +282,37 @@ public class MaterializedViewSelector {
      * 2. the empty grouping columns in query is subset of all of views
      *
      * @param columnsInGrouping
-     * @param candidateIndexIdToSchema
+     * @param candidateIndexIdToMeta
      */
 
-    private void checkGrouping(Set<String> columnsInGrouping, Map<Long, List<Column>> candidateIndexIdToSchema,
-            KeysType keysType) {
-        Iterator<Map.Entry<Long, List<Column>>> iterator = candidateIndexIdToSchema.entrySet().iterator();
+    private void checkGrouping(Set<String> columnsInGrouping, Map<Long, MaterializedIndexMeta>
+            candidateIndexIdToMeta) {
+        Iterator<Map.Entry<Long, MaterializedIndexMeta>> iterator = candidateIndexIdToMeta.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<Long, List<Column>> entry = iterator.next();
+            Map.Entry<Long, MaterializedIndexMeta> entry = iterator.next();
             Set<String> indexNonAggregatedColumnNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
-            List<Column> candidateIndexSchema = entry.getValue();
+            MaterializedIndexMeta candidateIndexMeta = entry.getValue();
+            List<Column> candidateIndexSchema = candidateIndexMeta.getSchema();
             candidateIndexSchema.stream().filter(column -> !column.isAggregated())
                     .forEach(column -> indexNonAggregatedColumnNames.add(column.getName()));
             /*
-            If there is no aggregated column in duplicate table, the index will be SPJ.
+            If there is no aggregated column in a duplicate index, the index will be SPJ.
             For example:
                 duplicate table (k1, k2, v1)
-                mv index (k1, v1)
+                duplicate mv index (k1, v1)
             When the candidate index is SPJ type, it passes the verification directly
 
-            If there is no aggregated column in aggregate index, the index will be deduplicate table.
+            If there is no aggregated column in an aggregate index, the index will be a deduplicate index.
             For example:
-                aggregate table (k1, k2, v1 sum)
-                mv index (k1, k2)
+                duplicate table (k1, k2, v1)
+                aggregate mv index (k1, k2)
             This kind of index is SPJG which same as select k1, k2 from aggregate_table group by k1, k2.
             It also need to check the grouping column using following steps.
 
             ISSUE-3016, MaterializedViewFunctionTest: testDeduplicateQueryInAgg
              */
-            if (indexNonAggregatedColumnNames.size() == candidateIndexSchema.size() && keysType == KeysType.DUP_KEYS) {
+            if (indexNonAggregatedColumnNames.size() == candidateIndexSchema.size()
+                    && candidateIndexMeta.getKeysType() == KeysType.DUP_KEYS) {
                 continue;
             }
             // When the query is SPJ type but the candidate index is SPJG type, it will not pass directly.
@@ -324,16 +331,16 @@ public class MaterializedViewSelector {
             }
         }
         LOG.debug("Those mv pass the test of grouping:"
-                          + Joiner.on(",").join(candidateIndexIdToSchema.keySet()));
+                          + Joiner.on(",").join(candidateIndexIdToMeta.keySet()));
     }
 
     private void checkAggregationFunction(Set<AggregatedColumn> aggregatedColumnsInQueryOutput,
-                                          Map<Long, List<Column>> candidateIndexIdToSchema) {
-        Iterator<Map.Entry<Long, List<Column>>> iterator = candidateIndexIdToSchema.entrySet().iterator();
+            Map<Long, MaterializedIndexMeta> candidateIndexIdToMeta) {
+        Iterator<Map.Entry<Long, MaterializedIndexMeta>> iterator = candidateIndexIdToMeta.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<Long, List<Column>> entry = iterator.next();
+            Map.Entry<Long, MaterializedIndexMeta> entry = iterator.next();
             List<AggregatedColumn> indexAggregatedColumns = Lists.newArrayList();
-            List<Column> candidateIndexSchema = entry.getValue();
+            List<Column> candidateIndexSchema = entry.getValue().getSchema();
             candidateIndexSchema.stream().filter(column -> column.isAggregated())
                     .forEach(column -> indexAggregatedColumns.add(
                             new AggregatedColumn(column.getName(), column.getAggregationType().name())));
@@ -360,19 +367,19 @@ public class MaterializedViewSelector {
             }
         }
         LOG.debug("Those mv pass the test of aggregation function:"
-                          + Joiner.on(",").join(candidateIndexIdToSchema.keySet()));
+                          + Joiner.on(",").join(candidateIndexIdToMeta.keySet()));
     }
 
-    private void checkOutputColumns(Set<String> columnNamesInQueryOutput,
-                                    Map<Long, List<Column>> candidateIndexIdToSchema) {
+    private void checkOutputColumns(Set<String> columnNamesInQueryOutput, Map<Long, MaterializedIndexMeta>
+            candidateIndexIdToMeta) {
         if (columnNamesInQueryOutput == null) {
             return;
         }
-        Iterator<Map.Entry<Long, List<Column>>> iterator = candidateIndexIdToSchema.entrySet().iterator();
+        Iterator<Map.Entry<Long, MaterializedIndexMeta>> iterator = candidateIndexIdToMeta.entrySet().iterator();
         while (iterator.hasNext()) {
-            Map.Entry<Long, List<Column>> entry = iterator.next();
+            Map.Entry<Long, MaterializedIndexMeta> entry = iterator.next();
             Set<String> indexColumnNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
-            List<Column> candidateIndexSchema = entry.getValue();
+            List<Column> candidateIndexSchema = entry.getValue().getSchema();
             candidateIndexSchema.stream().forEach(column -> indexColumnNames.add(column.getName()));
             // The aggregated columns in query output must be subset of the aggregated columns in view
             if (!indexColumnNames.containsAll(columnNamesInQueryOutput)) {
@@ -380,23 +387,23 @@ public class MaterializedViewSelector {
             }
         }
         LOG.debug("Those mv pass the test of output columns:"
-                          + Joiner.on(",").join(candidateIndexIdToSchema.keySet()));
+                          + Joiner.on(",").join(candidateIndexIdToMeta.keySet()));
     }
 
-    private void compensateCandidateIndex(Map<Long, List<Column>> candidateIndexIdToSchema,
-                                 Map<Long, List<Column>> allVisibleIndexes,
+    private void compensateCandidateIndex(Map<Long, MaterializedIndexMeta> candidateIndexIdToMeta, Map<Long,
+            MaterializedIndexMeta> allVisibleIndexes,
                                  OlapTable table) {
         isPreAggregation = false;
         reasonOfDisable = "The aggregate operator does not match";
         int keySizeOfBaseIndex = table.getKeyColumnsByIndexId(table.getBaseIndexId()).size();
-        for (Map.Entry<Long, List<Column>> index : allVisibleIndexes.entrySet()) {
+        for (Map.Entry<Long, MaterializedIndexMeta> index : allVisibleIndexes.entrySet()) {
             long mvIndexId = index.getKey();
             if (table.getKeyColumnsByIndexId(mvIndexId).size() == keySizeOfBaseIndex) {
-                candidateIndexIdToSchema.put(mvIndexId, index.getValue());
+                candidateIndexIdToMeta.put(mvIndexId, index.getValue());
             }
         }
         LOG.debug("Those mv pass the test of output columns:"
-                          + Joiner.on(",").join(candidateIndexIdToSchema.keySet()));
+                          + Joiner.on(",").join(candidateIndexIdToMeta.keySet()));
     }
 
     private void init() {
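
A detail of the reworked checkGrouping() worth calling out: a candidate whose schema contains no aggregated columns is only passed through immediately when its own keys type is DUP_KEYS; a key-only aggregate index (the deduplicate case referenced by ISSUE-3016) still has to survive the grouping-column comparison. The following self-contained sketch captures just that predicate, using simplified stand-in types rather than the real Column and MaterializedIndexMeta classes:

import java.util.Arrays;
import java.util.List;

// Stand-in types for illustration only; the real check operates on
// org.apache.doris.catalog.Column and MaterializedIndexMeta.
public class GroupingCheckSketch {

    enum SketchKeysType { DUP_KEYS, AGG_KEYS }

    static class SketchColumn {
        final String name;
        final boolean aggregated;
        SketchColumn(String name, boolean aggregated) {
            this.name = name;
            this.aggregated = aggregated;
        }
    }

    // Mirrors the early `continue` in checkGrouping(): true means the candidate
    // passes without comparing its key columns against the query's GROUP BY list.
    static boolean passesWithoutGroupingTest(List<SketchColumn> schema, SketchKeysType keysType) {
        long nonAggregated = schema.stream().filter(c -> !c.aggregated).count();
        return nonAggregated == schema.size() && keysType == SketchKeysType.DUP_KEYS;
    }

    public static void main(String[] args) {
        List<SketchColumn> duplicateMv = Arrays.asList(
                new SketchColumn("k1", false), new SketchColumn("v1", false));
        List<SketchColumn> deduplicateMv = Arrays.asList(
                new SketchColumn("k1", false), new SketchColumn("k2", false));

        System.out.println(passesWithoutGroupingTest(duplicateMv, SketchKeysType.DUP_KEYS));   // true: plain SPJ index
        System.out.println(passesWithoutGroupingTest(deduplicateMv, SketchKeysType.AGG_KEYS)); // false: still checked against GROUP BY
    }
}
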
diff --git a/fe/src/main/java/org/apache/doris/planner/OlapTableSink.java b/fe/src/main/java/org/apache/doris/planner/OlapTableSink.java
index 9017fa0..b9f6607 100644
--- a/fe/src/main/java/org/apache/doris/planner/OlapTableSink.java
+++ b/fe/src/main/java/org/apache/doris/planner/OlapTableSink.java
@@ -25,6 +25,7 @@ import org.apache.doris.catalog.DistributionInfo;
 import org.apache.doris.catalog.HashDistributionInfo;
 import org.apache.doris.catalog.MaterializedIndex;
 import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.PartitionKey;
@@ -170,11 +171,12 @@ public class OlapTableSink extends DataSink {
             schemaParam.addToSlot_descs(slotDesc.toThrift());
         }
 
-        for (Map.Entry<Long, List<Column>> pair : table.getIndexIdToSchema().entrySet()) {
+        for (Map.Entry<Long, MaterializedIndexMeta> pair : table.getIndexIdToMeta().entrySet()) {
+            MaterializedIndexMeta indexMeta = pair.getValue();
             List<String> columns = Lists.newArrayList();
-            columns.addAll(pair.getValue().stream().map(Column::getName).collect(Collectors.toList()));
+            columns.addAll(indexMeta.getSchema().stream().map(Column::getName).collect(Collectors.toList()));
             TOlapTableIndexSchema indexSchema = new TOlapTableIndexSchema(pair.getKey(), columns,
-                    table.getSchemaHashByIndexId(pair.getKey()));
+                    indexMeta.getSchemaHash());
             schemaParam.addToIndexes(indexSchema);
         }
         return schemaParam;
diff --git a/fe/src/main/java/org/apache/doris/planner/RollupSelector.java b/fe/src/main/java/org/apache/doris/planner/RollupSelector.java
index fc76a86..1a5fee7 100644
--- a/fe/src/main/java/org/apache/doris/planner/RollupSelector.java
+++ b/fe/src/main/java/org/apache/doris/planner/RollupSelector.java
@@ -84,8 +84,8 @@ public final class RollupSelector {
                 selectedIndexId = indexId;
             } else if (rowCount == minRowCount) {
                 // check column number, select one minimum column number
-                int selectedColumnSize = table.getIndexIdToSchema().get(selectedIndexId).size();
-                int currColumnSize = table.getIndexIdToSchema().get(indexId).size();
+                int selectedColumnSize = table.getSchemaByIndexId(selectedIndexId).size();
+                int currColumnSize = table.getSchemaByIndexId(indexId).size();
                 if (currColumnSize < selectedColumnSize) {
                     selectedIndexId = indexId;
                 }
diff --git a/fe/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java b/fe/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
index e8c9946..c734fe8 100644
--- a/fe/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
+++ b/fe/src/main/java/org/apache/doris/task/HadoopLoadPendingTask.java
@@ -24,6 +24,7 @@ import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.DistributionInfo;
 import org.apache.doris.catalog.HashDistributionInfo;
 import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
 import org.apache.doris.catalog.PartitionInfo;
@@ -180,10 +181,11 @@ public class HadoopLoadPendingTask extends LoadPendingTask {
         Map<String, EtlIndex> etlIndices = Maps.newHashMap();
 
         TableLoadInfo tableLoadInfo = job.getTableLoadInfo(table.getId());
-        for (Entry<Long, List<Column>> entry : table.getIndexIdToSchema().entrySet()) {
+        for (Entry<Long, MaterializedIndexMeta> entry : table.getIndexIdToMeta().entrySet()) {
             long indexId = entry.getKey();
+            MaterializedIndexMeta indexMeta = entry.getValue();
 
-            List<Column> indexColumns = entry.getValue();
+            List<Column> indexColumns = indexMeta.getSchema();
 
             Partition partition = table.getPartition(partitionId);
             if (partition == null) {
@@ -196,7 +198,7 @@ public class HadoopLoadPendingTask extends LoadPendingTask {
             etlIndex.setIndexId(indexId);
 
             // schema hash
-            int schemaHash = table.getSchemaHashByIndexId(indexId);
+            int schemaHash = indexMeta.getSchemaHash();
             etlIndex.setSchemaHash(schemaHash);
             tableLoadInfo.addIndexSchemaHash(indexId, schemaHash);
 
diff --git a/fe/src/main/java/org/apache/doris/transaction/TransactionState.java b/fe/src/main/java/org/apache/doris/transaction/TransactionState.java
index 03bcf70..0a967ac 100644
--- a/fe/src/main/java/org/apache/doris/transaction/TransactionState.java
+++ b/fe/src/main/java/org/apache/doris/transaction/TransactionState.java
@@ -452,7 +452,7 @@ public class TransactionState implements Writable {
         }
         // always rewrite the index ids
         indexIds.clear();
-        for (Long indexId : table.getIndexIdToSchema().keySet()) {
+        for (Long indexId : table.getIndexIdToMeta().keySet()) {
             indexIds.add(indexId);
         }
     }
diff --git a/fe/src/test/java/org/apache/doris/analysis/AccessTestUtil.java b/fe/src/test/java/org/apache/doris/analysis/AccessTestUtil.java
index e9c521a..d222a6a 100644
--- a/fe/src/test/java/org/apache/doris/analysis/AccessTestUtil.java
+++ b/fe/src/test/java/org/apache/doris/analysis/AccessTestUtil.java
@@ -40,6 +40,7 @@ import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.persist.EditLog;
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.system.SystemInfoService;
+import org.apache.doris.thrift.TStorageType;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
@@ -98,9 +99,12 @@ public class AccessTestUtil {
             RandomDistributionInfo distributionInfo = new RandomDistributionInfo(10);
             Partition partition = new Partition(20000L, "testTbl", baseIndex, distributionInfo);
             List<Column> baseSchema = new LinkedList<Column>();
+            Column column = new Column();
+            baseSchema.add(column);
             OlapTable table = new OlapTable(30000, "testTbl", baseSchema,
                     KeysType.AGG_KEYS, new SinglePartitionInfo(), distributionInfo);
-            table.setIndexSchemaInfo(baseIndex.getId(), "testTbl", baseSchema, 0, 1, (short) 1);
+            table.setIndexMeta(baseIndex.getId(), "testTbl", baseSchema, 0, 1, (short) 1,
+                    TStorageType.COLUMN, KeysType.AGG_KEYS);
             table.addPartition(partition);
             table.setBaseIndexId(baseIndex.getId());
             db.createTable(table);
diff --git a/fe/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java b/fe/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java
index bab7833..063ccf7 100644
--- a/fe/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java
+++ b/fe/src/test/java/org/apache/doris/analysis/CreateMaterializedViewStmtTest.java
@@ -18,6 +18,8 @@
 package org.apache.doris.analysis;
 
 import org.apache.doris.catalog.AggregateType;
+import org.apache.doris.catalog.Column;
+import org.apache.doris.catalog.KeysType;
 import org.apache.doris.common.Config;
 import org.apache.doris.common.UserException;
 import org.apache.doris.common.jmockit.Deencapsulation;
@@ -32,6 +34,8 @@ import org.junit.Test;
 import java.util.ArrayList;
 import java.util.List;
 
+import javax.validation.constraints.AssertTrue;
+
 import mockit.Expectations;
 import mockit.Injectable;
 import mockit.Mocked;
@@ -53,7 +57,7 @@ public class CreateMaterializedViewStmtTest {
     }
 
     @Test
-    public void testFunctionColumnInSelectClause(@Injectable ArithmeticExpr arithmeticExpr) {
+    public void testFunctionColumnInSelectClause(@Injectable ArithmeticExpr arithmeticExpr) throws UserException {
         SelectList selectList = new SelectList();
         SelectListItem selectListItem = new SelectListItem(arithmeticExpr, null);
         selectList.addItem(selectListItem);
@@ -295,8 +299,6 @@ public class CreateMaterializedViewStmtTest {
                 result = slotRef2;
                 selectStmt.getGroupByClause();
                 result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
             }
         };
         CreateMaterializedViewStmt createMaterializedViewStmt = new CreateMaterializedViewStmt("test", selectStmt, null);
@@ -396,8 +398,6 @@ public class CreateMaterializedViewStmtTest {
                 result = functionChild0;
                 selectStmt.getGroupByClause();
                 result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
             }
         };
         CreateMaterializedViewStmt createMaterializedViewStmt = new CreateMaterializedViewStmt("test", selectStmt, null);
@@ -476,8 +476,6 @@ public class CreateMaterializedViewStmtTest {
                 result = columnName5;
                 selectStmt.getGroupByClause();
                 result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
             }
         };
 
@@ -520,8 +518,7 @@ public class CreateMaterializedViewStmtTest {
     @Test
     public void testMVColumnsWithoutOrderbyWithoutAggregation(@Injectable SlotRef slotRef1,
             @Injectable SlotRef slotRef2, @Injectable SlotRef slotRef3, @Injectable SlotRef slotRef4,
-            @Injectable TableRef tableRef, @Injectable SelectStmt selectStmt,
-            @Injectable GroupByClause groupByClause) throws UserException {
+            @Injectable TableRef tableRef, @Injectable SelectStmt selectStmt) throws UserException {
         SelectList selectList = new SelectList();
         SelectListItem selectListItem1 = new SelectListItem(slotRef1, null);
         selectList.addItem(selectListItem1);
@@ -537,10 +534,6 @@ public class CreateMaterializedViewStmtTest {
         final String columnName3 = "k3";
         final String columnName4 = "v1";
 
-        List<Expr> groupByList = Lists.newArrayList();
-        groupByList.add(slotRef1);
-        groupByList.add(slotRef2);
-        groupByList.add(slotRef3);
         new Expectations() {
             {
                 analyzer.getClusterName();
@@ -577,9 +570,7 @@ public class CreateMaterializedViewStmtTest {
                 slotRef4.getType().getStorageLayoutBytes();
                 result = 4;
                 selectStmt.getGroupByClause();
-                result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
+                result = null;
             }
         };
 
@@ -669,8 +660,6 @@ public class CreateMaterializedViewStmtTest {
                 result = columnName3;
                 selectStmt.getGroupByClause();
                 result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
             }
         };
 
@@ -694,6 +683,7 @@ public class CreateMaterializedViewStmtTest {
             Assert.assertFalse(mvColumn2.isAggregationTypeImplicit());
             Assert.assertEquals(columnName3, mvColumn2.getName());
             Assert.assertEquals(AggregateType.SUM, mvColumn2.getAggregationType());
+            Assert.assertEquals(KeysType.AGG_KEYS, createMaterializedViewStmt.getMVKeysType());
         } catch (UserException e) {
             Assert.fail(e.getMessage());
         }
@@ -726,17 +716,22 @@ public class CreateMaterializedViewStmtTest {
                 result = columnName1;
                 selectStmt.getGroupByClause();
                 result = groupByClause;
-                groupByClause.getGroupingExprs();
-                result = groupByList;
+                selectStmt.getHavingPred();
+                result = null;
+                selectStmt.getLimit();
+                result = -1;
             }
         };
 
         CreateMaterializedViewStmt createMaterializedViewStmt = new CreateMaterializedViewStmt("test", selectStmt, null);
         try {
             createMaterializedViewStmt.analyze(analyzer);
-            Assert.fail();
+            Assert.assertTrue(KeysType.AGG_KEYS == createMaterializedViewStmt.getMVKeysType());
+            List<MVColumnItem> mvSchema = createMaterializedViewStmt.getMVColumnItemList();
+            Assert.assertEquals(1, mvSchema.size());
+            Assert.assertTrue(mvSchema.get(0).isKey());
         } catch (UserException e) {
-            System.out.print(e.getMessage());
+            Assert.fail(e.getMessage());
         }
 
     }
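
The assertions added in the last two test cases above pin down the behaviour the analyzer is expected to have: an aggregate function in the select list, or a GROUP BY clause on its own, yields an AGG_KEYS materialized view, and in the group-by-only case every selected column is a key column. A toy rendering of that decision follows; the names are illustrative and are not part of the CreateMaterializedViewStmt API:

// Toy rendering of the keys-type decision asserted above; these names are
// illustrative only and do not exist in the Doris code base.
public class MvKeysTypeSketch {

    enum MvKeysType { DUP_KEYS, AGG_KEYS }

    static MvKeysType inferKeysType(boolean hasGroupBy, boolean hasAggregateFunction) {
        // a GROUP BY clause or any aggregate call makes the MV an aggregate index
        return (hasGroupBy || hasAggregateFunction) ? MvKeysType.AGG_KEYS : MvKeysType.DUP_KEYS;
    }

    public static void main(String[] args) {
        System.out.println(inferKeysType(true, false));  // AGG_KEYS: group-by-only (deduplicate) MV, key-only columns
        System.out.println(inferKeysType(false, true));  // AGG_KEYS: aggregate MV
        System.out.println(inferKeysType(false, false)); // DUP_KEYS: plain projection MV
    }
}
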
diff --git a/fe/src/test/java/org/apache/doris/backup/CatalogMocker.java b/fe/src/test/java/org/apache/doris/backup/CatalogMocker.java
index d911ae9..aed97d1 100644
--- a/fe/src/test/java/org/apache/doris/backup/CatalogMocker.java
+++ b/fe/src/test/java/org/apache/doris/backup/CatalogMocker.java
@@ -17,7 +17,6 @@
 
 package org.apache.doris.backup;
 
-import mockit.Expectations;
 import org.apache.doris.analysis.PartitionValue;
 import org.apache.doris.catalog.AggregateType;
 import org.apache.doris.catalog.Catalog;
@@ -64,6 +63,8 @@ import com.google.common.collect.Range;
 import java.util.List;
 import java.util.Map;
 
+import mockit.Expectations;
+
 public class CatalogMocker {
     // user
     public static final String ROOTUSER = "root";
@@ -255,8 +256,8 @@ public class CatalogMocker {
         tablet0.addReplica(replica1);
         tablet0.addReplica(replica2);
 
-        olapTable.setIndexSchemaInfo(TEST_TBL_ID, TEST_TBL_NAME, TEST_TBL_BASE_SCHEMA, 0, SCHEMA_HASH, (short) 1);
-        olapTable.setStorageTypeToIndex(TEST_TBL_ID, TStorageType.COLUMN);
+        olapTable.setIndexMeta(TEST_TBL_ID, TEST_TBL_NAME, TEST_TBL_BASE_SCHEMA, 0, SCHEMA_HASH, (short) 1,
+                TStorageType.COLUMN, KeysType.AGG_KEYS);
         olapTable.addPartition(partition);
         db.createTable(olapTable);
 
@@ -340,8 +341,8 @@ public class CatalogMocker {
         baseTabletP2.addReplica(replica8);
 
 
-        olapTable2.setIndexSchemaInfo(TEST_TBL2_ID, TEST_TBL2_NAME, TEST_TBL_BASE_SCHEMA, 0, SCHEMA_HASH, (short) 1);
-        olapTable2.setStorageTypeToIndex(TEST_TBL2_ID, TStorageType.COLUMN);
+        olapTable2.setIndexMeta(TEST_TBL2_ID, TEST_TBL2_NAME, TEST_TBL_BASE_SCHEMA, 0, SCHEMA_HASH, (short) 1,
+                TStorageType.COLUMN, KeysType.AGG_KEYS);
         olapTable2.addPartition(partition1);
         olapTable2.addPartition(partition2);
 
@@ -378,9 +379,8 @@ public class CatalogMocker {
 
         partition2.createRollupIndex(rollupIndexP2);
 
-        olapTable2.setIndexSchemaInfo(TEST_ROLLUP_ID, TEST_ROLLUP_NAME, TEST_ROLLUP_SCHEMA, 0, ROLLUP_SCHEMA_HASH,
-                                      (short) 1);
-        olapTable2.setStorageTypeToIndex(TEST_ROLLUP_ID, TStorageType.COLUMN);
+        olapTable2.setIndexMeta(TEST_ROLLUP_ID, TEST_ROLLUP_NAME, TEST_ROLLUP_SCHEMA, 0, ROLLUP_SCHEMA_HASH,
+                                      (short) 1, TStorageType.COLUMN, KeysType.AGG_KEYS);
         db.createTable(olapTable2);
 
         return db;
diff --git a/fe/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java b/fe/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java
index eaeec48..0764b87 100644
--- a/fe/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java
+++ b/fe/src/test/java/org/apache/doris/catalog/CatalogTestUtil.java
@@ -29,6 +29,7 @@ import org.apache.doris.system.Backend;
 import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.thrift.TDisk;
 import org.apache.doris.thrift.TStorageMedium;
+import org.apache.doris.thrift.TStorageType;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -218,7 +219,8 @@ public class CatalogTestUtil {
         OlapTable table = new OlapTable(tableId, testTable1, columns, KeysType.AGG_KEYS, partitionInfo,
                 distributionInfo);
         table.addPartition(partition);
-        table.setIndexSchemaInfo(indexId, testIndex1, columns, 0, testSchemaHash1, (short) 1);
+        table.setIndexMeta(indexId, testIndex1, columns, 0, testSchemaHash1, (short) 1,
+                TStorageType.COLUMN, KeysType.AGG_KEYS);
         table.setBaseIndexId(indexId);
         // db
         Database db = new Database(dbId, testDb1);
diff --git a/fe/src/test/java/org/apache/doris/catalog/DatabaseTest.java b/fe/src/test/java/org/apache/doris/catalog/DatabaseTest.java
index 4ceecd2..a5634cc 100644
--- a/fe/src/test/java/org/apache/doris/catalog/DatabaseTest.java
+++ b/fe/src/test/java/org/apache/doris/catalog/DatabaseTest.java
@@ -157,9 +157,10 @@ public class DatabaseTest {
         // db2
         Database db2 = new Database(2, "db2");
         List<Column> columns = new ArrayList<Column>();
-        columns.add(new Column("column2", 
-                        ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", ""));
-        columns.add(new Column("column3", 
+        Column column2 = new Column("column2",
+                ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "");
+        columns.add(column2);
+        columns.add(new Column("column3",
                         ScalarType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", ""));
         columns.add(new Column("column4", 
                         ScalarType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", ""));
@@ -178,13 +179,15 @@ public class DatabaseTest {
         Partition partition = new Partition(20000L, "table", index, new RandomDistributionInfo(10));
         OlapTable table = new OlapTable(1000, "table", columns, KeysType.AGG_KEYS,
                                         new SinglePartitionInfo(), new RandomDistributionInfo(10));
+        short shortKeyColumnCount = 1;
+        table.setIndexMeta(1000, "group1", columns, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS);
+
         List<Column> column = Lists.newArrayList();
-        short schemaHash = 1;
-        table.setIndexSchemaInfo(new Long(1), "test", column, 1, 1, schemaHash);
+        column.add(column2);
+        table.setIndexMeta(new Long(1), "test", column, 1, 1, shortKeyColumnCount,
+                TStorageType.COLUMN, KeysType.AGG_KEYS);
         Deencapsulation.setField(table, "baseIndexId", 1);
-        Map<Long, TStorageType> indexIdToStorageType = Maps.newHashMap();
-        indexIdToStorageType.put(new Long(1), TStorageType.COLUMN);
-        Deencapsulation.setField(table, "indexIdToStorageType", indexIdToStorageType);
         table.addPartition(partition);
         db2.createTable(table);
         db2.write(dos);
diff --git a/fe/src/test/java/org/apache/doris/catalog/MaterializedIndexMetaTest.java b/fe/src/test/java/org/apache/doris/catalog/MaterializedIndexMetaTest.java
new file mode 100644
index 0000000..c06bbb8
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/catalog/MaterializedIndexMetaTest.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.catalog;
+
+import org.apache.doris.thrift.TStorageType;
+
+import com.google.common.collect.Lists;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.List;
+
+public class MaterializedIndexMetaTest {
+
+    private static String fileName = "./MaterializedIndexMetaSerializeTest";
+
+    @After
+    public void tearDown() {
+        File file = new File(fileName);
+        file.delete();
+    }
+
+    @Test
+    public void testSerializeMaterializedIndexMeta() throws IOException {
+        // 1. Write objects to file
+        File file = new File(fileName);
+        file.createNewFile();
+        DataOutputStream out = new DataOutputStream(new FileOutputStream(file));
+
+        List<Column> schema = Lists.newArrayList();
+        Column column = new Column("k1", Type.INT, true, null, true, "1", "");
+        schema.add(column);
+        short shortKeyColumnCount = 1;
+        MaterializedIndexMeta indexMeta = new MaterializedIndexMeta(1, "test", schema, 1, 1, shortKeyColumnCount,
+                TStorageType.COLUMN, KeysType.DUP_KEYS);
+        indexMeta.write(out);
+        out.flush();
+        out.close();
+
+        // 2. Read objects from file
+        DataInputStream in = new DataInputStream(new FileInputStream(file));
+
+        MaterializedIndexMeta readIndexMeta = MaterializedIndexMeta.read(in);
+        Assert.assertEquals(indexMeta, readIndexMeta);
+    }
+}
diff --git a/fe/src/test/java/org/apache/doris/catalog/TableTest.java b/fe/src/test/java/org/apache/doris/catalog/TableTest.java
index 0041513..2066b07 100644
--- a/fe/src/test/java/org/apache/doris/catalog/TableTest.java
+++ b/fe/src/test/java/org/apache/doris/catalog/TableTest.java
@@ -18,18 +18,7 @@
 package org.apache.doris.catalog;
 
 import org.apache.doris.common.FeConstants;
-
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
 import org.apache.doris.common.jmockit.Deencapsulation;
-import org.apache.doris.planner.OlapScanNode;
 import org.apache.doris.thrift.TStorageType;
 
 import com.google.common.collect.Lists;
@@ -39,6 +28,15 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
 public class TableTest {
     private FakeCatalog fakeCatalog;
 
@@ -61,8 +59,9 @@ public class TableTest {
         DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
 
         List<Column> columns = new ArrayList<Column>();
-        columns.add(new Column("column2", 
-                        ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", ""));
+        Column column2 = new Column("column2",
+                ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "");
+        columns.add(column2);
         columns.add(new Column("column3", 
                         ScalarType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", ""));
         columns.add(new Column("column4", 
@@ -80,13 +79,13 @@ public class TableTest {
 
         OlapTable table1 = new OlapTable(1000L, "group1", columns, KeysType.AGG_KEYS,
                                                   new SinglePartitionInfo(), new RandomDistributionInfo(10));
+        short shortKeyColumnCount = 1;
+        table1.setIndexMeta(1000, "group1", columns, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS);
         List<Column> column = Lists.newArrayList();
-        short schemaHash = 1;
-        table1.setIndexSchemaInfo(new Long(1), "test", column, 1, 1, schemaHash);
-        Deencapsulation.setField(table1, "baseIndexId", 1);
-        Map<Long, TStorageType> indexIdToStorageType = Maps.newHashMap();
-        indexIdToStorageType.put(new Long(1), TStorageType.COLUMN);
-        Deencapsulation.setField(table1, "indexIdToStorageType", indexIdToStorageType);
+        column.add(column2);
+
+        table1.setIndexMeta(new Long(2), "test", column, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS);
+        Deencapsulation.setField(table1, "baseIndexId", 1000);
         table1.write(dos);
         dos.flush();
         dos.close();
@@ -97,6 +96,7 @@ public class TableTest {
         Table rFamily1 = Table.read(dis);
         Assert.assertTrue(table1.equals(rFamily1));
         Assert.assertEquals(table1.getCreateTime(), rFamily1.getCreateTime());
+        Assert.assertEquals(table1.getIndexMetaByIndexId(2).getKeysType(), KeysType.AGG_KEYS);
         
         // 3. delete files
         dis.close();
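
The updated TableTest also shows the other half of the new API: setIndexMeta attaches a KeysType to each index, and getIndexMetaByIndexId reads it back after (de)serialization. A minimal sketch of that usage under assumed setup (the table object and the two column lists are placeholders, prepared as in the test above; ids and index names are illustrative):

    // Sketch only: per-index keys type on one OlapTable (ids/names are placeholders).
    long baseIndexId = 1000L;
    long mvIndexId = 2000L;
    table.setIndexMeta(baseIndexId, "base_index", baseColumns, 1, 1, (short) 1,
            TStorageType.COLUMN, KeysType.DUP_KEYS);
    table.setIndexMeta(mvIndexId, "mv_index", mvColumns, 1, 1, (short) 1,
            TStorageType.COLUMN, KeysType.AGG_KEYS);

    // Each index now reports its own keys type.
    Assert.assertEquals(KeysType.DUP_KEYS, table.getIndexMetaByIndexId(baseIndexId).getKeysType());
    Assert.assertEquals(KeysType.AGG_KEYS, table.getIndexMetaByIndexId(mvIndexId).getKeysType());
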
diff --git a/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java b/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java
index fcd83de..3acb60f 100644
--- a/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java
+++ b/fe/src/test/java/org/apache/doris/catalog/TempPartitionTest.java
@@ -558,7 +558,7 @@ public class TempPartitionTest {
     
     private void testSerializeOlapTable(OlapTable tbl) throws IOException, AnalysisException {
         MetaContext metaContext = new MetaContext();
-        metaContext.setMetaVersion(FeMetaVersion.VERSION_74);
+        metaContext.setMetaVersion(FeMetaVersion.VERSION_75);
         metaContext.setThreadLocalInfo();
 
         // 1. Write objects to file
diff --git a/fe/src/test/java/org/apache/doris/common/util/UnitTestUtil.java b/fe/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
index fa52c05..99c84d9 100644
--- a/fe/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
+++ b/fe/src/test/java/org/apache/doris/common/util/UnitTestUtil.java
@@ -115,8 +115,8 @@ public class UnitTestUtil {
                                         KeysType.AGG_KEYS, partitionInfo, distributionInfo);
         Deencapsulation.setField(table, "baseIndexId", indexId);
         table.addPartition(partition);
-        table.setIndexSchemaInfo(indexId, TABLE_NAME, columns, 0, SCHEMA_HASH, (short) 1);
-        table.setStorageTypeToIndex(indexId, TStorageType.COLUMN);
+        table.setIndexMeta(indexId, TABLE_NAME, columns, 0, SCHEMA_HASH, (short) 1, TStorageType.COLUMN,
+                KeysType.AGG_KEYS);
 
         // db
         Database db = new Database(dbId, DB_NAME);
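
The UnitTestUtil hunk is the clearest before/after of the migration this patch requires of callers: the two-step setIndexSchemaInfo + setStorageTypeToIndex setup collapses into a single setIndexMeta call that also records the per-index keys type. Sketched with placeholder arguments (the names below are illustrative, not taken from the patch):

    // Before (removed by this patch): schema info and storage type were set separately,
    // and every index implicitly shared the base table's keys type.
    table.setIndexSchemaInfo(indexId, indexName, columns, schemaVersion, schemaHash, shortKeyColumnCount);
    table.setStorageTypeToIndex(indexId, TStorageType.COLUMN);

    // After: one call fills the MaterializedIndexMeta, including a keys type that may
    // differ from the base index (e.g. an AGG_KEYS materialized view on a DUP_KEYS table).
    table.setIndexMeta(indexId, indexName, columns, schemaVersion, schemaHash, shortKeyColumnCount,
            TStorageType.COLUMN, KeysType.AGG_KEYS);
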
diff --git a/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java b/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
index 84a47fe..4cf7791 100644
--- a/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
+++ b/fe/src/test/java/org/apache/doris/http/DorisHttpTestCase.java
@@ -46,6 +46,7 @@ import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.system.Backend;
 import org.apache.doris.system.SystemInfoService;
 import org.apache.doris.thrift.TStorageMedium;
+import org.apache.doris.thrift.TStorageType;
 
 import com.google.common.collect.Lists;
 
@@ -154,7 +155,8 @@ abstract public class DorisHttpTestCase {
         OlapTable table = new OlapTable(testTableId, name, columns, KeysType.AGG_KEYS, partitionInfo,
                 distributionInfo);
         table.addPartition(partition);
-        table.setIndexSchemaInfo(testIndexId, "testIndex", columns, 0, testSchemaHash, (short) 1);
+        table.setIndexMeta(testIndexId, "testIndex", columns, 0, testSchemaHash, (short) 1, TStorageType.COLUMN,
+                KeysType.AGG_KEYS);
         table.setBaseIndexId(testIndexId);
         return table;
     }
diff --git a/fe/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java b/fe/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java
index e7f53ed..ebda67a 100644
--- a/fe/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java
+++ b/fe/src/test/java/org/apache/doris/persist/CreateTableInfoTest.java
@@ -72,9 +72,10 @@ public class CreateTableInfoTest {
         DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
         
         List<Column> columns = new ArrayList<Column>();
-        columns.add(new Column("column2", 
-                        ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", ""));
-        columns.add(new Column("column3", 
+        Column column2 = new Column("column2",
+                ScalarType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "");
+        columns.add(column2);
+        columns.add(new Column("column3",
                         ScalarType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", ""));
         columns.add(new Column("column4", 
                         ScalarType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", ""));
@@ -94,13 +95,14 @@ public class CreateTableInfoTest {
         Partition partition = new Partition(20000L, "table", index, distributionInfo);
         OlapTable table = new OlapTable(1000L, "table", columns, KeysType.AGG_KEYS, 
                                         new SinglePartitionInfo(), distributionInfo);
+        short shortKeyColumnCount = 1;
+        table.setIndexMeta(1000, "group1", columns, 1, 1, shortKeyColumnCount, TStorageType.COLUMN, KeysType.AGG_KEYS);
+
         List<Column> column = Lists.newArrayList();
-        short schemaHash = 1;
-        table.setIndexSchemaInfo(new Long(1), "test", column, 1, 1, schemaHash);
-        Deencapsulation.setField(table, "baseIndexId", 1);
-        Map<Long, TStorageType> indexIdToStorageType = Maps.newHashMap();
-        indexIdToStorageType.put(new Long(1), TStorageType.COLUMN);
-        Deencapsulation.setField(table, "indexIdToStorageType", indexIdToStorageType);
+        column.add(column2);
+        table.setIndexMeta(new Long(1), "test", column, 1, 1, shortKeyColumnCount,
+                TStorageType.COLUMN, KeysType.AGG_KEYS);
+        Deencapsulation.setField(table, "baseIndexId", 1000);
         table.addPartition(partition);
         CreateTableInfo info = new CreateTableInfo("db1", table);
         info.write(dos);
diff --git a/fe/src/test/java/org/apache/doris/planner/MaterializedViewSelectorTest.java b/fe/src/test/java/org/apache/doris/planner/MaterializedViewSelectorTest.java
index 25d3548..d5648b9 100644
--- a/fe/src/test/java/org/apache/doris/planner/MaterializedViewSelectorTest.java
+++ b/fe/src/test/java/org/apache/doris/planner/MaterializedViewSelectorTest.java
@@ -28,6 +28,7 @@ import org.apache.doris.analysis.TupleDescriptor;
 import org.apache.doris.catalog.AggregateType;
 import org.apache.doris.catalog.Column;
 import org.apache.doris.catalog.KeysType;
+import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Table;
 import org.apache.doris.catalog.Type;
@@ -166,32 +167,44 @@ public class MaterializedViewSelectorTest {
     }
 
     @Test
-    public void testCheckCompensatingPredicates(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer) {
+    public void testCheckCompensatingPredicates(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer,
+            @Injectable MaterializedIndexMeta indexMeta1,
+            @Injectable MaterializedIndexMeta indexMeta2,
+            @Injectable MaterializedIndexMeta indexMeta3,
+            @Injectable MaterializedIndexMeta indexMeta4) {
         Set<String> tableAColumnNames = Sets.newHashSet();
         tableAColumnNames.add("C1");
-        Map<Long, List<Column>> candidateIndexIdToSchema = Maps.newHashMap();
+        Map<Long, MaterializedIndexMeta> candidateIndexIdToSchema = Maps.newHashMap();
         List<Column> index1Columns = Lists.newArrayList();
         Column index1Column1 = new Column("c1", Type.INT, true, null, true, "", "");
         index1Columns.add(index1Column1);
-        candidateIndexIdToSchema.put(new Long(1), index1Columns);
+        candidateIndexIdToSchema.put(new Long(1), indexMeta1);
         List<Column> index2Columns = Lists.newArrayList();
         Column index2Column1 = new Column("c1", Type.INT, false, AggregateType.NONE, true, "", "");
         index2Columns.add(index2Column1);
-        candidateIndexIdToSchema.put(new Long(2), index2Columns);
+        candidateIndexIdToSchema.put(new Long(2), indexMeta2);
         List<Column> index3Columns = Lists.newArrayList();
         Column index3Column1 = new Column("c1", Type.INT, false, AggregateType.SUM, true, "", "");
         index3Columns.add(index3Column1);
-        candidateIndexIdToSchema.put(new Long(3), index3Columns);
+        candidateIndexIdToSchema.put(new Long(3), indexMeta3);
         List<Column> index4Columns = Lists.newArrayList();
         Column index4Column2 = new Column("c2", Type.INT, true, null, true, "", "");
         index4Columns.add(index4Column2);
-        candidateIndexIdToSchema.put(new Long(4), index4Columns);
+        candidateIndexIdToSchema.put(new Long(4), indexMeta4);
         new Expectations() {
             {
                 selectStmt.getAggInfo();
                 result = null;
                 selectStmt.getResultExprs();
                 result = Lists.newArrayList();
+                indexMeta1.getSchema();
+                result = index1Columns;
+                indexMeta2.getSchema();
+                result = index2Columns;
+                indexMeta3.getSchema();
+                result = index3Columns;
+                indexMeta4.getSchema();
+                result = index4Columns;
             }
         };
 
@@ -203,69 +216,88 @@ public class MaterializedViewSelectorTest {
     }
 
     @Test
-    public void testCheckGrouping(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer) {
+    public void testCheckGrouping(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer,
+            @Injectable MaterializedIndexMeta indexMeta1,
+            @Injectable MaterializedIndexMeta indexMeta2,
+            @Injectable MaterializedIndexMeta indexMeta3) {
         Set<String> tableAColumnNames = Sets.newHashSet();
         tableAColumnNames.add("C1");
-        Map<Long, List<Column>> candidateIndexIdToSchema = Maps.newHashMap();
+        Map<Long, MaterializedIndexMeta> candidateIndexIdToSchema = Maps.newHashMap();
         List<Column> index1Columns = Lists.newArrayList();
         Column index1Column1 = new Column("c2", Type.INT, true, null, true, "", "");
         index1Columns.add(index1Column1);
-        candidateIndexIdToSchema.put(new Long(1), index1Columns);
+        candidateIndexIdToSchema.put(new Long(1), indexMeta1);
         List<Column> index2Columns = Lists.newArrayList();
         Column index2Column1 = new Column("c1", Type.INT, true, null, true, "", "");
         index2Columns.add(index2Column1);
         Column index2Column2 = new Column("c2", Type.INT, false, AggregateType.SUM, true, "", "");
         index2Columns.add(index2Column2);
-        candidateIndexIdToSchema.put(new Long(2), index2Columns);
+        candidateIndexIdToSchema.put(new Long(2), indexMeta2);
         List<Column> index3Columns = Lists.newArrayList();
         Column index3Column1 = new Column("c2", Type.INT, true, null, true, "", "");
         index3Columns.add(index3Column1);
         Column index3Column2 = new Column("c1", Type.INT, false, AggregateType.SUM, true, "", "");
         index3Columns.add(index3Column2);
-        candidateIndexIdToSchema.put(new Long(3), index3Columns);
+        candidateIndexIdToSchema.put(new Long(3), indexMeta3);
         new Expectations() {
             {
                 selectStmt.getAggInfo();
                 result = null;
                 selectStmt.getResultExprs();
                 result = Lists.newArrayList();
+                indexMeta1.getSchema();
+                result = index1Columns;
+                indexMeta1.getKeysType();
+                result = KeysType.DUP_KEYS;
+                indexMeta2.getSchema();
+                result = index2Columns;
+                indexMeta3.getSchema();
+                result = index3Columns;
             }
         };
 
         MaterializedViewSelector selector = new MaterializedViewSelector(selectStmt, analyzer);
         Deencapsulation.setField(selector, "isSPJQuery", false);
-        Deencapsulation.invoke(selector, "checkGrouping", tableAColumnNames, candidateIndexIdToSchema,
-                KeysType.DUP_KEYS);
+        Deencapsulation.invoke(selector, "checkGrouping", tableAColumnNames, candidateIndexIdToSchema);
         Assert.assertEquals(2, candidateIndexIdToSchema.size());
         Assert.assertTrue(candidateIndexIdToSchema.keySet().contains(new Long(1)));
         Assert.assertTrue(candidateIndexIdToSchema.keySet().contains(new Long(2)));
     }
 
     @Test
-    public void testCheckAggregationFunction(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer) {
-        Map<Long, List<Column>> candidateIndexIdToSchema = Maps.newHashMap();
+    public void testCheckAggregationFunction(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer,
+            @Injectable MaterializedIndexMeta indexMeta1,
+            @Injectable MaterializedIndexMeta indexMeta2,
+            @Injectable MaterializedIndexMeta indexMeta3) {
+        Map<Long, MaterializedIndexMeta> candidateIndexIdToSchema = Maps.newHashMap();
         List<Column> index1Columns = Lists.newArrayList();
         Column index1Column1 = new Column("c2", Type.INT, true, null, true, "", "");
         index1Columns.add(index1Column1);
-        candidateIndexIdToSchema.put(new Long(1), index1Columns);
+        candidateIndexIdToSchema.put(new Long(1), indexMeta1);
         List<Column> index2Columns = Lists.newArrayList();
         Column index2Column1 = new Column("c1", Type.INT, true, null, true, "", "");
         index2Columns.add(index2Column1);
         Column index2Column2 = new Column("c2", Type.INT, false, AggregateType.SUM, true, "", "");
         index2Columns.add(index2Column2);
-        candidateIndexIdToSchema.put(new Long(2), index2Columns);
+        candidateIndexIdToSchema.put(new Long(2), indexMeta2);
         List<Column> index3Columns = Lists.newArrayList();
         Column index3Column1 = new Column("c2", Type.INT, true, null, true, "", "");
         index3Columns.add(index3Column1);
         Column index3Column2 = new Column("c1", Type.INT, false, AggregateType.SUM, true, "", "");
         index3Columns.add(index3Column2);
-        candidateIndexIdToSchema.put(new Long(3), index3Columns);
+        candidateIndexIdToSchema.put(new Long(3), indexMeta3);
         new Expectations() {
             {
                 selectStmt.getAggInfo();
                 result = null;
                 selectStmt.getResultExprs();
                 result = Lists.newArrayList();
+                indexMeta1.getSchema();
+                result = index1Columns;
+                indexMeta2.getSchema();
+                result = index2Columns;
+                indexMeta3.getSchema();
+                result = index3Columns;
             }
         };
 
@@ -283,30 +315,39 @@ public class MaterializedViewSelectorTest {
     }
 
     @Test
-    public void testCheckOutputColumns(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer) {
-        Map<Long, List<Column>> candidateIndexIdToSchema = Maps.newHashMap();
+    public void testCheckOutputColumns(@Injectable SelectStmt selectStmt, @Injectable Analyzer analyzer,
+            @Injectable MaterializedIndexMeta indexMeta1,
+            @Injectable MaterializedIndexMeta indexMeta2,
+            @Injectable MaterializedIndexMeta indexMeta3) {
+        Map<Long, MaterializedIndexMeta> candidateIndexIdToSchema = Maps.newHashMap();
         List<Column> index1Columns = Lists.newArrayList();
         Column index1Column1 = new Column("c2", Type.INT, true, null, true, "", "");
         index1Columns.add(index1Column1);
-        candidateIndexIdToSchema.put(new Long(1), index1Columns);
+        candidateIndexIdToSchema.put(new Long(1), indexMeta1);
         List<Column> index2Columns = Lists.newArrayList();
         Column index2Column1 = new Column("c1", Type.INT, true, null, true, "", "");
         index2Columns.add(index2Column1);
         Column index2Column2 = new Column("c2", Type.INT, false, AggregateType.NONE, true, "", "");
         index2Columns.add(index2Column2);
-        candidateIndexIdToSchema.put(new Long(2), index2Columns);
+        candidateIndexIdToSchema.put(new Long(2), indexMeta2);
         List<Column> index3Columns = Lists.newArrayList();
         Column index3Column1 = new Column("C2", Type.INT, true, null, true, "", "");
         index3Columns.add(index3Column1);
         Column index3Column2 = new Column("c1", Type.INT, false, AggregateType.SUM, true, "", "");
         index3Columns.add(index3Column2);
-        candidateIndexIdToSchema.put(new Long(3), index3Columns);
+        candidateIndexIdToSchema.put(new Long(3), indexMeta3);
         new Expectations() {
             {
                 selectStmt.getAggInfo();
                 result = null;
                 selectStmt.getResultExprs();
                 result = Lists.newArrayList();
+                indexMeta1.getSchema();
+                result = index1Columns;
+                indexMeta2.getSchema();
+                result = index2Columns;
+                indexMeta3.getSchema();
+                result = index3Columns;
             }
         };
 

