Posted to commits@iotdb.apache.org by ja...@apache.org on 2021/01/20 10:23:18 UTC

[iotdb] branch NewTsFileV2 created (now c76d135)

This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a change to branch NewTsFileV2
in repository https://gitbox.apache.org/repos/asf/iotdb.git.


      at c76d135  wait for upgrade tool

This branch includes the following new commits:

     new 140ddec  some changes
     new c76d135  wait for upgrade tool

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[iotdb] 01/02: some changes

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a commit to branch NewTsFileV2
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 140ddec0e738ee4ed85ebfb999444639f8d329ba
Author: JackieTien97 <Ja...@foxmail.com>
AuthorDate: Tue Jan 19 13:54:10 2021 +0800

    some changes
---
 .../db/engine/cache/TimeSeriesMetadataCache.java   |  2 +-
 .../chunk/metadata/DiskChunkMetadataLoader.java    |  7 ++---
 .../tsfile/file/metadata/TimeseriesMetadata.java   | 32 ++++++++++++++++++++--
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |  8 +++---
 4 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index 384a496..a0cf793 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -193,7 +193,7 @@ public class TimeSeriesMetadataCache {
             "Get timeseries: {}.{}  metadata in file: {}  from cache: {}.", key.device,
             key.measurement, key.filePath, timeseriesMetadata);
       }
-      return new TimeseriesMetadata(timeseriesMetadata);
+      return timeseriesMetadata;
     }
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
index 7144419..2b55d3b 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
@@ -20,7 +20,6 @@ package org.apache.iotdb.db.query.reader.chunk.metadata;
 
 import java.io.IOException;
 import java.util.List;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.metadata.PartialPath;
@@ -49,10 +48,8 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
   }
 
   @Override
-  public List<ChunkMetadata> loadChunkMetadataList(TimeseriesMetadata timeseriesMetadata)
-      throws IOException {
-    List<ChunkMetadata> chunkMetadataList = ChunkMetadataCache
-        .getInstance().get(resource.getTsFilePath(), seriesPath, timeseriesMetadata);
+  public List<ChunkMetadata> loadChunkMetadataList(TimeseriesMetadata timeseriesMetadata) {
+    List<ChunkMetadata> chunkMetadataList = timeseriesMetadata.getChunkMetadataList();
 
     setDiskChunkLoader(chunkMetadataList, resource, seriesPath, context);
 
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
index 4f93f17..a8c30dd 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
@@ -22,11 +22,13 @@ package org.apache.iotdb.tsfile.file.metadata;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.List;
 import org.apache.iotdb.tsfile.common.cache.Accountable;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.file.metadata.statistics.Statistics;
 import org.apache.iotdb.tsfile.read.controller.IChunkMetadataLoader;
+import org.apache.iotdb.tsfile.utils.PublicBAOS;
 import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
@@ -56,18 +58,25 @@ public class TimeseriesMetadata implements Accountable {
   // used for SeriesReader to indicate whether it is a seq/unseq timeseries metadata
   private boolean isSeq = true;
 
+  private ByteBuffer chunkMetadataListByteBuffer;
+
+  private PublicBAOS chunkMetadataListBuffer;
+
+  private ArrayList<ChunkMetadata> chunkMetadataList;
+
   public TimeseriesMetadata() {
   }
 
   public TimeseriesMetadata(byte timeSeriesMetadataType, long startOffsetOfChunkMetaDataList,
       int chunkMetaDataListDataSize, String measurementId, TSDataType dataType,
-      Statistics statistics) {
+      Statistics statistics, PublicBAOS chunkMetadataListBuffer) {
     this.timeSeriesMetadataType = timeSeriesMetadataType;
     this.startOffsetOfChunkMetaDataList = startOffsetOfChunkMetaDataList;
     this.chunkMetaDataListDataSize = chunkMetaDataListDataSize;
     this.measurementId = measurementId;
     this.dataType = dataType;
     this.statistics = statistics;
+    this.chunkMetadataListBuffer = chunkMetadataListBuffer;
   }
 
   public TimeseriesMetadata(TimeseriesMetadata timeseriesMetadata) {
@@ -86,9 +95,13 @@ public class TimeseriesMetadata implements Accountable {
     timeseriesMetaData.setMeasurementId(ReadWriteIOUtils.readVarIntString(buffer));
     timeseriesMetaData.setTSDataType(ReadWriteIOUtils.readDataType(buffer));
     timeseriesMetaData.setOffsetOfChunkMetaDataList(ReadWriteIOUtils.readLong(buffer));
+    int chunkMetaDataListDataSize = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
     timeseriesMetaData
-        .setDataSizeOfChunkMetaDataList(ReadWriteForEncodingUtils.readUnsignedVarInt(buffer));
+        .setDataSizeOfChunkMetaDataList(chunkMetaDataListDataSize);
     timeseriesMetaData.setStatistics(Statistics.deserialize(buffer, timeseriesMetaData.dataType));
+    timeseriesMetaData.chunkMetadataListByteBuffer = buffer.slice();
+    timeseriesMetaData.chunkMetadataListByteBuffer.limit(chunkMetaDataListDataSize);
+    buffer.position(buffer.position() + chunkMetaDataListDataSize);
     return timeseriesMetaData;
   }
 
@@ -108,6 +121,8 @@ public class TimeseriesMetadata implements Accountable {
     byteLen += ReadWriteForEncodingUtils
         .writeUnsignedVarInt(chunkMetaDataListDataSize, outputStream);
     byteLen += statistics.serialize(outputStream);
+    chunkMetadataListBuffer.writeTo(outputStream);
+    byteLen += chunkMetadataListBuffer.size();
     return byteLen;
   }
 
@@ -167,6 +182,19 @@ public class TimeseriesMetadata implements Accountable {
     return chunkMetadataLoader.loadChunkMetadataList(this);
   }
 
+  public List<ChunkMetadata> getChunkMetadataList() {
+    if (chunkMetadataList == null) {
+      chunkMetadataList = new ArrayList<>();
+      while (chunkMetadataListByteBuffer.hasRemaining()) {
+        chunkMetadataList.add(ChunkMetadata.deserializeFrom(chunkMetadataListByteBuffer, this));
+      }
+      // minimize the storage of an ArrayList instance.
+      chunkMetadataList.trimToSize();
+      chunkMetadataListByteBuffer = null;
+    }
+    return chunkMetadataList;
+  }
+
   public boolean isModified() {
     return modified;
   }
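
The heart of the TimeseriesMetadata change above: deserializeFrom no longer decodes the chunk-metadata block eagerly, it only slices those bytes out of the index buffer, and getChunkMetadataList decodes them on first use and then drops the raw slice. Below is a minimal sketch of that slice-and-lazily-decode pattern, with plain longs standing in for ChunkMetadata; LazyEntryList and its members are illustrative names, not IoTDB classes.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Minimal sketch of the slice-then-decode-lazily pattern used above.
class LazyEntryList {

  private ByteBuffer raw;           // still-serialized entries, sliced out of the index buffer
  private ArrayList<Long> entries;  // decoded on first access only

  static LazyEntryList wrap(ByteBuffer buffer, int dataSize) {
    LazyEntryList list = new LazyEntryList();
    list.raw = buffer.slice();                       // view starting at the current position
    list.raw.limit(dataSize);                        // restrict the view to this entry block
    buffer.position(buffer.position() + dataSize);   // skip past the block in the outer buffer
    return list;
  }

  List<Long> get() {
    if (entries == null) {
      entries = new ArrayList<>();
      while (raw.hasRemaining()) {
        entries.add(raw.getLong());                  // stand-in for ChunkMetadata.deserializeFrom
      }
      entries.trimToSize();                          // minimize the list's footprint, as in the diff
      raw = null;                                    // the raw slice can now be garbage collected
    }
    return entries;
  }
}
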
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
index d3290a9..f7e7d72 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
@@ -282,8 +282,8 @@ public class TsFileIOWriter {
       String device = path.getDevice();
 
       // create TimeseriesMetaData
+      PublicBAOS publicBAOS = new PublicBAOS();
       TSDataType dataType = entry.getValue().get(entry.getValue().size() - 1).getDataType();
-      long offsetOfChunkMetadataList = out.getPosition();
       Statistics seriesStatistics = Statistics.getStatsByType(dataType);
 
       int chunkMetadataListLength = 0;
@@ -294,12 +294,12 @@ public class TsFileIOWriter {
           continue;
         }
         chunkMetadataListLength += chunkMetadata
-            .serializeTo(out.wrapAsStream(), serializeStatistic);
+            .serializeTo(publicBAOS, serializeStatistic);
         seriesStatistics.mergeStatistics(chunkMetadata.getStatistics());
       }
       TimeseriesMetadata timeseriesMetadata = new TimeseriesMetadata(
-          serializeStatistic ? (byte) 1 : (byte) 0, offsetOfChunkMetadataList,
-          chunkMetadataListLength, path.getMeasurement(), dataType, seriesStatistics);
+          serializeStatistic ? (byte) 1 : (byte) 0, 0,
+          chunkMetadataListLength, path.getMeasurement(), dataType, seriesStatistics, publicBAOS);
       deviceTimeseriesMetadataMap.computeIfAbsent(device, k -> new ArrayList<>())
           .add(timeseriesMetadata);
     }
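
On the write side, TsFileIOWriter now serializes each series' ChunkMetadata entries into an in-memory PublicBAOS and hands that buffer to the TimeseriesMetadata constructor, so the block can later be appended directly behind the TimeseriesMetadata header (see the new serializeTo lines in the previous file). A rough sketch of that buffer-then-append idea, using the JDK's ByteArrayOutputStream as a stand-in for PublicBAOS and simplified field layout:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Sketch: collect per-series entries in memory first, then append the whole buffer
// right after the series header when the index is finally written out.
class SeriesIndexSketch {

  private final ByteArrayOutputStream chunkMetadataBuffer = new ByteArrayOutputStream();

  // stand-in for chunkMetadata.serializeTo(publicBAOS, serializeStatistic)
  void addChunk(long offsetOfChunkHeader, long numOfPoints) throws IOException {
    DataOutputStream out = new DataOutputStream(chunkMetadataBuffer);
    out.writeLong(offsetOfChunkHeader);
    out.writeLong(numOfPoints);
  }

  // stand-in for TimeseriesMetadata.serializeTo(OutputStream)
  int serializeTo(OutputStream outputStream) throws IOException {
    int byteLen = 0;
    // ... header fields (measurement id, data type, statistics) would be written here ...
    chunkMetadataBuffer.writeTo(outputStream);   // append the buffered chunk-metadata block
    byteLen += chunkMetadataBuffer.size();
    return byteLen;
  }
}
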


[iotdb] 02/02: wait for upgrade tool

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a commit to branch NewTsFileV2
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit c76d13519e284f02d56d4aacc5a7056396aa678e
Author: JackieTien97 <Ja...@foxmail.com>
AuthorDate: Wed Jan 20 18:22:31 2021 +0800

    wait for upgrade tool
---
 .../resources/conf/iotdb-engine.properties         |   6 +-
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  14 +-
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |  10 +-
 .../db/engine/cache/CacheHitRatioMonitor.java      |  25 ---
 .../engine/cache/CacheHitRatioMonitorMXBean.java   |  11 -
 .../apache/iotdb/db/engine/cache/ChunkCache.java   |  12 +-
 .../iotdb/db/engine/cache/ChunkMetadataCache.java  | 222 ---------------------
 .../db/engine/cache/TimeSeriesMetadataCache.java   |  12 +-
 .../db/engine/compaction/TsFileManagement.java     |   2 -
 .../level/LevelCompactionTsFileManagement.java     |   2 -
 .../iotdb/db/engine/merge/task/MergeFileTask.java  |   4 -
 .../apache/iotdb/db/qp/executor/PlanExecutor.java  |   2 -
 .../db/query/reader/chunk/DiskChunkLoader.java     |  11 +-
 .../chunk/metadata/DiskChunkMetadataLoader.java    |  33 ++-
 .../chunk/metadata/MemChunkMetadataLoader.java     |   6 -
 .../org/apache/iotdb/db/service/TSServiceImpl.java |   6 +-
 .../db/engine/cache/ChunkMetadataCacheTest.java    | 172 ----------------
 .../db/engine/compaction/LevelCompactionTest.java  |   3 -
 .../apache/iotdb/db/engine/merge/MergeTest.java    |   2 -
 .../iotdb/db/integration/IoTDBClearCacheIT.java    |   3 -
 .../query/reader/series/SeriesReaderTestUtil.java  |   2 -
 .../apache/iotdb/db/utils/EnvironmentUtils.java    |   2 -
 .../apache/iotdb/spark/db/EnvironmentUtils.java    |  21 +-
 .../iotdb/tsfile/file/metadata/ChunkMetadata.java  |  19 ++
 .../tsfile/file/metadata/TimeseriesMetadata.java   |  27 ++-
 .../iotdb/tsfile/read/TsFileSequenceReader.java    | 143 +++++--------
 .../read/controller/IChunkMetadataLoader.java      |   8 -
 .../file/metadata/TimeSeriesMetadataTest.java      |   2 +-
 28 files changed, 134 insertions(+), 648 deletions(-)

diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties b/server/src/assembly/resources/conf/iotdb-engine.properties
index 8201e9d..73fa1da 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -380,9 +380,9 @@ query_time_threshold=60000
 
 # whether to cache meta data(ChunkMetadata and TimeSeriesMetadata) or not.
 meta_data_cache_enable=true
-# Read memory Allocation Ratio: ChunkMetadataCache, ChunkCache, TimeSeriesMetadataCache, memory used for constructing QueryDataSet and Free Memory Used in Query.
-# The parameter form is a:b:c:d:e, where a, b, c, d and e are integers. for example: 1:1:1:1:1 , 1:1:1:3:4
-chunkmeta_chunk_timeseriesmeta_free_memory_proportion=1:1:1:3:4
+# Read memory Allocation Ratio: ChunkCache, TimeSeriesMetadataCache, memory used for constructing QueryDataSet and Free Memory Used in Query.
+# The parameter form is a:b:c:d, where a, b, c and d are integers. for example: 1:1:1:1 , 1:2:3:4
+chunk_timeseriesmeta_free_memory_proportion=1:2:3:4
 
 # cache size for MManager.
 # This cache is used to improve insert speed where all path check and TSDataType will be cached in MManager with corresponding Path.
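
For orientation, here is a hedged sketch of how the new chunk_timeseriesmeta_free_memory_proportion value splits the read-memory budget, based on the 1:2:3:4 default above and the parsing that IoTDBDescriptor does later in this commit; the 4 GB figure and class name are illustrative assumptions only.

// Illustrative only: splitting a read budget by an "a:b:c:d" proportion string.
public class ReadMemoryProportionExample {
  public static void main(String[] args) {
    long maxMemoryAvailable = 4L * 1024 * 1024 * 1024;   // assume 4 GB allocated for reads
    String[] proportions = "1:2:3:4".split(":");         // chunk_timeseriesmeta_free_memory_proportion
    int proportionSum = 0;
    for (String p : proportions) {
      proportionSum += Integer.parseInt(p.trim());
    }
    long chunkCache     = maxMemoryAvailable * Integer.parseInt(proportions[0].trim()) / proportionSum; // 1/10
    long timeseriesMeta = maxMemoryAvailable * Integer.parseInt(proportions[1].trim()) / proportionSum; // 2/10
    long queryDataSet   = maxMemoryAvailable * Integer.parseInt(proportions[2].trim()) / proportionSum; // 3/10
    // the remaining 4/10 stays as free memory used in queries
    System.out.printf("ChunkCache=%d, TimeSeriesMetadataCache=%d, QueryDataSet=%d%n",
        chunkCache, timeseriesMeta, queryDataSet);
  }
}
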
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 0871744..7463691 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -409,12 +409,8 @@ public class IoTDBConfig {
   /**
    * Memory allocated for timeSeriesMetaData cache in read process
    */
-  private long allocateMemoryForTimeSeriesMetaDataCache = allocateMemoryForRead / 10;
+  private long allocateMemoryForTimeSeriesMetaDataCache = allocateMemoryForRead / 5;
 
-  /**
-   * Memory allocated for chunkMetaData cache in read process
-   */
-  private long allocateMemoryForChunkMetaDataCache = allocateMemoryForRead / 10;
 
   /**
    * Memory allocated for chunk cache in read process
@@ -1683,14 +1679,6 @@ public class IoTDBConfig {
     this.allocateMemoryForTimeSeriesMetaDataCache = allocateMemoryForTimeSeriesMetaDataCache;
   }
 
-  public long getAllocateMemoryForChunkMetaDataCache() {
-    return allocateMemoryForChunkMetaDataCache;
-  }
-
-  public void setAllocateMemoryForChunkMetaDataCache(long allocateMemoryForChunkMetaDataCache) {
-    this.allocateMemoryForChunkMetaDataCache = allocateMemoryForChunkMetaDataCache;
-  }
-
   public long getAllocateMemoryForChunkCache() {
     return allocateMemoryForChunkCache;
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index b11f9d3..0c5c19e 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -835,7 +835,7 @@ public class IoTDBDescriptor {
     }
 
     String queryMemoryAllocateProportion = properties
-        .getProperty("chunkmeta_chunk_timeseriesmeta_free_memory_proportion");
+        .getProperty("chunk_timeseriesmeta_free_memory_proportion");
     if (queryMemoryAllocateProportion != null) {
       String[] proportions = queryMemoryAllocateProportion.split(":");
       int proportionSum = 0;
@@ -845,14 +845,12 @@ public class IoTDBDescriptor {
       long maxMemoryAvailable = conf.getAllocateMemoryForRead();
       if (proportionSum != 0) {
         try {
-          conf.setAllocateMemoryForChunkMetaDataCache(
-              maxMemoryAvailable * Integer.parseInt(proportions[0].trim()) / proportionSum);
           conf.setAllocateMemoryForChunkCache(
-              maxMemoryAvailable * Integer.parseInt(proportions[1].trim()) / proportionSum);
+              maxMemoryAvailable * Integer.parseInt(proportions[0].trim()) / proportionSum);
           conf.setAllocateMemoryForTimeSeriesMetaDataCache(
-              maxMemoryAvailable * Integer.parseInt(proportions[2].trim()) / proportionSum);
+              maxMemoryAvailable * Integer.parseInt(proportions[1].trim()) / proportionSum);
           conf.setAllocateMemoryForReadWithoutCache(
-              maxMemoryAvailable * Integer.parseInt(proportions[3].trim()) / proportionSum);
+              maxMemoryAvailable * Integer.parseInt(proportions[2].trim()) / proportionSum);
         } catch (Exception e) {
           throw new RuntimeException(
               "Each subsection of configuration item chunkmeta_chunk_timeseriesmeta_free_memory_proportion"
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
index 2c411ac..df078ec 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
@@ -53,31 +53,6 @@ public class CacheHitRatioMonitor implements CacheHitRatioMonitorMXBean, IServic
   }
 
   @Override
-  public double getChunkMetaDataHitRatio() {
-    return ChunkMetadataCache.getInstance().calculateChunkMetaDataHitRatio();
-  }
-
-  @Override
-  public long getChunkMetaDataCacheUsedMemory() {
-    return ChunkMetadataCache.getInstance().getUsedMemory();
-  }
-
-  @Override
-  public long getChunkMetaDataCacheMaxMemory() {
-    return ChunkMetadataCache.getInstance().getMaxMemory();
-  }
-
-  @Override
-  public double getChunkMetaDataCacheUsedMemoryProportion() {
-    return ChunkMetadataCache.getInstance().getUsedMemoryProportion();
-  }
-
-  @Override
-  public long getChunkMetaDataCacheAverageSize() {
-    return ChunkMetadataCache.getInstance().getAverageSize();
-  }
-
-  @Override
   public double getChunkHitRatio() {
     return ChunkCache.getInstance().calculateChunkHitRatio();
   }
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
index 0514481..ae2697b 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
@@ -20,17 +20,6 @@ package org.apache.iotdb.db.engine.cache;
 
 public interface CacheHitRatioMonitorMXBean {
 
-  double getChunkMetaDataHitRatio();
-
-  long getChunkMetaDataCacheUsedMemory();
-
-  long getChunkMetaDataCacheMaxMemory();
-
-  double getChunkMetaDataCacheUsedMemoryProportion();
-
-  long getChunkMetaDataCacheAverageSize();
-
-
   double getChunkHitRatio();
 
   long getChunkCacheUsedMemory();
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index c621a26..345d2e0 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -25,6 +25,7 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.db.utils.TestOnly;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
@@ -83,8 +84,11 @@ public class ChunkCache {
     return ChunkCacheHolder.INSTANCE;
   }
 
-  public Chunk get(ChunkMetadata chunkMetaData, TsFileSequenceReader reader) throws IOException {
+  public Chunk get(ChunkMetadata chunkMetaData) throws IOException {
     if (!CACHE_ENABLE) {
+      TsFileSequenceReader reader =
+          FileReaderManager.getInstance()
+              .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed());
       Chunk chunk = reader.readMemChunk(chunkMetaData);
       return new Chunk(chunk.getHeader(), chunk.getData().duplicate(),
           chunk.getDeleteIntervalList(), chunkMetaData.getStatistics());
@@ -104,6 +108,8 @@ public class ChunkCache {
       printCacheLog(true);
     } else {
       printCacheLog(false);
+      TsFileSequenceReader reader = FileReaderManager.getInstance()
+          .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed());
       try {
         chunk = reader.readMemChunk(chunkMetaData);
       } catch (IOException e) {
@@ -112,7 +118,9 @@ public class ChunkCache {
       }
       lock.writeLock().lock();
       try {
-        lruCache.put(chunkMetaData, chunk);
+        if (!lruCache.containsKey(chunkMetaData)) {
+          lruCache.put(chunkMetaData, chunk);
+        }
       } finally {
         lock.writeLock().unlock();
       }
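
ChunkCache.get no longer receives a TsFileSequenceReader; it resolves one through FileReaderManager from the file path and closed flag that this commit adds to ChunkMetadata, and the put is now guarded by containsKey so a racing loader does not overwrite an existing entry. A stripped-down sketch of that read-lock lookup / load / guarded write-lock put shape, with generic types rather than the IoTDB classes (eviction and memory accounting omitted):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;

// Sketch of the lookup-or-load-then-guarded-put pattern used above.
class GuardedCacheSketch<K, V> {

  private final Map<K, V> cache = new HashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  V get(K key, Function<K, V> loader) {
    lock.readLock().lock();
    V value;
    try {
      value = cache.get(key);
    } finally {
      lock.readLock().unlock();
    }
    if (value == null) {
      value = loader.apply(key);          // e.g. look up the reader and call readMemChunk
      lock.writeLock().lock();
      try {
        if (!cache.containsKey(key)) {    // keep the first loaded value if another thread won the race
          cache.put(key, value);
        }
      } finally {
        lock.writeLock().unlock();
      }
    }
    return value;
  }
}
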
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
deleted file mode 100644
index f8fd5f3..0000000
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.engine.cache;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.iotdb.db.conf.IoTDBConfig;
-import org.apache.iotdb.db.conf.IoTDBConstant;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.query.control.FileReaderManager;
-import org.apache.iotdb.db.utils.TestOnly;
-import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
-import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
-import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
-import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.utils.RamUsageEstimator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is used to cache <code>List<ChunkMetaData></code> of tsfile in IoTDB. The caching
- * strategy is LRU.
- */
-public class ChunkMetadataCache {
-
-  private static final Logger logger = LoggerFactory.getLogger(ChunkMetadataCache.class);
-  private static final Logger DEBUG_LOGGER = LoggerFactory.getLogger("QUERY_DEBUG");
-  private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
-  private static final long MEMORY_THRESHOLD_IN_B = config.getAllocateMemoryForChunkMetaDataCache();
-  private static final boolean CACHE_ENABLE = config.isMetaDataCacheEnable();
-
-  /**
-   * key: file path dot deviceId dot sensorId.
-   * <p>
-   * value: chunkMetaData list of one timeseries in the file.
-   */
-  private final LRULinkedHashMap<AccountableString, List<ChunkMetadata>> lruCache;
-
-  private final ReadWriteLock lock = new ReentrantReadWriteLock();
-
-  private final AtomicLong cacheHitNum = new AtomicLong();
-  private final AtomicLong cacheRequestNum = new AtomicLong();
-
-
-  private ChunkMetadataCache(long memoryThreshold) {
-    if (CACHE_ENABLE) {
-      logger.info("ChunkMetadataCache size = " + memoryThreshold);
-    }
-    lruCache = new LRULinkedHashMap<AccountableString, List<ChunkMetadata>>(memoryThreshold) {
-      @Override
-      protected long calEntrySize(AccountableString key, List<ChunkMetadata> value) {
-        if (value.isEmpty()) {
-          return RamUsageEstimator.sizeOf(key) + RamUsageEstimator.shallowSizeOf(value);
-        }
-        long entrySize;
-        if (count < 10) {
-          long currentSize = value.get(0).calculateRamSize();
-          averageSize = ((averageSize * count) + currentSize) / (++count);
-          entrySize = RamUsageEstimator.sizeOf(key)
-              + (currentSize + RamUsageEstimator.NUM_BYTES_OBJECT_REF) * value.size()
-              + RamUsageEstimator.shallowSizeOf(value);
-        } else if (count < 100000) {
-          count++;
-          entrySize = RamUsageEstimator.sizeOf(key)
-              + (averageSize + RamUsageEstimator.NUM_BYTES_OBJECT_REF) * value.size()
-              + RamUsageEstimator.shallowSizeOf(value);
-        } else {
-          averageSize = value.get(0).calculateRamSize();
-          count = 1;
-          entrySize = RamUsageEstimator.sizeOf(key)
-              + (averageSize + RamUsageEstimator.NUM_BYTES_OBJECT_REF) * value.size()
-              + RamUsageEstimator.shallowSizeOf(value);
-        }
-        return entrySize;
-      }
-    };
-  }
-
-  public static ChunkMetadataCache getInstance() {
-    return ChunkMetadataCacheSingleton.INSTANCE;
-  }
-
-  /**
-   * get {@link ChunkMetadata}. THREAD SAFE.
-   */
-  public List<ChunkMetadata> get(String filePath, Path seriesPath,
-      TimeseriesMetadata timeseriesMetadata) throws IOException {
-    if (timeseriesMetadata == null) {
-      return Collections.emptyList();
-    }
-    if (!CACHE_ENABLE) {
-      // bloom filter part
-      TsFileSequenceReader tsFileReader = FileReaderManager.getInstance().get(filePath, true);
-      // If timeseries isn't included in the tsfile, empty list is returned.
-      return tsFileReader.readChunkMetaDataList(timeseriesMetadata);
-    }
-
-    AccountableString key = new AccountableString(filePath + IoTDBConstant.PATH_SEPARATOR
-        + seriesPath.getDevice() + IoTDBConstant.PATH_SEPARATOR + seriesPath.getMeasurement());
-
-    cacheRequestNum.incrementAndGet();
-
-    lock.readLock().lock();
-    List<ChunkMetadata> chunkMetadataList;
-    try {
-      chunkMetadataList = lruCache.get(key);
-    } finally {
-      lock.readLock().unlock();
-    }
-
-    if (chunkMetadataList != null) {
-      printCacheLog(true);
-      cacheHitNum.incrementAndGet();
-    } else {
-      printCacheLog(false);
-      TsFileSequenceReader tsFileReader = FileReaderManager.getInstance().get(filePath, true);
-      chunkMetadataList = tsFileReader.readChunkMetaDataList(timeseriesMetadata);
-      lock.writeLock().lock();
-      try {
-        lruCache.put(key, chunkMetadataList);
-      } finally {
-        lock.writeLock().unlock();
-      }
-    }
-    if (config.isDebugOn()) {
-      DEBUG_LOGGER.info(
-          "Chunk meta data list size: " + chunkMetadataList.size() + " key is: " + key.getString());
-      chunkMetadataList.forEach(c -> DEBUG_LOGGER.info(c.toString()));
-    }
-    return new ArrayList<>(chunkMetadataList);
-  }
-
-  private void printCacheLog(boolean isHit) {
-    if (!logger.isDebugEnabled()) {
-      return;
-    }
-    logger.debug(
-        "[ChunkMetaData cache {}hit] The number of requests for cache is {}, hit rate is {}.",
-        isHit ? "" : "didn't ", cacheRequestNum.get(),
-        cacheHitNum.get() * 1.0 / cacheRequestNum.get());
-  }
-
-  double calculateChunkMetaDataHitRatio() {
-    if (cacheRequestNum.get() != 0) {
-      return cacheHitNum.get() * 1.0 / cacheRequestNum.get();
-    } else {
-      return 0;
-    }
-  }
-
-  public long getUsedMemory() {
-    return lruCache.getUsedMemory();
-  }
-
-  public long getMaxMemory() {
-    return lruCache.getMaxMemory();
-  }
-
-  public double getUsedMemoryProportion() {
-    return lruCache.getUsedMemoryProportion();
-  }
-
-  public long getAverageSize() {
-    return lruCache.getAverageSize();
-  }
-
-  /**
-   * clear LRUCache.
-   */
-  public void clear() {
-    lock.writeLock().lock();
-    if (lruCache != null) {
-      lruCache.clear();
-    }
-    lock.writeLock().unlock();
-  }
-
-  public void remove(TsFileResource resource) {
-    lock.writeLock().lock();
-    if (resource != null) {
-      lruCache.entrySet()
-          .removeIf(e -> e.getKey().getString().startsWith(resource.getTsFilePath()));
-    }
-    lock.writeLock().unlock();
-  }
-
-  @TestOnly
-  public boolean isEmpty() {
-    return lruCache.isEmpty();
-  }
-
-  /**
-   * singleton pattern.
-   */
-  private static class ChunkMetadataCacheSingleton {
-
-    private static final ChunkMetadataCache INSTANCE = new
-        ChunkMetadataCache(MEMORY_THRESHOLD_IN_B);
-  }
-}
\ No newline at end of file
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index a0cf793..b329aa5 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -83,7 +83,10 @@ public class TimeSeriesMetadataCache {
           currentSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(key.device)
               + RamUsageEstimator.sizeOf(key.measurement) + RamUsageEstimator.shallowSizeOf(value)
               + RamUsageEstimator.sizeOf(value.getMeasurementId()) + RamUsageEstimator
-              .shallowSizeOf(value.getStatistics());
+              .shallowSizeOf(value.getStatistics()) +
+              (value.getChunkMetadataList().get(0).calculateRamSize()
+                  + RamUsageEstimator.NUM_BYTES_OBJECT_REF) * value.getChunkMetadataList().size()
+              + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList());
           averageSize = ((averageSize * count) + currentSize) / (++count);
         } else if (count < 100000) {
           count++;
@@ -92,7 +95,10 @@ public class TimeSeriesMetadataCache {
           averageSize = RamUsageEstimator.shallowSizeOf(key) + RamUsageEstimator.sizeOf(key.device)
               + RamUsageEstimator.sizeOf(key.measurement) + RamUsageEstimator.shallowSizeOf(value)
               + RamUsageEstimator.sizeOf(value.getMeasurementId()) + RamUsageEstimator
-              .shallowSizeOf(value.getStatistics());
+              .shallowSizeOf(value.getStatistics()) +
+              (value.getChunkMetadataList().get(0).calculateRamSize()
+                  + RamUsageEstimator.NUM_BYTES_OBJECT_REF) * value.getChunkMetadataList().size()
+              + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList());
           count = 1;
           currentSize = averageSize;
         }
@@ -193,7 +199,7 @@ public class TimeSeriesMetadataCache {
             "Get timeseries: {}.{}  metadata in file: {}  from cache: {}.", key.device,
             key.measurement, key.filePath, timeseriesMetadata);
       }
-      return timeseriesMetadata;
+      return new TimeseriesMetadata(timeseriesMetadata);
     }
   }
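
Because a cached TimeseriesMetadata now carries its whole ChunkMetadata list, the per-entry size estimate above has to include that list as well. Roughly, and under the same assumption the code makes (all chunk metadata of one series are about the same size), the estimate has this shape; the 8-byte reference size is an assumption (the real code uses RamUsageEstimator.NUM_BYTES_OBJECT_REF) and the class is only a sketch.

// Rough shape of the per-entry size estimate above, with the measured sizes passed in as longs.
class EntrySizeSketch {
  static long estimate(long keyBytes, long valueShallowBytes, long measurementIdBytes,
      long statisticsShallowBytes, long oneChunkMetadataBytes, int chunkMetadataCount,
      long chunkMetadataListShallowBytes) {
    final long objectRefBytes = 8;  // assumption: 64-bit object references
    return keyBytes + valueShallowBytes + measurementIdBytes + statisticsShallowBytes
        + (oneChunkMetadataBytes + objectRefBytes) * chunkMetadataCount
        + chunkMetadataListShallowBytes;
  }
}
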
 
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/TsFileManagement.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/TsFileManagement.java
index 3d8e755..3d88e70 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/TsFileManagement.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/TsFileManagement.java
@@ -32,7 +32,6 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.merge.manage.MergeManager;
 import org.apache.iotdb.db.engine.merge.manage.MergeResource;
@@ -322,7 +321,6 @@ public abstract class TsFileManagement {
       // clean cache
       if (IoTDBDescriptor.getInstance().getConfig().isMetaDataCacheEnable()) {
         ChunkCache.getInstance().clear();
-        ChunkMetadataCache.getInstance().clear();
         TimeSeriesMetadataCache.getInstance().clear();
       }
     } finally {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/compaction/level/LevelCompactionTsFileManagement.java b/server/src/main/java/org/apache/iotdb/db/engine/compaction/level/LevelCompactionTsFileManagement.java
index ec1ebdd..35c7f7d 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/compaction/level/LevelCompactionTsFileManagement.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/compaction/level/LevelCompactionTsFileManagement.java
@@ -41,7 +41,6 @@ import java.util.TreeSet;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.compaction.TsFileManagement;
 import org.apache.iotdb.db.engine.compaction.utils.CompactionLogAnalyzer;
 import org.apache.iotdb.db.engine.compaction.utils.CompactionLogger;
@@ -123,7 +122,6 @@ public class LevelCompactionTsFileManagement extends TsFileManagement {
   private void deleteLevelFile(TsFileResource seqFile) {
     seqFile.writeLock();
     try {
-      ChunkMetadataCache.getInstance().remove(seqFile);
       FileReaderManager.getInstance().closeFileAndRemoveReader(seqFile.getTsFilePath());
       seqFile.setDeleted(true);
       seqFile.delete();
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/merge/task/MergeFileTask.java b/server/src/main/java/org/apache/iotdb/db/engine/merge/task/MergeFileTask.java
index a21e1a0..fbf6bc1 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/merge/task/MergeFileTask.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/merge/task/MergeFileTask.java
@@ -27,7 +27,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.merge.manage.MergeContext;
 import org.apache.iotdb.db.engine.merge.manage.MergeResource;
@@ -143,7 +142,6 @@ class MergeFileTask {
 
     seqFile.writeLock();
     try {
-      ChunkMetadataCache.getInstance().remove(seqFile);
       FileReaderManager.getInstance().closeFileAndRemoveReader(seqFile.getTsFilePath());
 
       resource.removeFileReader(seqFile);
@@ -294,7 +292,6 @@ class MergeFileTask {
     seqFile.writeLock();
     try {
       resource.removeFileReader(seqFile);
-      ChunkMetadataCache.getInstance().remove(seqFile);
 
       File newMergeFile = seqFile.getTsFile();
       newMergeFile.delete();
@@ -306,7 +303,6 @@ class MergeFileTask {
       // clean cache
       if (IoTDBDescriptor.getInstance().getConfig().isMetaDataCacheEnable()) {
         ChunkCache.getInstance().clear();
-        ChunkMetadataCache.getInstance().clear();
         TimeSeriesMetadataCache.getInstance().clear();
         FileReaderManager.getInstance().closeFileAndRemoveReader(seqFile.getTsFilePath());
       }
diff --git a/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java b/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
index b3416a3..0ee2ce8 100644
--- a/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
+++ b/server/src/main/java/org/apache/iotdb/db/qp/executor/PlanExecutor.java
@@ -71,7 +71,6 @@ import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.engine.StorageEngine;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.flush.pool.FlushTaskPoolManager;
 import org.apache.iotdb.db.engine.merge.manage.MergeManager;
@@ -337,7 +336,6 @@ public class PlanExecutor implements IPlanExecutor {
 
   private void operateClearCache() {
     ChunkCache.getInstance().clear();
-    ChunkMetadataCache.getInstance().clear();
     TimeSeriesMetadataCache.getInstance().clear();
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
index 60ceab0..4e946ce 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/DiskChunkLoader.java
@@ -21,10 +21,7 @@ package org.apache.iotdb.db.query.reader.chunk;
 
 import java.io.IOException;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
-import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
 
@@ -33,17 +30,13 @@ import org.apache.iotdb.tsfile.read.controller.IChunkLoader;
  */
 public class DiskChunkLoader implements IChunkLoader {
 
-  private final TsFileResource resource;
+  public DiskChunkLoader() {
 
-  public DiskChunkLoader(TsFileResource resource) {
-    this.resource = resource;
   }
 
   @Override
   public Chunk loadChunk(ChunkMetadata chunkMetaData) throws IOException {
-    TsFileSequenceReader tsFileSequenceReader =
-        FileReaderManager.getInstance().get(resource.getTsFilePath(), resource.isClosed());
-    return ChunkCache.getInstance().get(chunkMetaData, tsFileSequenceReader);
+    return ChunkCache.getInstance().get(chunkMetaData);
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
index 2b55d3b..09a7e0e 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/DiskChunkMetadataLoader.java
@@ -18,7 +18,6 @@
  */
 package org.apache.iotdb.db.query.reader.chunk.metadata;
 
-import java.io.IOException;
 import java.util.List;
 import org.apache.iotdb.db.engine.modification.Modification;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
@@ -33,11 +32,11 @@ import org.apache.iotdb.tsfile.read.filter.basic.Filter;
 
 public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
 
-  private TsFileResource resource;
-  private PartialPath seriesPath;
-  private QueryContext context;
+  private final TsFileResource resource;
+  private final PartialPath seriesPath;
+  private final QueryContext context;
   // time filter or value filter, only used to check time range
-  private Filter filter;
+  private final Filter filter;
 
   public DiskChunkMetadataLoader(TsFileResource resource, PartialPath seriesPath,
       QueryContext context, Filter filter) {
@@ -69,18 +68,6 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
     return chunkMetadataList;
   }
 
-  /**
-   * For query v0.9/v1 tsfile only When generate temporary timeseriesMetadata set DiskChunkLoader to
-   * each chunkMetadata in the List
-   *
-   * @param chunkMetadataList
-   * @throws IOException
-   */
-  @Override
-  public void setDiskChunkLoader(List<ChunkMetadata> chunkMetadataList) {
-    setDiskChunkLoader(chunkMetadataList, resource, seriesPath, context);
-  }
-
   public static void setDiskChunkLoader(List<ChunkMetadata> chunkMetadataList,
       TsFileResource resource, PartialPath seriesPath, QueryContext context) {
     List<Modification> pathModifications =
@@ -89,10 +76,14 @@ public class DiskChunkMetadataLoader implements IChunkMetadataLoader {
     if (!pathModifications.isEmpty()) {
       QueryUtils.modifyChunkMetaData(chunkMetadataList, pathModifications);
     }
-
-    for (ChunkMetadata data : chunkMetadataList) {
-      data.setChunkLoader(new DiskChunkLoader(resource));
-    }
+    // it is ok, even if it is not thread safe, because the cost of creating a DiskChunkLoader is very cheap.
+    chunkMetadataList.forEach(chunkMetadata -> {
+      if (chunkMetadata.getChunkLoader() == null) {
+        chunkMetadata.setFilePath(resource.getTsFilePath());
+        chunkMetadata.setClosed(resource.isClosed());
+        chunkMetadata.setChunkLoader(new DiskChunkLoader());
+      }
+    });
   }
 
 }
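
The reworked setDiskChunkLoader above no longer builds a loader unconditionally for every call: it stamps each ChunkMetadata with the owning file's path and closed flag and attaches a stateless DiskChunkLoader only when none is present yet, accepting the benign race the in-code comment mentions because the loader is cheap to create. A tiny sketch of that idempotent attach step, with Entry and Loader as hypothetical stand-ins for ChunkMetadata and DiskChunkLoader:

import java.util.List;

// Sketch only: attach a cheap, stateless loader to each entry that has none yet.
class AttachSketch {

  static class Loader { }

  static class Entry {
    String filePath;
    boolean closed;
    Loader loader;
  }

  static void attach(List<Entry> entries, String tsFilePath, boolean fileClosed) {
    entries.forEach(entry -> {
      if (entry.loader == null) {      // benign race: creating an extra Loader is cheap
        entry.filePath = tsFilePath;
        entry.closed = fileClosed;
        entry.loader = new Loader();
      }
    });
  }
}
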
diff --git a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
index 396cb24..843a982 100644
--- a/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
+++ b/server/src/main/java/org/apache/iotdb/db/query/reader/chunk/metadata/MemChunkMetadataLoader.java
@@ -18,7 +18,6 @@
  */
 package org.apache.iotdb.db.query.reader.chunk.metadata;
 
-import java.io.IOException;
 import java.util.List;
 import org.apache.iotdb.db.engine.querycontext.ReadOnlyMemChunk;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
@@ -69,9 +68,4 @@ public class MemChunkMetadataLoader implements IChunkMetadataLoader {
     }
     return chunkMetadataList;
   }
-
-  @Override
-  public void setDiskChunkLoader(List<ChunkMetadata> chunkMetadataList) throws IOException {
-    // DO NOTHING
-  }
 }
diff --git a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
index 261d0f5..d7a4739 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/TSServiceImpl.java
@@ -49,7 +49,6 @@ import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.cost.statistic.Measurement;
 import org.apache.iotdb.db.cost.statistic.Operation;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.exception.BatchProcessException;
 import org.apache.iotdb.db.exception.IoTDBException;
@@ -664,11 +663,10 @@ public class TSServiceImpl implements TSIService.Iface, ServerContext {
       }
       if (config.isDebugOn()) {
         SLOW_SQL_LOGGER.info(
-            "ChunkCache used memory proportion: {}\nChunkMetadataCache used memory proportion: {}\n"
+            "ChunkCache used memory proportion: {}\n"
                 + "TimeSeriesMetadataCache used memory proportion: {}",
             ChunkCache.getInstance()
-                .getUsedMemoryProportion(),
-            ChunkMetadataCache.getInstance().getUsedMemoryProportion(), TimeSeriesMetadataCache
+                .getUsedMemoryProportion(), TimeSeriesMetadataCache
                 .getInstance().getUsedMemoryProportion());
       }
     }
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCacheTest.java b/server/src/test/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCacheTest.java
deleted file mode 100644
index 7241405..0000000
--- a/server/src/test/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCacheTest.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.iotdb.db.engine.cache;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import org.apache.iotdb.db.conf.IoTDBDescriptor;
-import org.apache.iotdb.db.constant.TestConstant;
-import org.apache.iotdb.db.engine.MetadataManagerHelper;
-import org.apache.iotdb.db.engine.flush.TsFileFlushPolicy.DirectFlushPolicy;
-import org.apache.iotdb.db.engine.querycontext.QueryDataSource;
-import org.apache.iotdb.db.engine.storagegroup.StorageGroupProcessor;
-import org.apache.iotdb.db.engine.storagegroup.TsFileProcessor;
-import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
-import org.apache.iotdb.db.exception.WriteProcessException;
-import org.apache.iotdb.db.exception.metadata.IllegalPathException;
-import org.apache.iotdb.db.exception.query.QueryProcessException;
-import org.apache.iotdb.db.metadata.PartialPath;
-import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
-import org.apache.iotdb.db.query.context.QueryContext;
-import org.apache.iotdb.db.query.control.FileReaderManager;
-import org.apache.iotdb.db.utils.EnvironmentUtils;
-import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
-import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
-import org.apache.iotdb.tsfile.read.common.Path;
-import org.apache.iotdb.tsfile.write.record.TSRecord;
-import org.apache.iotdb.tsfile.write.record.datapoint.DataPoint;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ChunkMetadataCacheTest {
-
-  private QueryContext context = EnvironmentUtils.TEST_QUERY_CONTEXT;
-
-  private String storageGroup = "root.vehicle.d0";
-  private String measurementId0 = "s0";
-  private String measurementId1 = "s1";
-  private String measurementId2 = "s2";
-  private String measurementId3 = "s3";
-  private String measurementId4 = "s4";
-  private String measurementId5 = "s5";
-  private StorageGroupProcessor storageGroupProcessor;
-  private String systemDir = TestConstant.BASE_OUTPUT_PATH.concat("data")
-      .concat(File.separator).concat("info");
-
-  private int prevUnseqLevelNum = 0;
-
-  @Before
-  public void setUp() throws Exception {
-    prevUnseqLevelNum = IoTDBDescriptor.getInstance().getConfig().getUnseqLevelNum();
-    IoTDBDescriptor.getInstance().getConfig().setUnseqLevelNum(2);
-    EnvironmentUtils.envSetUp();
-    MetadataManagerHelper.initMetadata();
-    storageGroupProcessor = new StorageGroupProcessor(systemDir, storageGroup,
-        new DirectFlushPolicy());
-    insertData();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    FileReaderManager.getInstance().closeAndRemoveAllOpenedReaders();
-    storageGroupProcessor.syncDeleteDataFiles();
-    EnvironmentUtils.cleanEnv();
-    EnvironmentUtils.cleanDir(systemDir);
-    IoTDBDescriptor.getInstance().getConfig().setUnseqLevelNum(prevUnseqLevelNum);
-  }
-
-  private void insertOneRecord(long time, int num)
-      throws WriteProcessException, IllegalPathException {
-    TSRecord record = new TSRecord(time, storageGroup);
-    record.addTuple(DataPoint.getDataPoint(TSDataType.INT32, measurementId0, String.valueOf(num)));
-    record.addTuple(DataPoint.getDataPoint(TSDataType.INT64, measurementId1, String.valueOf(num)));
-    record.addTuple(DataPoint.getDataPoint(TSDataType.FLOAT, measurementId2, String.valueOf(num)));
-    record.addTuple(DataPoint.getDataPoint(TSDataType.DOUBLE, measurementId3, String.valueOf(num)));
-    record.addTuple(DataPoint.getDataPoint(TSDataType.BOOLEAN, measurementId4, "True"));
-    InsertRowPlan insertRowPlan = new InsertRowPlan(record);
-    storageGroupProcessor.insert(insertRowPlan);
-  }
-
-  protected void insertData() throws IOException, WriteProcessException, IllegalPathException {
-    for (int j = 1; j <= 100; j++) {
-      insertOneRecord(j, j);
-    }
-    for (TsFileProcessor tsFileProcessor : storageGroupProcessor
-        .getWorkSequenceTsFileProcessors()) {
-      tsFileProcessor.syncFlush();
-    }
-
-    for (int j = 10; j >= 1; j--) {
-      insertOneRecord(j, j);
-    }
-    for (int j = 11; j <= 20; j++) {
-      insertOneRecord(j, j);
-    }
-    storageGroupProcessor.syncCloseAllWorkingTsFileProcessors();
-
-    for (int j = 21; j <= 30; j += 2) {
-      insertOneRecord(j, 0); // will be covered when read
-    }
-    storageGroupProcessor.syncCloseAllWorkingTsFileProcessors();
-
-    for (int j = 21; j <= 30; j += 2) {
-      insertOneRecord(j, j);
-    }
-    storageGroupProcessor.syncCloseAllWorkingTsFileProcessors();
-
-    insertOneRecord(2, 100);
-  }
-
-  @Test
-  public void test1() throws IOException, QueryProcessException, IllegalPathException {
-    IoTDBDescriptor.getInstance().getConfig().setMetaDataCacheEnable(false);
-    QueryDataSource queryDataSource = storageGroupProcessor
-        .query(new PartialPath(storageGroup), measurementId5, context, null, null);
-
-    List<TsFileResource> seqResources = queryDataSource.getSeqResources();
-    List<TsFileResource> unseqResources = queryDataSource.getUnseqResources();
-
-    Assert.assertEquals(1, seqResources.size());
-    Assert.assertEquals(3, unseqResources.size());
-    Assert.assertTrue(seqResources.get(0).isClosed());
-    Assert.assertTrue(unseqResources.get(0).isClosed());
-    Assert.assertTrue(unseqResources.get(1).isClosed());
-    Assert.assertTrue(unseqResources.get(2).isClosed());
-
-    List<ChunkMetadata> metaDataList = ChunkMetadataCache.getInstance()
-        .get(seqResources.get(0).getTsFilePath(), new Path(storageGroup, measurementId5), null);
-    Assert.assertEquals(0, metaDataList.size());
-  }
-
-  @Test
-  public void test2() throws IOException, QueryProcessException, IllegalPathException {
-    IoTDBDescriptor.getInstance().getConfig().setMetaDataCacheEnable(true);
-    QueryDataSource queryDataSource = storageGroupProcessor
-        .query(new PartialPath(storageGroup), measurementId5, context, null, null);
-
-    List<TsFileResource> seqResources = queryDataSource.getSeqResources();
-    List<TsFileResource> unseqResources = queryDataSource.getUnseqResources();
-
-    Assert.assertEquals(1, seqResources.size());
-    Assert.assertEquals(3, unseqResources.size());
-    Assert.assertTrue(seqResources.get(0).isClosed());
-    Assert.assertTrue(unseqResources.get(0).isClosed());
-    Assert.assertTrue(unseqResources.get(1).isClosed());
-    Assert.assertTrue(unseqResources.get(2).isClosed());
-
-    List<ChunkMetadata> metaDataList = ChunkMetadataCache.getInstance()
-        .get(seqResources.get(0).getTsFilePath(), new Path(storageGroup, measurementId5), null);
-    Assert.assertEquals(0, metaDataList.size());
-  }
-
-
-}
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/compaction/LevelCompactionTest.java b/server/src/test/java/org/apache/iotdb/db/engine/compaction/LevelCompactionTest.java
index 7fbc509..1747971 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/compaction/LevelCompactionTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/compaction/LevelCompactionTest.java
@@ -30,9 +30,7 @@ import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.constant.TestConstant;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
-import org.apache.iotdb.db.engine.merge.manage.MergeManager;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
 import org.apache.iotdb.db.exception.StorageEngineException;
 import org.apache.iotdb.db.exception.metadata.MetadataException;
@@ -91,7 +89,6 @@ abstract class LevelCompactionTest {
     IoTDBDescriptor.getInstance().getConfig()
         .setMergeChunkPointNumberThreshold(prevMergeChunkThreshold);
     ChunkCache.getInstance().clear();
-    ChunkMetadataCache.getInstance().clear();
     TimeSeriesMetadataCache.getInstance().clear();
     IoTDB.metaManager.clear();
     EnvironmentUtils.cleanAllDir();
diff --git a/server/src/test/java/org/apache/iotdb/db/engine/merge/MergeTest.java b/server/src/test/java/org/apache/iotdb/db/engine/merge/MergeTest.java
index e296014..6746aec 100644
--- a/server/src/test/java/org/apache/iotdb/db/engine/merge/MergeTest.java
+++ b/server/src/test/java/org/apache/iotdb/db/engine/merge/MergeTest.java
@@ -30,7 +30,6 @@ import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.constant.TestConstant;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.merge.manage.MergeManager;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
@@ -90,7 +89,6 @@ abstract class MergeTest {
     unseqResources.clear();
     IoTDBDescriptor.getInstance().getConfig().setMergeChunkPointNumberThreshold(prevMergeChunkThreshold);
     ChunkCache.getInstance().clear();
-    ChunkMetadataCache.getInstance().clear();
     TimeSeriesMetadataCache.getInstance().clear();
     IoTDB.metaManager.clear();
     EnvironmentUtils.cleanAllDir();
diff --git a/server/src/test/java/org/apache/iotdb/db/integration/IoTDBClearCacheIT.java b/server/src/test/java/org/apache/iotdb/db/integration/IoTDBClearCacheIT.java
index 74a20dc..578f298a 100644
--- a/server/src/test/java/org/apache/iotdb/db/integration/IoTDBClearCacheIT.java
+++ b/server/src/test/java/org/apache/iotdb/db/integration/IoTDBClearCacheIT.java
@@ -28,7 +28,6 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.utils.EnvironmentUtils;
 import org.apache.iotdb.jdbc.Config;
@@ -157,13 +156,11 @@ public class IoTDBClearCacheIT {
         Assert.assertEquals(10, cnt);
       }
       assertFalse(ChunkCache.getInstance().isEmpty());
-      assertFalse(ChunkMetadataCache.getInstance().isEmpty());
       assertFalse(TimeSeriesMetadataCache.getInstance().isEmpty());
 
       statement.execute("CLEAR CACHE");
 
       assertTrue(ChunkCache.getInstance().isEmpty());
-      assertTrue(ChunkMetadataCache.getInstance().isEmpty());
       assertTrue(TimeSeriesMetadataCache.getInstance().isEmpty());
 
     } catch (Exception e) {
diff --git a/server/src/test/java/org/apache/iotdb/db/query/reader/series/SeriesReaderTestUtil.java b/server/src/test/java/org/apache/iotdb/db/query/reader/series/SeriesReaderTestUtil.java
index 6bff6c1..b86e766 100644
--- a/server/src/test/java/org/apache/iotdb/db/query/reader/series/SeriesReaderTestUtil.java
+++ b/server/src/test/java/org/apache/iotdb/db/query/reader/series/SeriesReaderTestUtil.java
@@ -30,7 +30,6 @@ import java.util.Map;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.constant.TestConstant;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.merge.manage.MergeManager;
 import org.apache.iotdb.db.engine.storagegroup.TsFileResource;
@@ -74,7 +73,6 @@ public class SeriesReaderTestUtil {
     seqResources.clear();
     unseqResources.clear();
     ChunkCache.getInstance().clear();
-    ChunkMetadataCache.getInstance().clear();
     TimeSeriesMetadataCache.getInstance().clear();
     IoTDB.metaManager.clear();
     EnvironmentUtils.cleanAllDir();
diff --git a/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java b/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
index 2dafafd..4420922 100644
--- a/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
+++ b/server/src/test/java/org/apache/iotdb/db/utils/EnvironmentUtils.java
@@ -37,7 +37,6 @@ import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.constant.TestConstant;
 import org.apache.iotdb.db.engine.StorageEngine;
 import org.apache.iotdb.db.engine.cache.ChunkCache;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
 import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.compaction.CompactionMergeTaskPoolManager;
 import org.apache.iotdb.db.exception.StorageEngineException;
@@ -128,7 +127,6 @@ public class EnvironmentUtils {
     // clean cache
     if (config.isMetaDataCacheEnable()) {
       ChunkCache.getInstance().clear();
-      ChunkMetadataCache.getInstance().clear();
       TimeSeriesMetadataCache.getInstance().clear();
     }
     // close metadata
diff --git a/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java b/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
index ed458f2..90c596e 100644
--- a/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
+++ b/spark-iotdb-connector/src/test/scala/org/apache/iotdb/spark/db/EnvironmentUtils.java
@@ -18,6 +18,13 @@
  */
 package org.apache.iotdb.spark.db;
 
+import java.io.File;
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Locale;
 import org.apache.commons.io.FileUtils;
 import org.apache.iotdb.db.auth.AuthException;
 import org.apache.iotdb.db.auth.authorizer.BasicAuthorizer;
@@ -26,7 +33,8 @@ import org.apache.iotdb.db.conf.IoTDBConfig;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.StorageEngine;
-import org.apache.iotdb.db.engine.cache.ChunkMetadataCache;
+import org.apache.iotdb.db.engine.cache.ChunkCache;
+import org.apache.iotdb.db.engine.cache.TimeSeriesMetadataCache;
 import org.apache.iotdb.db.engine.flush.FlushManager;
 import org.apache.iotdb.db.exception.StartupException;
 import org.apache.iotdb.db.exception.StorageEngineException;
@@ -42,14 +50,6 @@ import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Locale;
-
 /**
  * <p>
  * This class is used for cleaning test environment in unit test and integration test
@@ -120,7 +120,8 @@ public class EnvironmentUtils {
     MultiFileLogNodeManager.getInstance().stop();
     // clean cache
     if (config.isMetaDataCacheEnable()) {
-      ChunkMetadataCache.getInstance().clear();
+      ChunkCache.getInstance().clear();
+      TimeSeriesMetadataCache.getInstance().clear();
     }
     // close metadata
     IoTDB.metaManager.clear();
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
index 083c9e1..981a764 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/ChunkMetadata.java
@@ -76,6 +76,9 @@ public class ChunkMetadata implements Accountable {
   // used for SeriesReader to indicate whether it is a seq/unseq timeseries metadata
   private boolean isSeq = true;
 
+  private boolean isClosed;
+  private String filePath;
+
   private ChunkMetadata() {
   }
 
@@ -289,4 +292,20 @@ public class ChunkMetadata implements Accountable {
   public boolean isSeq() {
     return isSeq;
   }
+
+  public boolean isClosed() {
+    return isClosed;
+  }
+
+  public void setClosed(boolean closed) {
+    isClosed = closed;
+  }
+
+  public String getFilePath() {
+    return filePath;
+  }
+
+  public void setFilePath(String filePath) {
+    this.filePath = filePath;
+  }
 }
\ No newline at end of file
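
The two fields added above come only with plain accessors, so callers are expected to fill them in after loading. A minimal sketch of how a caller might tag loaded chunk metadata with its source file, assuming a sealed TsFile (the class and helper names are illustrative, not part of this commit):

import java.util.List;

import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;

public class ChunkMetadataTagging {

  // Hypothetical helper: mark each chunk as belonging to a sealed (closed) TsFile
  // and remember which file holds it, using only the setters added above.
  public static void markAsClosed(List<ChunkMetadata> chunkMetadataList, String tsFilePath) {
    for (ChunkMetadata chunkMetadata : chunkMetadataList) {
      chunkMetadata.setClosed(true);
      chunkMetadata.setFilePath(tsFilePath);
    }
  }
}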
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
index a8c30dd..13ff2a0 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TimeseriesMetadata.java
@@ -58,8 +58,7 @@ public class TimeseriesMetadata implements Accountable {
   // used for SeriesReader to indicate whether it is a seq/unseq timeseries metadata
   private boolean isSeq = true;
 
-  private ByteBuffer chunkMetadataListByteBuffer;
-
+  // used to cache the chunk metadata list during serialization
   private PublicBAOS chunkMetadataListBuffer;
 
   private ArrayList<ChunkMetadata> chunkMetadataList;
@@ -87,9 +86,10 @@ public class TimeseriesMetadata implements Accountable {
     this.dataType = timeseriesMetadata.dataType;
     this.statistics = timeseriesMetadata.statistics;
     this.modified = timeseriesMetadata.modified;
+    this.chunkMetadataList = new ArrayList<>(timeseriesMetadata.chunkMetadataList);
   }
 
-  public static TimeseriesMetadata deserializeFrom(ByteBuffer buffer) {
+  public static TimeseriesMetadata deserializeFrom(ByteBuffer buffer, boolean needChunkMetadata) {
     TimeseriesMetadata timeseriesMetaData = new TimeseriesMetadata();
     timeseriesMetaData.setTimeSeriesMetadataType(ReadWriteIOUtils.readByte(buffer));
     timeseriesMetaData.setMeasurementId(ReadWriteIOUtils.readVarIntString(buffer));
@@ -99,8 +99,16 @@ public class TimeseriesMetadata implements Accountable {
     timeseriesMetaData
         .setDataSizeOfChunkMetaDataList(chunkMetaDataListDataSize);
     timeseriesMetaData.setStatistics(Statistics.deserialize(buffer, timeseriesMetaData.dataType));
-    timeseriesMetaData.chunkMetadataListByteBuffer = buffer.slice();
-    timeseriesMetaData.chunkMetadataListByteBuffer.limit(chunkMetaDataListDataSize);
+    if (needChunkMetadata) {
+      ByteBuffer byteBuffer = buffer.slice();
+      byteBuffer.limit(chunkMetaDataListDataSize);
+      timeseriesMetaData.chunkMetadataList = new ArrayList<>();
+      while (byteBuffer.hasRemaining()) {
+        timeseriesMetaData.chunkMetadataList.add(ChunkMetadata.deserializeFrom(byteBuffer, timeseriesMetaData));
+      }
+      // minimize the storage of an ArrayList instance.
+      timeseriesMetaData.chunkMetadataList.trimToSize();
+    }
     buffer.position(buffer.position() + chunkMetaDataListDataSize);
     return timeseriesMetaData;
   }
@@ -183,15 +191,6 @@ public class TimeseriesMetadata implements Accountable {
   }
 
   public List<ChunkMetadata> getChunkMetadataList() {
-    if (chunkMetadataList == null) {
-      chunkMetadataList = new ArrayList<>();
-      while (chunkMetadataListByteBuffer.hasRemaining()) {
-        chunkMetadataList.add(ChunkMetadata.deserializeFrom(chunkMetadataListByteBuffer, this));
-      }
-      // minimize the storage of an ArrayList instance.
-      chunkMetadataList.trimToSize();
-      chunkMetadataListByteBuffer = null;
-    }
     return chunkMetadataList;
   }
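
With the lazy buffer removed, the needChunkMetadata flag now decides at deserialization time whether the chunk metadata list is materialized; when it is false, getChunkMetadataList() simply returns null. A minimal sketch of the two modes, assuming buffer is positioned at one serialized TimeseriesMetadata (class and variable names are illustrative):

import java.nio.ByteBuffer;

import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;

public class DeserializeModes {

  public static void decode(ByteBuffer buffer) {
    // Index-only decode: statistics and offsets are read, the chunk metadata list stays null.
    TimeseriesMetadata indexOnly = TimeseriesMetadata.deserializeFrom(buffer.duplicate(), false);
    System.out.println(indexOnly.getMeasurementId() + " chunk metadata loaded: "
        + (indexOnly.getChunkMetadataList() != null));

    // Full decode: every ChunkMetadata is deserialized eagerly from the same bytes.
    TimeseriesMetadata withChunks = TimeseriesMetadata.deserializeFrom(buffer, true);
    for (ChunkMetadata chunkMetadata : withChunks.getChunkMetadataList()) {
      System.out.println(chunkMetadata.getMeasurementUid());
    }
  }
}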
 
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
index 1661423..e059751 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
@@ -308,7 +308,8 @@ public class TsFileSequenceReader implements AutoCloseable {
   private Map<String, TimeseriesMetadata> readDeviceMetadataFromDisk(String device)
       throws IOException {
     readFileMetadata();
-    List<TimeseriesMetadata> timeseriesMetadataList = getDeviceTimeseriesMetadata(device);
+    List<TimeseriesMetadata> timeseriesMetadataList = getDeviceTimeseriesMetadataWithoutChunkMetadata(
+        device);
     Map<String, TimeseriesMetadata> deviceMetadata = new HashMap<>();
     for (TimeseriesMetadata timeseriesMetadata : timeseriesMetadataList) {
       deviceMetadata.put(timeseriesMetadata.getMeasurementId(), timeseriesMetadata);
@@ -343,7 +344,7 @@ public class TsFileSequenceReader implements AutoCloseable {
     buffer = readData(metadataIndexPair.left.getOffset(), metadataIndexPair.right);
     while (buffer.hasRemaining()) {
       try {
-        timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer));
+        timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, true));
       } catch (BufferOverflowException e) {
         logger.error("Something error happened while deserializing TimeseriesMetadata of file {}",
             file);
@@ -389,7 +390,7 @@ public class TsFileSequenceReader implements AutoCloseable {
     while (buffer.hasRemaining()) {
       TimeseriesMetadata timeseriesMetadata;
       try {
-        timeseriesMetadata = TimeseriesMetadata.deserializeFrom(buffer);
+        timeseriesMetadata = TimeseriesMetadata.deserializeFrom(buffer, true);
       } catch (BufferOverflowException e) {
         logger.error("Something error happened while deserializing TimeseriesMetadata of file {}",
             file);
@@ -439,7 +440,7 @@ public class TsFileSequenceReader implements AutoCloseable {
           measurementMetadataIndexPair.right);
       while (buffer.hasRemaining()) {
         try {
-          timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer));
+          timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, true));
         } catch (BufferOverflowException e) {
           logger.error("Something error happened while deserializing TimeseriesMetadata of file {}",
               file);
@@ -463,51 +464,6 @@ public class TsFileSequenceReader implements AutoCloseable {
     return resultTimeseriesMetadataList;
   }
 
-  /**
-   * Traverse and read TimeseriesMetadata of specific measurements in one device. This method need
-   * to deserialize all TimeseriesMetadata and then judge, in order to avoid frequent I/O when the
-   * number of queried measurements is too large. Attention: This method is not used currently
-   *
-   * @param timeseriesMetadataList TimeseriesMetadata list, to store the result
-   * @param type                   MetadataIndexNode type
-   * @param metadataIndexPair      <MetadataIndexEntry, offset> pair
-   * @param measurements           measurements to be queried
-   * @throws IOException io error
-   */
-  private void traverseAndReadTimeseriesMetadataInOneDevice(
-      List<TimeseriesMetadata> timeseriesMetadataList, MetadataIndexNodeType type,
-      Pair<MetadataIndexEntry, Long> metadataIndexPair, Set<String> measurements)
-      throws IOException {
-    ByteBuffer buffer = readData(metadataIndexPair.left.getOffset(), metadataIndexPair.right);
-    switch (type) {
-      case LEAF_DEVICE:
-      case INTERNAL_MEASUREMENT:
-        MetadataIndexNode metadataIndexNode = MetadataIndexNode.deserializeFrom(buffer);
-        int metadataIndexListSize = metadataIndexNode.getChildren().size();
-        for (int i = 0; i < metadataIndexListSize; i++) {
-          long endOffset = metadataIndexNode.getEndOffset();
-          if (i != metadataIndexListSize - 1) {
-            endOffset = metadataIndexNode.getChildren().get(i + 1).getOffset();
-          }
-          traverseAndReadTimeseriesMetadataInOneDevice(timeseriesMetadataList,
-              metadataIndexNode.getNodeType(),
-              new Pair<>(metadataIndexNode.getChildren().get(i), endOffset), measurements);
-        }
-        break;
-      case LEAF_MEASUREMENT:
-        while (buffer.hasRemaining()) {
-          TimeseriesMetadata timeseriesMetadata = TimeseriesMetadata.deserializeFrom(buffer);
-          if (measurements.contains(timeseriesMetadata.getMeasurementId())) {
-            timeseriesMetadataList.add(timeseriesMetadata);
-          }
-        }
-        break;
-      default:
-        throw new IOException("Failed to traverse and read TimeseriesMetadata in device: " +
-            metadataIndexPair.left.getName() + ". Wrong MetadataIndexEntry type.");
-    }
-  }
-
   protected int binarySearchInTimeseriesMetadataList(
       List<TimeseriesMetadata> timeseriesMetadataList, String key) {
     int low = 0;
@@ -577,35 +533,15 @@ public class TsFileSequenceReader implements AutoCloseable {
   public Map<String, List<ChunkMetadata>> readChunkMetadataInDevice(String device)
       throws IOException {
     readFileMetadata();
-
-    long start = 0;
-    int size = 0;
     List<TimeseriesMetadata> timeseriesMetadataMap = getDeviceTimeseriesMetadata(device);
     if (timeseriesMetadataMap.isEmpty()) {
       return new HashMap<>();
     }
-    for (TimeseriesMetadata timeseriesMetadata : timeseriesMetadataMap) {
-      if (start == 0) {
-        start = timeseriesMetadata.getOffsetOfChunkMetaDataList();
-      }
-      size += timeseriesMetadata.getDataSizeOfChunkMetaDataList();
-    }
-    // read buffer of all ChunkMetadatas of this device
-    ByteBuffer buffer = readData(start, size);
     Map<String, List<ChunkMetadata>> seriesMetadata = new HashMap<>();
-    int index = 0;
-    int curSize = timeseriesMetadataMap.get(index).getDataSizeOfChunkMetaDataList();
-    while (buffer.hasRemaining()) {
-      if (buffer.position() >= curSize) {
-        index++;
-        curSize += timeseriesMetadataMap.get(index).getDataSizeOfChunkMetaDataList();
-      }
-      ChunkMetadata chunkMetadata = ChunkMetadata
-          .deserializeFrom(buffer, timeseriesMetadataMap.get(index));
-      seriesMetadata.computeIfAbsent(chunkMetadata.getMeasurementUid(), key -> new ArrayList<>())
-          .add(chunkMetadata);
+    for (TimeseriesMetadata timeseriesMetadata : timeseriesMetadataMap) {
+      seriesMetadata
+          .put(timeseriesMetadata.getMeasurementId(), timeseriesMetadata.getChunkMetadataList());
     }
-
     return seriesMetadata;
   }
 
@@ -633,15 +569,17 @@ public class TsFileSequenceReader implements AutoCloseable {
    * @param buffer                byte buffer
    * @param deviceId              String
    * @param timeseriesMetadataMap map: deviceId -> timeseriesMetadata list
+   * @param needChunkMetadata     whether to deserialize the chunk metadata list
    */
   private void generateMetadataIndex(MetadataIndexEntry metadataIndex, ByteBuffer buffer,
       String deviceId, MetadataIndexNodeType type,
-      Map<String, List<TimeseriesMetadata>> timeseriesMetadataMap) throws IOException {
+      Map<String, List<TimeseriesMetadata>> timeseriesMetadataMap, boolean needChunkMetadata)
+      throws IOException {
     try {
       if (type.equals(MetadataIndexNodeType.LEAF_MEASUREMENT)) {
         List<TimeseriesMetadata> timeseriesMetadataList = new ArrayList<>();
         while (buffer.hasRemaining()) {
-          timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer));
+          timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, needChunkMetadata));
         }
         timeseriesMetadataMap.computeIfAbsent(deviceId, k -> new ArrayList<>())
             .addAll(timeseriesMetadataList);
@@ -657,7 +595,7 @@ public class TsFileSequenceReader implements AutoCloseable {
           ByteBuffer nextBuffer = readData(metadataIndexNode.getChildren().get(i).getOffset(),
               endOffset);
           generateMetadataIndex(metadataIndexNode.getChildren().get(i), nextBuffer, deviceId,
-              metadataIndexNode.getNodeType(), timeseriesMetadataMap);
+              metadataIndexNode.getNodeType(), timeseriesMetadataMap, needChunkMetadata);
         }
       }
     } catch (BufferOverflowException e) {
@@ -666,6 +604,9 @@ public class TsFileSequenceReader implements AutoCloseable {
     }
   }
 
+  /**
+   * The returned TimeseriesMetadata do not have their chunk metadata lists deserialized.
+   */
   public Map<String, List<TimeseriesMetadata>> getAllTimeseriesMetadata() throws IOException {
     if (tsFileMetaData == null) {
       readFileMetadata();
@@ -681,11 +622,37 @@ public class TsFileSequenceReader implements AutoCloseable {
       }
       ByteBuffer buffer = readData(metadataIndexEntry.getOffset(), endOffset);
       generateMetadataIndex(metadataIndexEntry, buffer, null,
-          metadataIndexNode.getNodeType(), timeseriesMetadataMap);
+          metadataIndexNode.getNodeType(), timeseriesMetadataMap, false);
     }
     return timeseriesMetadataMap;
   }
 
+  /**
+   * This method only deserializes the TimeseriesMetadata, without the chunk metadata lists.
+   */
+  private List<TimeseriesMetadata> getDeviceTimeseriesMetadataWithoutChunkMetadata(String device)
+      throws IOException {
+    MetadataIndexNode metadataIndexNode = tsFileMetaData.getMetadataIndex();
+    Pair<MetadataIndexEntry, Long> metadataIndexPair = getMetadataAndEndOffset(
+        metadataIndexNode, device, true, true);
+    if (metadataIndexPair == null) {
+      return Collections.emptyList();
+    }
+    ByteBuffer buffer = readData(metadataIndexPair.left.getOffset(), metadataIndexPair.right);
+    Map<String, List<TimeseriesMetadata>> timeseriesMetadataMap = new TreeMap<>();
+    generateMetadataIndex(metadataIndexPair.left, buffer, device,
+        MetadataIndexNodeType.INTERNAL_MEASUREMENT, timeseriesMetadataMap, false);
+    List<TimeseriesMetadata> deviceTimeseriesMetadata = new ArrayList<>();
+    for (List<TimeseriesMetadata> timeseriesMetadataList : timeseriesMetadataMap.values()) {
+      deviceTimeseriesMetadata.addAll(timeseriesMetadataList);
+    }
+    return deviceTimeseriesMetadata;
+  }
+
+  /**
+   * This method deserializes the TimeseriesMetadata together with all of their chunk metadata
+   * lists.
+   */
   private List<TimeseriesMetadata> getDeviceTimeseriesMetadata(String device) throws IOException {
     MetadataIndexNode metadataIndexNode = tsFileMetaData.getMetadataIndex();
     Pair<MetadataIndexEntry, Long> metadataIndexPair = getMetadataAndEndOffset(
@@ -696,7 +663,7 @@ public class TsFileSequenceReader implements AutoCloseable {
     ByteBuffer buffer = readData(metadataIndexPair.left.getOffset(), metadataIndexPair.right);
     Map<String, List<TimeseriesMetadata>> timeseriesMetadataMap = new TreeMap<>();
     generateMetadataIndex(metadataIndexPair.left, buffer, device,
-        MetadataIndexNodeType.INTERNAL_MEASUREMENT, timeseriesMetadataMap);
+        MetadataIndexNodeType.INTERNAL_MEASUREMENT, timeseriesMetadataMap, true);
     List<TimeseriesMetadata> deviceTimeseriesMetadata = new ArrayList<>();
     for (List<TimeseriesMetadata> timeseriesMetadataList : timeseriesMetadataMap.values()) {
       deviceTimeseriesMetadata.addAll(timeseriesMetadataList);
@@ -729,7 +696,8 @@ public class TsFileSequenceReader implements AutoCloseable {
         Pair<MetadataIndexEntry, Long> childIndexEntry = metadataIndex
             .getChildIndexEntry(name, false);
         ByteBuffer buffer = readData(childIndexEntry.left.getOffset(), childIndexEntry.right);
-        return getMetadataAndEndOffset(MetadataIndexNode.deserializeFrom(buffer), name, isDeviceLevel,
+        return getMetadataAndEndOffset(MetadataIndexNode.deserializeFrom(buffer), name,
+            isDeviceLevel,
             false);
       }
     } catch (BufferOverflowException e) {
@@ -1145,24 +1113,7 @@ public class TsFileSequenceReader implements AutoCloseable {
    */
   public List<ChunkMetadata> readChunkMetaDataList(TimeseriesMetadata timeseriesMetaData)
       throws IOException {
-    try {
-      readFileMetadata();
-      ArrayList<ChunkMetadata> chunkMetadataList = new ArrayList<>();
-      long startOffsetOfChunkMetadataList = timeseriesMetaData.getOffsetOfChunkMetaDataList();
-      int dataSizeOfChunkMetadataList = timeseriesMetaData.getDataSizeOfChunkMetaDataList();
-
-      ByteBuffer buffer = readData(startOffsetOfChunkMetadataList, dataSizeOfChunkMetadataList);
-      while (buffer.hasRemaining()) {
-        chunkMetadataList.add(ChunkMetadata.deserializeFrom(buffer, timeseriesMetaData));
-      }
-
-      // minimize the storage of an ArrayList instance.
-      chunkMetadataList.trimToSize();
-      return chunkMetadataList;
-    } catch (BufferOverflowException e) {
-      logger.error("Something error happened while reading ChunkMetaDataList of file {}", file);
-      throw e;
-    }
+    return timeseriesMetaData.getChunkMetadataList();
   }
 
   /**
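
The net effect of the reader changes: getAllTimeseriesMetadata now produces lightweight entries (needChunkMetadata = false), while the per-device paths deserialize chunk metadata eagerly, so readChunkMetadataInDevice and readChunkMetaDataList no longer issue extra disk reads. A minimal usage sketch, assuming "test.tsfile" is a sealed TsFile containing device "root.sg.d1" (both names are illustrative):

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.read.TsFileSequenceReader;

public class ReadDeviceChunkMetadata {

  public static void main(String[] args) throws IOException {
    try (TsFileSequenceReader reader = new TsFileSequenceReader("test.tsfile")) {
      // Chunk metadata now comes straight out of the eagerly deserialized
      // TimeseriesMetadata instead of a second pass over the file.
      Map<String, List<ChunkMetadata>> chunkMetadataInDevice =
          reader.readChunkMetadataInDevice("root.sg.d1");
      chunkMetadataInDevice.forEach((measurement, chunks) ->
          System.out.println(measurement + " -> " + chunks.size() + " chunks"));
    }
  }
}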
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IChunkMetadataLoader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IChunkMetadataLoader.java
index bbda4b8..95da50f 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IChunkMetadataLoader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IChunkMetadataLoader.java
@@ -31,12 +31,4 @@ public interface IChunkMetadataLoader {
    */
   List<ChunkMetadata> loadChunkMetadataList(TimeseriesMetadata timeseriesMetadata)
       throws IOException;
-
-  /**
-   * For query 0.9/v1 tsfile only
-   * @param chunkMetadataList
-   * @throws IOException
-   */
-  void setDiskChunkLoader(List<ChunkMetadata> chunkMetadataList) throws IOException;
-
 }
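
With setDiskChunkLoader gone, the interface is down to a single method. A hypothetical in-memory implementation (not part of this commit) that simply hands back a precomputed list, to illustrate the remaining contract:

import java.util.List;

import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
import org.apache.iotdb.tsfile.read.controller.IChunkMetadataLoader;

public class PrecomputedChunkMetadataLoader implements IChunkMetadataLoader {

  private final List<ChunkMetadata> chunkMetadataList;

  public PrecomputedChunkMetadataLoader(List<ChunkMetadata> chunkMetadataList) {
    this.chunkMetadataList = chunkMetadataList;
  }

  @Override
  public List<ChunkMetadata> loadChunkMetadataList(TimeseriesMetadata timeseriesMetadata) {
    // Ignores the TimeseriesMetadata argument; the list was supplied up front.
    return chunkMetadataList;
  }
}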
diff --git a/tsfile/src/test/java/org/apache/iotdb/tsfile/file/metadata/TimeSeriesMetadataTest.java b/tsfile/src/test/java/org/apache/iotdb/tsfile/file/metadata/TimeSeriesMetadataTest.java
index b5ba185..05bdc3e 100644
--- a/tsfile/src/test/java/org/apache/iotdb/tsfile/file/metadata/TimeSeriesMetadataTest.java
+++ b/tsfile/src/test/java/org/apache/iotdb/tsfile/file/metadata/TimeSeriesMetadataTest.java
@@ -67,7 +67,7 @@ public class TimeSeriesMetadataTest {
       ByteBuffer buffer = ByteBuffer.allocate((int) fch.size());
       fch.read(buffer);
       buffer.flip();
-      metaData = TimeseriesMetadata.deserializeFrom(buffer);
+      metaData = TimeseriesMetadata.deserializeFrom(buffer, true);
       return metaData;
     } catch (IOException e) {
       e.printStackTrace();