Posted to commits@iotdb.apache.org by ja...@apache.org on 2021/05/27 11:14:06 UTC

[iotdb] branch ChunkCacheBug created (now aadbf83)

This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a change to branch ChunkCacheBug
in repository https://gitbox.apache.org/repos/asf/iotdb.git.


      at aadbf83  use google guava cache

This branch includes the following new commits:

     new aadbf83  use google guava cache

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


[iotdb] 01/01: use google guava cache

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a commit to branch ChunkCacheBug
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit aadbf83a790457ddec7e0bfc4ef680c597e15253
Author: JackieTien97 <Ja...@foxmail.com>
AuthorDate: Thu May 27 19:12:34 2021 +0800

    use google guava cache
---
 server/pom.xml                                     |   6 +
 .../db/engine/cache/CacheHitRatioMonitor.java      |  16 +-
 .../engine/cache/CacheHitRatioMonitorMXBean.java   |   8 +-
 .../apache/iotdb/db/engine/cache/ChunkCache.java   | 154 +++++-----
 .../iotdb/db/engine/cache/LRULinkedHashMap.java    | 138 ---------
 .../db/engine/cache/TimeSeriesMetadataCache.java   | 319 ++++++++++-----------
 6 files changed, 228 insertions(+), 413 deletions(-)
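
For orientation before the per-file hunks: this commit swaps the hand-rolled
LRULinkedHashMap for a weight-bounded Guava LoadingCache. The following is a
minimal, self-contained sketch of that pattern, not IoTDB code; the key type,
weights, and loader body are illustrative.

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;
    import com.google.common.cache.Weigher;

    import java.util.concurrent.ExecutionException;

    public class WeightedLoadingCacheSketch {
      public static void main(String[] args) throws ExecutionException {
        LoadingCache<String, byte[]> cache =
            CacheBuilder.newBuilder()
                // Bound the cache by total weight (here: payload bytes), not entry count.
                .maximumWeight(1024L * 1024)
                .weigher(
                    new Weigher<String, byte[]>() {
                      @Override
                      public int weigh(String key, byte[] value) {
                        return value.length; // weight of a single entry
                      }
                    })
                .recordStats() // without this, stats() reports all zeros
                .build(
                    new CacheLoader<String, byte[]>() {
                      @Override
                      public byte[] load(String key) {
                        // Called once per key on a miss; concurrent callers for the
                        // same key block on the same load instead of loading twice.
                        return key.getBytes();
                      }
                    });

        byte[] value = cache.get("root.sg1.d1.s1"); // miss: runs the loader
        cache.get("root.sg1.d1.s1");                // hit: served from cache
        System.out.println(value.length + ", hit rate = " + cache.stats().hitRate());
      }
    }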

diff --git a/server/pom.xml b/server/pom.xml
index 08944e2..0597e03 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -213,6 +213,12 @@
             <version>4.3.5</version>
             <scope>compile</scope>
         </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>29.0-jre</version>
+            <scope>compile</scope>
+        </dependency>
     </dependencies>
     <build>
         <plugins>
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
index 51e9b1b..beda7fc 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitor.java
@@ -61,8 +61,8 @@ public class CacheHitRatioMonitor implements CacheHitRatioMonitorMXBean, IServic
   }
 
   @Override
-  public long getChunkCacheUsedMemory() {
-    return ChunkCache.getInstance().getUsedMemory();
+  public long getChunkEvictionCount() {
+    return ChunkCache.getInstance().getEvictionCount();
   }
 
   @Override
@@ -71,8 +71,8 @@ public class CacheHitRatioMonitor implements CacheHitRatioMonitorMXBean, IServic
   }
 
   @Override
-  public double getChunkCacheUsedMemoryProportion() {
-    return ChunkCache.getInstance().getUsedMemoryProportion();
+  public double getChunkCacheAverageLoadPenalty() {
+    return ChunkCache.getInstance().getAverageLoadPenalty();
   }
 
   @Override
@@ -86,8 +86,8 @@ public class CacheHitRatioMonitor implements CacheHitRatioMonitorMXBean, IServic
   }
 
   @Override
-  public long getTimeSeriesMetadataCacheUsedMemory() {
-    return TimeSeriesMetadataCache.getInstance().getUsedMemory();
+  public long getTimeSeriesMetadataCacheEvictionCount() {
+    return TimeSeriesMetadataCache.getInstance().getEvictionCount();
   }
 
   @Override
@@ -96,8 +96,8 @@ public class CacheHitRatioMonitor implements CacheHitRatioMonitorMXBean, IServic
   }
 
   @Override
-  public double getTimeSeriesCacheUsedMemoryProportion() {
-    return TimeSeriesMetadataCache.getInstance().getUsedMemoryProportion();
+  public double getTimeSeriesCacheAverageLoadPenalty() {
+    return TimeSeriesMetadataCache.getInstance().getAverageLoadPenalty();
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
index 854fb1a..b1d48b7 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/CacheHitRatioMonitorMXBean.java
@@ -22,21 +22,21 @@ public interface CacheHitRatioMonitorMXBean {
 
   double getChunkHitRatio();
 
-  long getChunkCacheUsedMemory();
+  long getChunkEvictionCount();
 
   long getChunkCacheMaxMemory();
 
-  double getChunkCacheUsedMemoryProportion();
+  double getChunkCacheAverageLoadPenalty();
 
   long getChunkCacheAverageSize();
 
   double getTimeSeriesMetadataHitRatio();
 
-  long getTimeSeriesMetadataCacheUsedMemory();
+  long getTimeSeriesMetadataCacheEvictionCount();
 
   long getTimeSeriesMetadataCacheMaxMemory();
 
-  double getTimeSeriesCacheUsedMemoryProportion();
+  double getTimeSeriesCacheAverageLoadPenalty();
 
   long getTimeSeriesMetaDataCacheAverageSize();
 
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index 556075f..bb7b095 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -28,13 +28,16 @@ import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.utils.RamUsageEstimator;
 
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.cache.Weigher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * This class is used to cache <code>Chunk</code> of <code>ChunkMetaData</code> in IoTDB. The
@@ -49,39 +52,59 @@ public class ChunkCache {
       config.getAllocateMemoryForChunkCache();
   private static final boolean CACHE_ENABLE = config.isMetaDataCacheEnable();
 
-  private final LRULinkedHashMap<ChunkMetadata, Chunk> lruCache;
+  private final LoadingCache<ChunkMetadata, Chunk> lruCache;
 
-  private final AtomicLong cacheHitNum = new AtomicLong();
-  private final AtomicLong cacheRequestNum = new AtomicLong();
-
-  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+  private final AtomicLong entryAverageSize = new AtomicLong(0);
 
   private ChunkCache() {
     if (CACHE_ENABLE) {
       logger.info("ChunkCache size = " + MEMORY_THRESHOLD_IN_CHUNK_CACHE);
     }
     lruCache =
-        new LRULinkedHashMap<ChunkMetadata, Chunk>(MEMORY_THRESHOLD_IN_CHUNK_CACHE) {
-
-          @Override
-          protected long calEntrySize(ChunkMetadata key, Chunk value) {
-            long currentSize;
-            if (count < 10) {
-              currentSize =
-                  RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.sizeOf(value);
-              averageSize = ((averageSize * count) + currentSize) / (++count);
-            } else if (count < 100000) {
-              count++;
-              currentSize = averageSize;
-            } else {
-              averageSize =
-                  RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.sizeOf(value);
-              count = 1;
-              currentSize = averageSize;
-            }
-            return currentSize;
-          }
-        };
+        CacheBuilder.newBuilder()
+            .maximumWeight(MEMORY_THRESHOLD_IN_CHUNK_CACHE)
+            .weigher(
+                new Weigher<ChunkMetadata, Chunk>() {
+
+                  int count = 0;
+                  int averageSize = 0;
+
+                  @Override
+                  public int weigh(ChunkMetadata chunkMetadata, Chunk chunk) {
+                    int currentSize;
+                    if (count < 10) {
+                      currentSize =
+                          (int)
+                              (RamUsageEstimator.NUM_BYTES_OBJECT_REF
+                                  + RamUsageEstimator.sizeOf(chunk));
+                      averageSize = ((averageSize * count) + currentSize) / (++count);
+                      entryAverageSize.set(averageSize);
+                    } else if (count < 100000) {
+                      count++;
+                      currentSize = averageSize;
+                    } else {
+                      averageSize =
+                          (int)
+                              (RamUsageEstimator.NUM_BYTES_OBJECT_REF
+                                  + RamUsageEstimator.sizeOf(chunk));
+                      count = 1;
+                      currentSize = averageSize;
+                      entryAverageSize.set(averageSize);
+                    }
+                    return currentSize;
+                  }
+                })
+            .recordStats()
+            .build(
+                new CacheLoader<ChunkMetadata, Chunk>() {
+                  @Override
+                  public Chunk load(ChunkMetadata chunkMetadata) throws Exception {
+                    TsFileSequenceReader reader =
+                        FileReaderManager.getInstance()
+                            .get(chunkMetadata.getFilePath(), chunkMetadata.isClosed());
+                    return reader.readMemChunk(chunkMetadata);
+                  }
+                });
   }
 
   public static ChunkCache getInstance() {
@@ -105,42 +128,18 @@ public class ChunkCache {
           chunkMetaData.getStatistics());
     }
 
-    cacheRequestNum.incrementAndGet();
-
     Chunk chunk;
-    lock.readLock().lock();
     try {
       chunk = lruCache.get(chunkMetaData);
-    } finally {
-      lock.readLock().unlock();
-    }
-    if (chunk != null) {
-      cacheHitNum.incrementAndGet();
-      printCacheLog(true);
-    } else {
-      printCacheLog(false);
-      TsFileSequenceReader reader =
-          FileReaderManager.getInstance()
-              .get(chunkMetaData.getFilePath(), chunkMetaData.isClosed());
-      try {
-        chunk = reader.readMemChunk(chunkMetaData);
-      } catch (IOException e) {
-        logger.error("something wrong happened while reading {}", reader.getFileName());
-        throw e;
-      }
-      lock.writeLock().lock();
-      try {
-        if (!lruCache.containsKey(chunkMetaData)) {
-          lruCache.put(chunkMetaData, chunk);
-        }
-      } finally {
-        lock.writeLock().unlock();
-      }
+    } catch (ExecutionException e) {
+      logger.error("something wrong happened while loading {}", chunkMetaData);
+      throw new IOException(e);
     }
 
     if (debug) {
       DEBUG_LOGGER.info("get chunk from cache whose meta data is: " + chunkMetaData);
     }
+
     return new Chunk(
         chunk.getHeader(),
         chunk.getData().duplicate(),
@@ -148,61 +147,38 @@ public class ChunkCache {
         chunkMetaData.getStatistics());
   }
 
-  private void printCacheLog(boolean isHit) {
-    if (!logger.isDebugEnabled()) {
-      return;
-    }
-    logger.debug(
-        "[ChunkMetaData cache {}hit] The number of requests for cache is {}, hit rate is {}.",
-        isHit ? "" : "didn't ",
-        cacheRequestNum.get(),
-        cacheHitNum.get() * 1.0 / cacheRequestNum.get());
-  }
-
   public double calculateChunkHitRatio() {
-    if (cacheRequestNum.get() != 0) {
-      return cacheHitNum.get() * 1.0 / cacheRequestNum.get();
-    } else {
-      return 0;
-    }
+    return lruCache.stats().hitRate();
   }
 
-  public long getUsedMemory() {
-    return lruCache.getUsedMemory();
+  public long getEvictionCount() {
+    return lruCache.stats().evictionCount();
   }
 
   public long getMaxMemory() {
-    return lruCache.getMaxMemory();
+    return MEMORY_THRESHOLD_IN_CHUNK_CACHE;
   }
 
-  public double getUsedMemoryProportion() {
-    return lruCache.getUsedMemoryProportion();
+  public double getAverageLoadPenalty() {
+    return lruCache.stats().averageLoadPenalty();
   }
 
   public long getAverageSize() {
-    return lruCache.getAverageSize();
+    return entryAverageSize.get();
   }
 
   /** clear LRUCache. */
   public void clear() {
-    lock.writeLock().lock();
-    if (lruCache != null) {
-      lruCache.clear();
-    }
-    lock.writeLock().unlock();
+    lruCache.cleanUp();
   }
 
   public void remove(ChunkMetadata chunkMetaData) {
-    lock.writeLock().lock();
-    if (chunkMetaData != null) {
-      lruCache.remove(chunkMetaData);
-    }
-    lock.writeLock().unlock();
+    lruCache.invalidate(chunkMetaData);
   }
 
   @TestOnly
   public boolean isEmpty() {
-    return lruCache.isEmpty();
+    return lruCache.size() == 0;
   }
 
   /** singleton pattern. */
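
Two Guava behaviors in the ChunkCache rewrite above are easy to misread.
First, LoadingCache.get wraps any loader failure in a checked
ExecutionException, which is why getChunk rewraps it into an IOException.
Second, cleanUp() only performs pending maintenance such as deferred
evictions; invalidateAll() is the call that actually discards every entry. A
hedged, standalone sketch (names and the loader are illustrative):

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    import java.io.IOException;
    import java.util.concurrent.ExecutionException;

    public class LoaderFailureSketch {
      private static final LoadingCache<String, String> CACHE =
          CacheBuilder.newBuilder()
              .maximumSize(100)
              .build(
                  new CacheLoader<String, String>() {
                    @Override
                    public String load(String key) throws IOException {
                      if (key.isEmpty()) {
                        throw new IOException("cannot load empty key");
                      }
                      return "value-for-" + key;
                    }
                  });

      static String get(String key) throws IOException {
        try {
          return CACHE.get(key); // loads on miss
        } catch (ExecutionException e) {
          // Guava wraps the loader's checked exception; rewrap it for callers.
          throw new IOException(e);
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(get("chunk1"));
        CACHE.invalidateAll(); // discards all entries
        CACHE.cleanUp();       // only runs pending maintenance, does not empty the cache
      }
    }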
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java
deleted file mode 100644
index 6246574..0000000
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/LRULinkedHashMap.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.iotdb.db.engine.cache;
-
-import org.apache.iotdb.tsfile.common.cache.Accountable;
-
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.Map.Entry;
-import java.util.Set;
-
-/** This class is an LRU cache. <b>Note: It's not thread safe.</b> */
-public abstract class LRULinkedHashMap<K extends Accountable, V> {
-
-  private static final float LOAD_FACTOR_MAP = 0.75f;
-  private static final int INITIAL_CAPACITY = 128;
-  private static final float RETAIN_PERCENT = 0.9f;
-  private static final int MAP_ENTRY_SIZE = 40;
-
-  private final LinkedHashMap<K, V> linkedHashMap;
-
-  /** maximum memory threshold. */
-  private final long maxMemory;
-  /** current used memory. */
-  private long usedMemory;
-
-  /** memory size we need to retain while the cache is full */
-  private final long retainMemory;
-
-  protected int count = 0;
-  protected long averageSize = 0;
-
-  public LRULinkedHashMap(long maxMemory) {
-    this.linkedHashMap = new LinkedHashMap<>(INITIAL_CAPACITY, LOAD_FACTOR_MAP, true);
-    this.maxMemory = maxMemory;
-    this.retainMemory = (long) (maxMemory * RETAIN_PERCENT);
-  }
-
-  public V put(K key, V value) {
-    long size = calEntrySize(key, value) + MAP_ENTRY_SIZE;
-    key.setRamSize(size);
-    usedMemory += size;
-    V v = linkedHashMap.put(key, value);
-    if (usedMemory > maxMemory) {
-      Iterator<Entry<K, V>> iterator = linkedHashMap.entrySet().iterator();
-      while (usedMemory > retainMemory && iterator.hasNext()) {
-        Entry<K, V> entry = iterator.next();
-        usedMemory -= entry.getKey().getRamSize();
-        iterator.remove();
-      }
-    }
-    return v;
-  }
-
-  public V get(K key) {
-    return linkedHashMap.get(key);
-  }
-
-  public boolean containsKey(K key) {
-    return linkedHashMap.containsKey(key);
-  }
-
-  public void clear() {
-    linkedHashMap.clear();
-    usedMemory = 0;
-    count = 0;
-    averageSize = 0;
-  }
-
-  public V remove(K key) {
-    V v = linkedHashMap.remove(key);
-    if (v != null && key != null) {
-      usedMemory -= key.getRamSize();
-    }
-    return v;
-  }
-
-  /** approximately estimate the additional size of key and value. */
-  protected abstract long calEntrySize(K key, V value);
-
-  /** calculate the proportion of used memory. */
-  public double getUsedMemoryProportion() {
-    return usedMemory * 1.0 / maxMemory;
-  }
-
-  public long getUsedMemory() {
-    return usedMemory;
-  }
-
-  public long getMaxMemory() {
-    return maxMemory;
-  }
-
-  public long getAverageSize() {
-    return averageSize;
-  }
-
-  public Set<Entry<K, V>> entrySet() {
-    return linkedHashMap.entrySet();
-  }
-
-  public boolean isEmpty() {
-    return linkedHashMap.isEmpty();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    return super.hashCode();
-  }
-}
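
The deleted LRULinkedHashMap above layered a memory budget on an
access-ordered java.util.LinkedHashMap. For reference, a condensed sketch of
that underlying idiom, simplified here to an entry-count bound instead of the
class's memory bound:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Minimal LRU map. The deleted class relied on the same accessOrder=true
    // trick, but iterated the entry set to evict down to a byte budget instead
    // of overriding removeEldestEntry.
    public class LruSketch<K, V> extends LinkedHashMap<K, V> {
      private final int maxEntries;

      public LruSketch(int maxEntries) {
        super(128, 0.75f, true); // accessOrder=true: get() moves entries to the tail
        this.maxEntries = maxEntries;
      }

      @Override
      protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > maxEntries; // drop the least-recently-used head entry
      }
    }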
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index 17d3f32..f5ff011 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -24,7 +24,6 @@ import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
 import org.apache.iotdb.db.query.control.FileReaderManager;
 import org.apache.iotdb.db.utils.TestOnly;
-import org.apache.iotdb.tsfile.common.cache.Accountable;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
@@ -32,6 +31,10 @@ import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.utils.BloomFilter;
 import org.apache.iotdb.tsfile.utils.RamUsageEstimator;
 
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.cache.Weigher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,14 +42,13 @@ import java.io.IOException;
 import java.lang.ref.WeakReference;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.WeakHashMap;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * This class is used to cache <code>TimeSeriesMetadata</code> in IoTDB. The caching strategy is
@@ -61,12 +63,9 @@ public class TimeSeriesMetadataCache {
       config.getAllocateMemoryForTimeSeriesMetaDataCache();
   private static final boolean CACHE_ENABLE = config.isMetaDataCacheEnable();
 
-  private final LRULinkedHashMap<TimeSeriesMetadataCacheKey, TimeseriesMetadata> lruCache;
+  private final LoadingCache<TimeSeriesMetadataCacheKey, TimeseriesMetadata> lruCache;
 
-  private final AtomicLong cacheHitNum = new AtomicLong();
-  private final AtomicLong cacheRequestNum = new AtomicLong();
-
-  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+  private final AtomicLong entryAverageSize = new AtomicLong(0);
 
   private final Map<String, WeakReference<String>> devices =
       Collections.synchronizedMap(new WeakHashMap<>());
@@ -78,46 +77,75 @@ public class TimeSeriesMetadataCache {
           "TimeseriesMetadataCache size = " + MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE);
     }
     lruCache =
-        new LRULinkedHashMap<TimeSeriesMetadataCacheKey, TimeseriesMetadata>(
-            MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE) {
-
-          @Override
-          protected long calEntrySize(TimeSeriesMetadataCacheKey key, TimeseriesMetadata value) {
-            long currentSize;
-            if (count < 10) {
-              currentSize =
-                  RamUsageEstimator.shallowSizeOf(key)
-                      + RamUsageEstimator.sizeOf(key.device)
-                      + RamUsageEstimator.sizeOf(key.measurement)
-                      + RamUsageEstimator.shallowSizeOf(value)
-                      + RamUsageEstimator.sizeOf(value.getMeasurementId())
-                      + RamUsageEstimator.shallowSizeOf(value.getStatistics())
-                      + (((ChunkMetadata) value.getChunkMetadataList().get(0)).calculateRamSize()
-                              + RamUsageEstimator.NUM_BYTES_OBJECT_REF)
-                          * value.getChunkMetadataList().size()
-                      + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList());
-              averageSize = ((averageSize * count) + currentSize) / (++count);
-            } else if (count < 100000) {
-              count++;
-              currentSize = averageSize;
-            } else {
-              averageSize =
-                  RamUsageEstimator.shallowSizeOf(key)
-                      + RamUsageEstimator.sizeOf(key.device)
-                      + RamUsageEstimator.sizeOf(key.measurement)
-                      + RamUsageEstimator.shallowSizeOf(value)
-                      + RamUsageEstimator.sizeOf(value.getMeasurementId())
-                      + RamUsageEstimator.shallowSizeOf(value.getStatistics())
-                      + (((ChunkMetadata) value.getChunkMetadataList().get(0)).calculateRamSize()
-                              + RamUsageEstimator.NUM_BYTES_OBJECT_REF)
-                          * value.getChunkMetadataList().size()
-                      + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList());
-              count = 1;
-              currentSize = averageSize;
-            }
-            return currentSize;
-          }
-        };
+        CacheBuilder.newBuilder()
+            .maximumWeight(MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE)
+            .weigher(
+                new Weigher<TimeSeriesMetadataCacheKey, TimeseriesMetadata>() {
+
+                  int count = 0;
+                  int averageSize = 0;
+
+                  @Override
+                  public int weigh(TimeSeriesMetadataCacheKey key, TimeseriesMetadata value) {
+                    int currentSize;
+                    if (count < 10) {
+                      currentSize =
+                          (int)
+                              (RamUsageEstimator.shallowSizeOf(key)
+                                  + RamUsageEstimator.sizeOf(key.device)
+                                  + RamUsageEstimator.sizeOf(key.measurement)
+                                  + RamUsageEstimator.shallowSizeOf(value)
+                                  + RamUsageEstimator.sizeOf(value.getMeasurementId())
+                                  + RamUsageEstimator.shallowSizeOf(value.getStatistics())
+                                  + (((ChunkMetadata) value.getChunkMetadataList().get(0))
+                                              .calculateRamSize()
+                                          + RamUsageEstimator.NUM_BYTES_OBJECT_REF)
+                                      * value.getChunkMetadataList().size()
+                                  + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList()));
+                      averageSize = ((averageSize * count) + currentSize) / (++count);
+                      entryAverageSize.set(averageSize);
+                    } else if (count < 100000) {
+                      count++;
+                      currentSize = averageSize;
+                    } else {
+                      averageSize =
+                          (int)
+                              (RamUsageEstimator.shallowSizeOf(key)
+                                  + RamUsageEstimator.sizeOf(key.device)
+                                  + RamUsageEstimator.sizeOf(key.measurement)
+                                  + RamUsageEstimator.shallowSizeOf(value)
+                                  + RamUsageEstimator.sizeOf(value.getMeasurementId())
+                                  + RamUsageEstimator.shallowSizeOf(value.getStatistics())
+                                  + (((ChunkMetadata) value.getChunkMetadataList().get(0))
+                                              .calculateRamSize()
+                                          + RamUsageEstimator.NUM_BYTES_OBJECT_REF)
+                                      * value.getChunkMetadataList().size()
+                                  + RamUsageEstimator.shallowSizeOf(value.getChunkMetadataList()));
+                      count = 1;
+                      currentSize = averageSize;
+                      entryAverageSize.set(averageSize);
+                    }
+                    return currentSize;
+                  }
+                })
+            .recordStats()
+            .build(
+                new CacheLoader<TimeSeriesMetadataCacheKey, TimeseriesMetadata>() {
+                  @Override
+                  public TimeseriesMetadata load(TimeSeriesMetadataCacheKey key) throws Exception {
+                    // bloom filter part
+                    TsFileSequenceReader reader =
+                        FileReaderManager.getInstance().get(key.filePath, true);
+                    BloomFilter bloomFilter = reader.readBloomFilter();
+                    if (bloomFilter != null
+                        && !bloomFilter.contains(
+                            key.device + IoTDBConstant.PATH_SEPARATOR + key.measurement)) {
+                      return null;
+                    }
+                    return reader.readTimeseriesMetadata(
+                        new Path(key.device, key.measurement), false);
+                  }
+                });
   }
 
   public static TimeSeriesMetadataCache getInstance() {
@@ -143,20 +171,9 @@ public class TimeSeriesMetadataCache {
       return reader.readTimeseriesMetadata(new Path(key.device, key.measurement), false);
     }
 
-    cacheRequestNum.incrementAndGet();
+    TimeseriesMetadata timeseriesMetadata = lruCache.getIfPresent(key);
 
-    TimeseriesMetadata timeseriesMetadata;
-    lock.readLock().lock();
-    try {
-      timeseriesMetadata = lruCache.get(key);
-    } finally {
-      lock.readLock().unlock();
-    }
-
-    if (timeseriesMetadata != null) {
-      cacheHitNum.incrementAndGet();
-      printCacheLog(true);
-    } else {
+    if (timeseriesMetadata == null) {
       if (debug) {
         DEBUG_LOGGER.info(
             "Cache miss: {}.{} in file: {}", key.device, key.measurement, key.filePath);
@@ -166,16 +183,8 @@ public class TimeSeriesMetadataCache {
       synchronized (
           devices.computeIfAbsent(key.device + SEPARATOR + key.filePath, WeakReference::new)) {
         // double check
-        lock.readLock().lock();
-        try {
-          timeseriesMetadata = lruCache.get(key);
-        } finally {
-          lock.readLock().unlock();
-        }
-        if (timeseriesMetadata != null) {
-          cacheHitNum.incrementAndGet();
-          printCacheLog(true);
-        } else {
+        timeseriesMetadata = lruCache.getIfPresent(key);
+        if (timeseriesMetadata == null) {
           Path path = new Path(key.device, key.measurement);
           // bloom filter part
           TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
@@ -186,25 +195,17 @@ public class TimeSeriesMetadataCache {
             }
             return null;
           }
-          printCacheLog(false);
           List<TimeseriesMetadata> timeSeriesMetadataList =
               reader.readTimeseriesMetadata(path, allSensors);
           // put TimeSeriesMetadata of all sensors used in this query into cache
-          lock.writeLock().lock();
-          try {
-            timeSeriesMetadataList.forEach(
-                metadata -> {
-                  TimeSeriesMetadataCacheKey k =
-                      new TimeSeriesMetadataCacheKey(
-                          key.filePath, key.device, metadata.getMeasurementId());
-                  if (!lruCache.containsKey(k)) {
-                    lruCache.put(k, metadata);
-                  }
-                });
-            timeseriesMetadata = lruCache.get(key);
-          } finally {
-            lock.writeLock().unlock();
-          }
+          timeSeriesMetadataList.forEach(
+              metadata -> {
+                TimeSeriesMetadataCacheKey k =
+                    new TimeSeriesMetadataCacheKey(
+                        key.filePath, key.device, metadata.getMeasurementId());
+                lruCache.put(k, metadata);
+              });
+          timeseriesMetadata = lruCache.getIfPresent(key);
         }
       }
     }
@@ -246,16 +247,11 @@ public class TimeSeriesMetadataCache {
       return reader.readTimeseriesMetadata(new Path(key.device, key.measurement), subSensorList);
     }
 
-    cacheRequestNum.incrementAndGet();
-
     List<TimeseriesMetadata> res = new ArrayList<>();
 
     getVectorTimeSeriesMetadataListFromCache(key, subSensorList, res);
 
-    if (!res.isEmpty()) {
-      cacheHitNum.incrementAndGet();
-      printCacheLog(true);
-    } else {
+    if (res.isEmpty()) {
       if (debug) {
         DEBUG_LOGGER.info(
             "Cache miss: {}.{} in file: {}", key.device, key.measurement, key.filePath);
@@ -266,10 +262,7 @@ public class TimeSeriesMetadataCache {
           devices.computeIfAbsent(key.device + SEPARATOR + key.filePath, WeakReference::new)) {
         // double check
         getVectorTimeSeriesMetadataListFromCache(key, subSensorList, res);
-        if (!res.isEmpty()) {
-          cacheHitNum.incrementAndGet();
-          printCacheLog(true);
-        } else {
+        if (res.isEmpty()) {
           Path path = new Path(key.device, key.measurement);
           // bloom filter part
           TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
@@ -280,25 +273,21 @@ public class TimeSeriesMetadataCache {
             }
             return Collections.emptyList();
           }
-          printCacheLog(false);
           List<TimeseriesMetadata> timeSeriesMetadataList =
               reader.readTimeseriesMetadata(path, allSensors);
+          Map<TimeSeriesMetadataCacheKey, TimeseriesMetadata> map = new HashMap<>();
           // put TimeSeriesMetadata of all sensors used in this query into cache
-          lock.writeLock().lock();
-          try {
-            timeSeriesMetadataList.forEach(
-                metadata -> {
-                  TimeSeriesMetadataCacheKey k =
-                      new TimeSeriesMetadataCacheKey(
-                          key.filePath, key.device, metadata.getMeasurementId());
-                  if (!lruCache.containsKey(k)) {
-                    lruCache.put(k, metadata);
-                  }
-                });
-            getVectorTimeSeriesMetadataListFromCache(key, subSensorList, res);
-          } finally {
-            lock.writeLock().unlock();
-          }
+          timeSeriesMetadataList.forEach(
+              metadata -> {
+                TimeSeriesMetadataCacheKey k =
+                    new TimeSeriesMetadataCacheKey(
+                        key.filePath, key.device, metadata.getMeasurementId());
+                lruCache.put(k, metadata);
+                map.put(k, metadata);
+              });
+          // We read back from this local map instead of the cache because the
+          // cache capacity may be too small to hold all the sub sensors of this vector
+          getVectorTimeSeriesMetadataListFromMap(key, subSensorList, res, map);
         }
       }
     }
@@ -323,94 +312,86 @@ public class TimeSeriesMetadataCache {
     }
   }
 
-  private void getVectorTimeSeriesMetadataListFromCache(
-      TimeSeriesMetadataCacheKey key, List<String> subSensorList, List<TimeseriesMetadata> res) {
-    lock.readLock().lock();
-    try {
-      TimeseriesMetadata timeseriesMetadata = lruCache.get(key);
-      if (timeseriesMetadata != null) {
-        res.add(timeseriesMetadata);
-        for (String subSensor : subSensorList) {
-          timeseriesMetadata =
-              lruCache.get(new TimeSeriesMetadataCacheKey(key.filePath, key.device, subSensor));
-          if (timeseriesMetadata != null) {
-            res.add(timeseriesMetadata);
-          } else {
-            res.clear();
-            break;
-          }
+  private void getVectorTimeSeriesMetadataListFromMap(
+      TimeSeriesMetadataCacheKey key,
+      List<String> subSensorList,
+      List<TimeseriesMetadata> res,
+      Map<TimeSeriesMetadataCacheKey, TimeseriesMetadata> map) {
+    TimeseriesMetadata timeseriesMetadata = map.get(key);
+    if (timeseriesMetadata != null) {
+      res.add(timeseriesMetadata);
+      for (String subSensor : subSensorList) {
+        timeseriesMetadata =
+            map.get(new TimeSeriesMetadataCacheKey(key.filePath, key.device, subSensor));
+        if (timeseriesMetadata != null) {
+          res.add(timeseriesMetadata);
+        } else {
+          res.clear();
+          break;
         }
       }
-    } finally {
-      lock.readLock().unlock();
     }
   }
 
-  private void printCacheLog(boolean isHit) {
-    if (!logger.isDebugEnabled()) {
-      return;
+  private void getVectorTimeSeriesMetadataListFromCache(
+      TimeSeriesMetadataCacheKey key, List<String> subSensorList, List<TimeseriesMetadata> res) {
+    TimeseriesMetadata timeseriesMetadata = lruCache.getIfPresent(key);
+    if (timeseriesMetadata != null) {
+      res.add(timeseriesMetadata);
+      for (String subSensor : subSensorList) {
+        timeseriesMetadata =
+            lruCache.getIfPresent(
+                new TimeSeriesMetadataCacheKey(key.filePath, key.device, subSensor));
+        if (timeseriesMetadata != null) {
+          res.add(timeseriesMetadata);
+        } else {
+          res.clear();
+          break;
+        }
+      }
     }
-    logger.debug(
-        "[TimeSeriesMetadata cache {}hit] The number of requests for cache is {}, hit rate is {}.",
-        isHit ? "" : "didn't ",
-        cacheRequestNum.get(),
-        cacheHitNum.get() * 1.0 / cacheRequestNum.get());
   }
 
   public double calculateTimeSeriesMetadataHitRatio() {
-    if (cacheRequestNum.get() != 0) {
-      return cacheHitNum.get() * 1.0 / cacheRequestNum.get();
-    } else {
-      return 0;
-    }
+    return lruCache.stats().hitRate();
   }
 
-  public long getUsedMemory() {
-    return lruCache.getUsedMemory();
+  public long getEvictionCount() {
+    return lruCache.stats().evictionCount();
   }
 
   public long getMaxMemory() {
-    return lruCache.getMaxMemory();
+    return MEMORY_THRESHOLD_IN_TIME_SERIES_METADATA_CACHE;
   }
 
-  public double getUsedMemoryProportion() {
-    return lruCache.getUsedMemoryProportion();
+  public double getAverageLoadPenalty() {
+    return lruCache.stats().averageLoadPenalty();
   }
 
   public long getAverageSize() {
-    return lruCache.getAverageSize();
+    return entryAverageSize.get();
   }
 
   /** clear LRUCache. */
   public void clear() {
-    lock.writeLock().lock();
-    if (lruCache != null) {
-      lruCache.clear();
-    }
-    lock.writeLock().unlock();
+    lruCache.cleanUp();
   }
 
   public void remove(TimeSeriesMetadataCacheKey key) {
-    lock.writeLock().lock();
-    if (key != null) {
-      lruCache.remove(key);
-    }
-    lock.writeLock().unlock();
+    lruCache.invalidate(key);
   }
 
   @TestOnly
   public boolean isEmpty() {
-    return lruCache.isEmpty();
+    return lruCache.size() == 0;
   }
 
-  public static class TimeSeriesMetadataCacheKey implements Accountable {
+  public static class TimeSeriesMetadataCacheKey {
 
     private final String filePath;
     private final String device;
     private final String measurement;
 
-    private long ramSize;
-
     public TimeSeriesMetadataCacheKey(String filePath, String device, String measurement) {
       this.filePath = filePath;
       this.device = device;
@@ -435,16 +416,6 @@ public class TimeSeriesMetadataCache {
     public int hashCode() {
       return Objects.hash(filePath, device, measurement);
     }
-
-    @Override
-    public void setRamSize(long size) {
-      this.ramSize = size;
-    }
-
-    @Override
-    public long getRamSize() {
-      return ramSize;
-    }
   }
 
   /** singleton pattern. */
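
A closing note on the locking that survives the migration: both lookup paths
in TimeSeriesMetadataCache serialize loads per (device, file) pair by
synchronizing on a WeakReference interned in the devices map. A standalone
sketch of that idiom (the key format and action are illustrative):

    import java.lang.ref.WeakReference;
    import java.util.Collections;
    import java.util.Map;
    import java.util.WeakHashMap;

    public class KeyedLockSketch {
      // computeIfAbsent interns one WeakReference per distinct key string; the
      // WeakHashMap lets a lock be collected once no thread is using its key.
      private final Map<String, WeakReference<String>> locks =
          Collections.synchronizedMap(new WeakHashMap<>());

      public void withLock(String key, Runnable action) {
        synchronized (locks.computeIfAbsent(key, WeakReference::new)) {
          action.run(); // at most one thread per distinct key runs here at a time
        }
      }

      public static void main(String[] args) {
        KeyedLockSketch sketch = new KeyedLockSketch();
        sketch.withLock("root.sg1.d1#/data/seq/f1.tsfile",
            () -> System.out.println("loading metadata"));
      }
    }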