Posted to commits@iotdb.apache.org by ja...@apache.org on 2020/07/15 14:01:44 UTC

[incubator-iotdb] branch CacheImprovement created (now 6157198)

This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a change to branch CacheImprovement
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


      at 6157198  change lock way

This branch includes the following new commits:

     new 6157198  change lock way

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[incubator-iotdb] 01/01: change lock way

Posted by ja...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jackietien pushed a commit to branch CacheImprovement
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 61571988bb1206ebeb3ee6fad15643542fb60ed1
Author: JackieTien97 <Ja...@foxmail.com>
AuthorDate: Wed Jul 15 16:13:44 2020 +0800

    change lock way
---
 .../apache/iotdb/db/engine/cache/ChunkCache.java   | 44 ++++++------
 .../iotdb/db/engine/cache/ChunkMetadataCache.java  | 35 ++++-----
 .../db/engine/cache/TimeSeriesMetadataCache.java   | 82 +++++++++++-----------
 3 files changed, 77 insertions(+), 84 deletions(-)
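
All three diffs below make the same change to the cache-miss path: the old code re-checked the cache and then read the chunk or metadata from disk while still holding the write lock, so every miss serialized behind that lock; the new code looks the key up under the read lock, performs the disk read with no lock held, and takes the write lock only for the brief put. A minimal, self-contained sketch of that shape (the class name, the plain HashMap and the Loader callback are illustrative placeholders, not the IoTDB classes, which use an LRU map and their own file readers):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch only: shows the read-lock lookup / unlocked load / write-lock put
// pattern that ChunkCache, ChunkMetadataCache and TimeSeriesMetadataCache
// switch to in this commit.
public class LockSplitCache<K, V> {

  public interface Loader<K, V> {
    V load(K key) throws IOException;
  }

  private final Map<K, V> cache = new HashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  public V get(K key, Loader<K, V> loadFromDisk) throws IOException {
    V value;
    // 1. Look the key up while holding only the read lock.
    lock.readLock().lock();
    try {
      value = cache.get(key);
    } finally {
      lock.readLock().unlock();
    }
    if (value == null) {
      // 2. On a miss, do the slow disk read with no lock held at all.
      value = loadFromDisk.load(key);
      // 3. Take the write lock only for the brief map insertion.
      lock.writeLock().lock();
      try {
        cache.put(key, value);
      } finally {
        lock.writeLock().unlock();
      }
    }
    return value;
  }
}

The trade-off accepted here is that two threads missing on the same key at the same time may both read from disk and both put (the later put simply overwrites the earlier one), whereas the old write-lock-around-I/O version never loaded a key twice; the TimeSeriesMetadataCache diff narrows that window with a per-device synchronized block, sketched after the last diff.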

diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
index 6819219..fb518dc 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkCache.java
@@ -90,36 +90,32 @@ public class ChunkCache {
 
     cacheRequestNum.incrementAndGet();
 
+    Chunk chunk;
+    lock.readLock().lock();
     try {
-      lock.readLock().lock();
-      Chunk chunk = lruCache.get(chunkMetaData);
-      if (chunk != null) {
-        cacheHitNum.incrementAndGet();
-        printCacheLog(true);
-        return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeleteIntervalList());
-      }
+      chunk = lruCache.get(chunkMetaData);
     } finally {
       lock.readLock().unlock();
     }
-
-    lock.writeLock().lock();
-    try {
-      Chunk chunk = lruCache.get(chunkMetaData);
-      if (chunk != null) {
-        cacheHitNum.incrementAndGet();
-        printCacheLog(true);
-        return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeleteIntervalList());
-      }
+    if (chunk != null) {
+      cacheHitNum.incrementAndGet();
+      printCacheLog(true);
+    } else {
       printCacheLog(false);
-      chunk = reader.readMemChunk(chunkMetaData);
-      lruCache.put(chunkMetaData, chunk);
-      return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeleteIntervalList());
-    } catch (IOException e) {
-      logger.error("something wrong happened while reading {}", reader.getFileName());
-      throw e;
-    } finally {
-      lock.writeLock().unlock();
+      try {
+        chunk = reader.readMemChunk(chunkMetaData);
+      } catch (IOException e) {
+        logger.error("something wrong happened while reading {}", reader.getFileName());
+        throw e;
+      }
+      lock.writeLock().lock();
+      try {
+        lruCache.put(chunkMetaData, chunk);
+      } finally {
+        lock.writeLock().unlock();
+      }
     }
+    return new Chunk(chunk.getHeader(), chunk.getData().duplicate(), chunk.getDeleteIntervalList());
 
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
index 6fc911f..af8e590 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/ChunkMetadataCache.java
@@ -130,25 +130,18 @@ public class ChunkMetadataCache {
     cacheRequestNum.incrementAndGet();
 
     lock.readLock().lock();
+    List<ChunkMetadata> chunkMetadataList;
     try {
-      List<ChunkMetadata> chunkMetadataList = lruCache.get(key);
-      if (chunkMetadataList != null) {
-        cacheHitNum.incrementAndGet();
-        printCacheLog(true);
-        return new ArrayList<>(chunkMetadataList);
-      }
+      chunkMetadataList = lruCache.get(key);
     } finally {
       lock.readLock().unlock();
     }
 
-    lock.writeLock().lock();
-    try {
-      List<ChunkMetadata> chunkMetadataList = lruCache.get(key);
-      if (chunkMetadataList != null) {
-        printCacheLog(true);
-        cacheHitNum.incrementAndGet();
-        return new ArrayList<>(chunkMetadataList);
-      }
+
+    if (chunkMetadataList != null) {
+      printCacheLog(true);
+      cacheHitNum.incrementAndGet();
+    } else {
       printCacheLog(false);
       // bloom filter part
       TsFileSequenceReader tsFileReader = FileReaderManager.getInstance().get(filePath, true);
@@ -156,13 +149,15 @@ public class ChunkMetadataCache {
       if (bloomFilter != null && !bloomFilter.contains(seriesPath.getFullPath())) {
         return new ArrayList<>();
       }
-      List<ChunkMetadata> chunkMetaDataList = FileLoaderUtils
-          .getChunkMetadataList(seriesPath, filePath);
-      lruCache.put(key, chunkMetaDataList);
-      return new ArrayList<>(chunkMetaDataList);
-    } finally {
-      lock.writeLock().unlock();
+      chunkMetadataList = FileLoaderUtils.getChunkMetadataList(seriesPath, filePath);
+      lock.writeLock().lock();
+      try {
+        lruCache.put(key, chunkMetadataList);
+      } finally {
+        lock.writeLock().unlock();
+      }
     }
+    return new ArrayList<>(chunkMetadataList);
   }
 
   private void printCacheLog(boolean isHit) {
diff --git a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
index c80eed6..89e43db 100644
--- a/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
+++ b/server/src/main/java/org/apache/iotdb/db/engine/cache/TimeSeriesMetadataCache.java
@@ -112,53 +112,55 @@ public class TimeSeriesMetadataCache {
 
     cacheRequestNum.incrementAndGet();
 
+    TimeseriesMetadata timeseriesMetadata;
+    lock.readLock().lock();
     try {
-      lock.readLock().lock();
-      TimeseriesMetadata timeseriesMetadata = lruCache.get(key);
-      if (timeseriesMetadata != null) {
-        cacheHitNum.incrementAndGet();
-        printCacheLog(true);
-        return new TimeseriesMetadata(timeseriesMetadata);
-      }
+      timeseriesMetadata = lruCache.get(key);
     } finally {
       lock.readLock().unlock();
     }
 
-    try {
-      lock.writeLock().lock();
-      TimeseriesMetadata tsMetadata = lruCache.get(key);
-      if (tsMetadata != null) {
-        cacheHitNum.incrementAndGet();
-        printCacheLog(true);
-        return new TimeseriesMetadata(tsMetadata);
-      }
-      printCacheLog(false);
-      // bloom filter part
-      TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
-      BloomFilter bloomFilter = reader.readBloomFilter();
-      if (bloomFilter != null && !bloomFilter
-          .contains(key.device + IoTDBConstant.PATH_SEPARATOR + key.measurement)) {
-        return null;
-      }
-      List<TimeseriesMetadata> timeSeriesMetadataList = reader
-          .readTimeseriesMetadata(key.device, allSensors);
-      // put TimeSeriesMetadata of all sensors used in this query into cache
-      timeSeriesMetadataList.forEach(t ->
-          lruCache.put(new TimeSeriesMetadataCacheKey(key.filePath, key.device,
-              t.getMeasurementId()), t));
-      tsMetadata = lruCache.get(key);
-      if (tsMetadata == null) {
-        return null;
-      } else {
-        return new TimeseriesMetadata(tsMetadata);
+    if (timeseriesMetadata != null) {
+      cacheHitNum.incrementAndGet();
+      printCacheLog(true);
+    } else {
+      // allow for the parallelism of different devices
+      synchronized (key.device.intern()) {
+        // double check
+        lock.readLock().lock();
+        try {
+          timeseriesMetadata = lruCache.get(key);
+        } finally {
+          lock.readLock().unlock();
+        }
+        if (timeseriesMetadata != null) {
+          cacheHitNum.incrementAndGet();
+          printCacheLog(true);
+        } else {
+          printCacheLog(false);
+          // bloom filter part
+          TsFileSequenceReader reader = FileReaderManager.getInstance().get(key.filePath, true);
+          BloomFilter bloomFilter = reader.readBloomFilter();
+          if (bloomFilter != null && !bloomFilter
+              .contains(key.device + IoTDBConstant.PATH_SEPARATOR + key.measurement)) {
+            return null;
+          }
+          List<TimeseriesMetadata> timeSeriesMetadataList = reader
+              .readTimeseriesMetadata(key.device, allSensors);
+          // put TimeSeriesMetadata of all sensors used in this query into cache
+          lock.writeLock().lock();
+          try {
+            timeSeriesMetadataList.forEach(metadata ->
+                lruCache.put(new TimeSeriesMetadataCacheKey(key.filePath, key.device,
+                    metadata.getMeasurementId()), metadata));
+            timeseriesMetadata = lruCache.get(key);
+          } finally {
+            lock.writeLock().unlock();
+          }
+        }
       }
-    } catch (IOException e) {
-      logger.error("something wrong happened while reading {}", key.filePath);
-      throw e;
-    } finally {
-      lock.writeLock().unlock();
     }
-
+    return new TimeseriesMetadata(timeseriesMetadata);
   }
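
The TimeSeriesMetadataCache change adds one more layer on top of that pattern: a miss is handled inside synchronized (key.device.intern()) with a second, double-checking cache lookup, so concurrent queries on the same device do not each re-read that device's TimeseriesMetadata from the file, while queries on different devices still proceed in parallel. A rough, self-contained sketch of that double check (the class, the HashMap and the DeviceLoader callback are illustrative, not the actual IoTDB types):

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch only: per-device double-checked miss handling as added in
// TimeSeriesMetadataCache. Distinct devices load in parallel; threads
// missing on the same device serialize on its interned name.
public class DeviceGuardedCache<K, V> {

  public interface DeviceLoader<K, V> {
    // Loads the entries for every sensor of one device in a single pass.
    Map<K, V> loadAllForDevice(String device) throws IOException;
  }

  private final Map<K, V> cache = new HashMap<>(); // the real class uses an LRU map
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  public V get(K key, String device, DeviceLoader<K, V> loader) throws IOException {
    V value = readLocked(key);
    if (value != null) {
      return value; // hit on the first, read-locked lookup
    }
    // One loader per device at a time; other devices are unaffected.
    synchronized (device.intern()) {
      value = readLocked(key);
      if (value != null) {
        return value; // double check: another thread loaded it while we waited
      }
      // Disk I/O happens here while no ReadWriteLock is held.
      Map<K, V> loaded = loader.loadAllForDevice(device);
      lock.writeLock().lock();
      try {
        cache.putAll(loaded); // cache every sensor of the device, as the diff does
        value = cache.get(key);
      } finally {
        lock.writeLock().unlock();
      }
      return value;
    }
  }

  private V readLocked(K key) {
    lock.readLock().lock();
    try {
      return cache.get(key);
    } finally {
      lock.readLock().unlock();
    }
  }
}

Synchronizing on the interned device string gives one monitor per distinct device name without keeping a separate lock map, which is the trick the committed code relies on.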