You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@iotdb.apache.org by su...@apache.org on 2021/12/17 09:51:21 UTC

[iotdb] branch experimental/index updated: finish Hash structure

This is an automated email from the ASF dual-hosted git repository.

sunzesong pushed a commit to branch experimental/index
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/experimental/index by this push:
     new 06524ac  finish Hash structure
06524ac is described below

commit 06524ac49ad35472fc1188620cbe1595c5fabdee
Author: samperson1997 <sz...@mails.tsinghua.edu.cn>
AuthorDate: Fri Dec 17 17:49:56 2021 +0800

    finish Hash structure
---
 example/tsfile/pom.xml                             |   2 +-
 .../iotdb/tsfile/test1929/TsFileAggregation.java   |   4 +-
 .../iotdb/tsfile/test1929/TsFileRawRead.java       |  18 ++-
 .../apache/iotdb/tsfile/test1929/TsFileWrite.java  |   2 +
 .../iotdb/tsfile/common/conf/TSFileConfig.java     |   2 +-
 .../tsfile/file/metadata/MetadataIndexBucket.java  | 111 +++++---------
 .../file/metadata/MetadataIndexBucketEntry.java    |  32 +++-
 .../metadata/MetadataIndexBucketsConstructor.java  |  88 +++++++++++
 .../tsfile/file/metadata/TsFileMetadataHash.java   | 162 +++++++++++++++++++++
 .../iotdb/tsfile/read/TsFileSequenceReader.java    | 113 ++++++++++++--
 .../tsfile/read/controller/IMetadataQuerier.java   |   5 +
 .../read/controller/MetadataQuerierByFileImpl.java |  65 +++++++++
 .../tsfile/read/query/executor/TsFileExecutor.java |  11 +-
 .../tsfile/utils/ReadWriteForEncodingUtils.java    |  11 ++
 .../iotdb/tsfile/write/writer/TsFileIOWriter.java  |  29 ++--
 15 files changed, 540 insertions(+), 115 deletions(-)

diff --git a/example/tsfile/pom.xml b/example/tsfile/pom.xml
index 2db4040..01ab193 100644
--- a/example/tsfile/pom.xml
+++ b/example/tsfile/pom.xml
@@ -51,7 +51,7 @@
                         <configuration>
                             <archive>
                                 <manifest>
-                                    <mainClass>org.apache.iotdb.tsfile.test1929.TsFileAggregation</mainClass>
+                                    <mainClass>org.apache.iotdb.tsfile.test1929.TsFileRawRead</mainClass>
                                 </manifest>
                             </archive>
                             <descriptorRefs>
diff --git a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileAggregation.java b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileAggregation.java
index c9ef669..79bef60 100644
--- a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileAggregation.java
+++ b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileAggregation.java
@@ -94,7 +94,9 @@ public class TsFileAggregation {
         if (treeType == 0) {
           timeseriesMetadata = reader.readTimeseriesMetadataV4(seriesPath, false);
         } else if (treeType == 1) {
-          timeseriesMetadata = reader.readTimeseriesMetadataV5(seriesPath, false);
+          timeseriesMetadata = reader.readTimeseriesMetadataV5(seriesPath);
+        } else if (treeType == 2) {
+          timeseriesMetadata = reader.readTimeseriesMetadataHash(seriesPath);
         }
         long count = timeseriesMetadata.getStatistics().getCount();
       }
diff --git a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileRawRead.java b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileRawRead.java
index 2093536..c38fb18 100644
--- a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileRawRead.java
+++ b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileRawRead.java
@@ -43,6 +43,10 @@ public class TsFileRawRead {
   public static int treeType; // 0=Zesong Tree, 1=B+ Tree
   public static int fileNum;
 
+  public static long readFileMetadata;
+  public static long readBuffer;
+  public static long queryTime;
+
   private static final TSFileConfig config = TSFileDescriptor.getInstance().getConfig();
 
   public static void main(String[] args) throws IOException {
@@ -70,17 +74,19 @@ public class TsFileRawRead {
       deviceNum = Integer.parseInt(cl.getOptionValue("d"));
       sensorNum = Integer.parseInt(cl.getOptionValue("m"));
       fileNum = Integer.parseInt(cl.getOptionValue("f"));
-      treeType = 1; // Integer.parseInt(cl.getOptionValue("t"));
-      config.setMaxDegreeOfIndexNode(Integer.parseInt(cl.getOptionValue("c")));
+      treeType = Integer.parseInt(cl.getOptionValue("t"));
+      config.setMaxDegreeOfIndexNode(1024);
     } catch (Exception e) {
       e.printStackTrace();
     }
 
     long totalStartTime = System.nanoTime();
     for (int fileIndex = 0; fileIndex < fileNum; fileIndex++) {
+      String folder = treeType == 1 ? "root.b/" : "root.hash/";
       // file path
       String path =
-          "/data/szs/data/data/sequence/root.b/"
+          "/data/szs/data/data/sequence/"
+              + folder
               + config.getMaxDegreeOfIndexNode()
               + "/"
               + deviceNum
@@ -103,8 +109,14 @@ public class TsFileRawRead {
         while (queryDataSet.hasNext()) {
           queryDataSet.next();
         }
+        readFileMetadata += reader.readFileMetadata;
+        readBuffer += reader.readBuffer;
+        queryTime += reader.queryTime;
       }
     }
+    System.out.println("readBuffer: " + (double) readBuffer / (double) fileNum + "ms");
+    System.out.println("readFileMetadata: " + (double) readFileMetadata / (double) fileNum + "ms");
+    System.out.println("query time: " + (double) queryTime / (double) fileNum + "ms");
     long totalTime = (System.nanoTime() - totalStartTime) / 1000_000;
     System.out.println("Average cost time: " + (double) totalTime / (double) fileNum + "ms");
   }
diff --git a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileWrite.java b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileWrite.java
index 2b9ac8c..45aed4f 100644
--- a/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileWrite.java
+++ b/example/tsfile/src/main/java/org/apache/iotdb/tsfile/test1929/TsFileWrite.java
@@ -135,6 +135,8 @@ public class TsFileWrite {
             tsFileWriter.close();
           } else if (treeType == 1) {
             tsFileWriter.closeBTree();
+          } else if (treeType == 2) {
+            tsFileWriter.closeHash();
           }
         } catch (Throwable e) {
           e.printStackTrace();
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
index d13f817..233ec22 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/common/conf/TSFileConfig.java
@@ -76,7 +76,7 @@ public class TSFileConfig implements Serializable {
   /** The maximum number of data points in a page, default value is 1024 * 1024. */
   private int maxNumberOfPointsInPage = 1024 * 1024;
   /** The maximum degree of a metadataIndex node, default value is 256 */
-  private int maxDegreeOfIndexNode = 256;
+  private int maxDegreeOfIndexNode = 5;
   /** Data type for input timestamp, TsFile supports INT64. */
   private TSDataType timeSeriesDataType = TSDataType.INT64;
   /** Max length limitation of input string. */
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucket.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucket.java
index fc78087..8a966cd 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucket.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucket.java
@@ -19,121 +19,78 @@
 
 package org.apache.iotdb.tsfile.file.metadata;
 
-import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
-import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
-import org.apache.iotdb.tsfile.file.metadata.enums.MetadataIndexNodeType;
 import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
-import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Comparator;
 import java.util.List;
 
 public class MetadataIndexBucket {
 
-  private static final TSFileConfig config = TSFileDescriptor.getInstance().getConfig();
-  private final List<MetadataIndexEntry> children;
-  private long endOffset;
-  private static final Logger resourceLogger = LoggerFactory.getLogger("FileMonitor");
-
-  /** type of the child node at offset */
-  private final MetadataIndexNodeType nodeType;
+  private final List<MetadataIndexBucketEntry> children;
 
-  public MetadataIndexBucket(MetadataIndexNodeType nodeType) {
-    children = new ArrayList<>();
-    endOffset = -1L;
-    this.nodeType = nodeType;
+  public MetadataIndexBucket() {
+    this.children = new ArrayList<>();
   }
 
-  public MetadataIndexBucket(
-      List<MetadataIndexEntry> children, long endOffset, MetadataIndexNodeType nodeType) {
+  public MetadataIndexBucket(List<MetadataIndexBucketEntry> children) {
     this.children = children;
-    this.endOffset = endOffset;
-    this.nodeType = nodeType;
   }
 
-  public List<MetadataIndexEntry> getChildren() {
+  public List<MetadataIndexBucketEntry> getChildren() {
     return children;
   }
 
-  public long getEndOffset() {
-    return endOffset;
-  }
-
-  public void setEndOffset(long endOffset) {
-    this.endOffset = endOffset;
-  }
-
-  public MetadataIndexNodeType getNodeType() {
-    return nodeType;
-  }
-
-  public void addEntry(MetadataIndexEntry metadataIndexEntry) {
-    this.children.add(metadataIndexEntry);
+  public void addEntry(MetadataIndexBucketEntry entry) {
+    this.children.add(entry);
   }
 
-  boolean isFull() {
-    return children.size() >= config.getMaxDegreeOfIndexNode();
+  public void orderEntries() {
+    this.children.sort(Comparator.comparing(MetadataIndexBucketEntry::getPath));
   }
 
-  MetadataIndexEntry peek() {
-    if (children.isEmpty()) {
-      return null;
-    }
-    return children.get(0);
-  }
-
-  public int serializeTo(OutputStream outputStream) throws IOException {
+  public int serializeTo(ByteBuffer buffer) throws IOException {
     int byteLen = 0;
-    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(children.size(), outputStream);
-    for (MetadataIndexEntry metadataIndexEntry : children) {
-      byteLen += metadataIndexEntry.serializeTo(outputStream);
+    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(children.size(), buffer);
+    for (MetadataIndexBucketEntry entry : children) {
+      byteLen += entry.serializeTo(buffer);
     }
-    byteLen += ReadWriteIOUtils.write(endOffset, outputStream);
-    byteLen += ReadWriteIOUtils.write(nodeType.serialize(), outputStream);
     return byteLen;
   }
 
   public static MetadataIndexBucket deserializeFrom(ByteBuffer buffer) {
-    List<MetadataIndexEntry> children = new ArrayList<>();
+    List<MetadataIndexBucketEntry> children = new ArrayList<>();
     int size = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
     for (int i = 0; i < size; i++) {
-      children.add(MetadataIndexEntry.deserializeFrom(buffer));
+      children.add(MetadataIndexBucketEntry.deserializeFrom(buffer));
     }
-    long offset = ReadWriteIOUtils.readLong(buffer);
-    MetadataIndexNodeType nodeType =
-        MetadataIndexNodeType.deserialize(ReadWriteIOUtils.readByte(buffer));
-    return new MetadataIndexBucket(children, offset, nodeType);
+    return new MetadataIndexBucket(children);
   }
 
-  public Pair<MetadataIndexEntry, Long> getChildIndexEntry(String key, boolean exactSearch) {
-    int index = binarySearchInChildren(key, exactSearch);
+  /**
+   * get the startOffset and size of the TimeseriesMetadata for the given path
+   *
+   * @param key path
+   */
+  public Pair<Long, Integer> getChildIndexEntry(String key) {
+    int index = binarySearchInChildren(key);
     if (index == -1) {
       return null;
     }
-    long childEndOffset;
-    if (index != children.size() - 1) {
-      childEndOffset = children.get(index + 1).getOffset();
-    } else {
-      childEndOffset = this.endOffset;
-    }
-    return new Pair<>(children.get(index), childEndOffset);
+    return new Pair<>(children.get(index).getOffset(), children.get(index).getSize());
   }
 
-  int binarySearchInChildren(String key, boolean exactSearch) {
+  int binarySearchInChildren(String key) {
     int low = 0;
     int high = children.size() - 1;
 
     while (low <= high) {
       int mid = (low + high) >>> 1;
-      MetadataIndexEntry midVal = children.get(mid);
-      int cmp = midVal.getName().compareTo(key);
+      MetadataIndexBucketEntry midVal = children.get(mid);
+      int cmp = midVal.getPath().compareTo(key);
 
       if (cmp < 0) {
         low = mid + 1;
@@ -145,10 +102,14 @@ public class MetadataIndexBucket {
     }
 
     // key not found
-    if (exactSearch) {
-      return -1;
-    } else {
-      return low == 0 ? low : low - 1;
+    return -1;
+  }
+
+  public int getSerializeSize() {
+    int size = ReadWriteForEncodingUtils.uVarIntSize(children.size());
+    for (MetadataIndexBucketEntry entry : children) {
+      size += entry.getSerializeSize();
     }
+    return size;
   }
 }
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketEntry.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketEntry.java
index 09282ae..7d81271 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketEntry.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketEntry.java
@@ -19,20 +19,22 @@
 
 package org.apache.iotdb.tsfile.file.metadata;
 
+import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
 import java.io.IOException;
-import java.io.OutputStream;
 import java.nio.ByteBuffer;
 
 public class MetadataIndexBucketEntry {
 
   private String path;
   private long offset;
+  private int size;
 
-  public MetadataIndexBucketEntry(String path, long offset) {
+  public MetadataIndexBucketEntry(String path, long offset, int size) {
     this.path = path;
     this.offset = offset;
+    this.size = size;
   }
 
   public String getPath() {
@@ -51,20 +53,36 @@ public class MetadataIndexBucketEntry {
     this.offset = offset;
   }
 
+  public int getSize() {
+    return size;
+  }
+
+  public void setSize(int size) {
+    this.size = size;
+  }
+
   public String toString() {
-    return "<" + path + "," + offset + ">";
+    return "<" + path + "," + offset + "," + size + ">";
   }
 
-  public int serializeTo(OutputStream outputStream) throws IOException {
+  public int serializeTo(ByteBuffer buffer) throws IOException {
     int byteLen = 0;
-    byteLen += ReadWriteIOUtils.writeVar(path, outputStream);
-    byteLen += ReadWriteIOUtils.write(offset, outputStream);
+    byteLen += ReadWriteIOUtils.writeVar(path, buffer);
+    byteLen += ReadWriteIOUtils.write(offset, buffer);
+    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(size, buffer);
     return byteLen;
   }
 
   public static MetadataIndexBucketEntry deserializeFrom(ByteBuffer buffer) {
     String name = ReadWriteIOUtils.readVarIntString(buffer);
     long offset = ReadWriteIOUtils.readLong(buffer);
-    return new MetadataIndexBucketEntry(name, offset);
+    int size = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
+    return new MetadataIndexBucketEntry(name, offset, size);
+  }
+
+  public int getSerializeSize() {
+    return ReadWriteForEncodingUtils.varSize(path)
+        + ReadWriteIOUtils.LONG_LEN
+        + ReadWriteForEncodingUtils.uVarIntSize(size);
   }
 }
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketsConstructor.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketsConstructor.java
new file mode 100644
index 0000000..8d4830c
--- /dev/null
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/MetadataIndexBucketsConstructor.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.tsfile.file.metadata;
+
+import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
+import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.utils.Pair;
+import org.apache.iotdb.tsfile.write.writer.TsFileOutput;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+public class MetadataIndexBucketsConstructor {
+
+  private static final TSFileConfig config = TSFileDescriptor.getInstance().getConfig();
+
+  private MetadataIndexBucketsConstructor() {
+    throw new IllegalStateException("Utility class");
+  }
+
+  /**
+   * Construct metadata index tree
+   *
+   * @param deviceTimeseriesMetadataMap device => TimeseriesMetadata list
+   * @param tsFileOutput tsfile output
+   */
+  @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
+  public static Pair<MetadataIndexBucket[], Integer> constructMetadataIndexBuckets(
+      Map<String, List<TimeseriesMetadata>> deviceTimeseriesMetadataMap, TsFileOutput tsFileOutput)
+      throws IOException {
+
+    int timeseriesMetadataNum = 0;
+    for (List<TimeseriesMetadata> timeseriesMetadataList : deviceTimeseriesMetadataMap.values()) {
+      timeseriesMetadataNum += timeseriesMetadataList.size();
+    }
+    int bucketNum =
+        (int) Math.ceil((double) timeseriesMetadataNum / config.getMaxDegreeOfIndexNode()); // B
+    MetadataIndexBucket[] buckets = new MetadataIndexBucket[bucketNum];
+    for (int i = 0; i < bucketNum; i++) {
+      buckets[i] = new MetadataIndexBucket();
+    }
+
+    // for timeseriesMetadata of each device
+    for (Entry<String, List<TimeseriesMetadata>> entry : deviceTimeseriesMetadataMap.entrySet()) {
+      if (entry.getValue().isEmpty()) {
+        continue;
+      }
+      for (int i = 0; i < entry.getValue().size(); i++) {
+        TimeseriesMetadata timeseriesMetadata = entry.getValue().get(i);
+        Path path = new Path(entry.getKey(), timeseriesMetadata.getMeasurementId());
+        int bucketId = Math.abs(path.hashCode()) % bucketNum;
+        long startOffset = tsFileOutput.getPosition();
+        int size = timeseriesMetadata.serializeTo(tsFileOutput.wrapAsStream());
+        buckets[bucketId].addEntry(
+            new MetadataIndexBucketEntry(path.getFullPath(), startOffset, size));
+      }
+    }
+
+    // for each bucket
+    int bucketSerializeSize = 0;
+    for (MetadataIndexBucket bucket : buckets) {
+      bucket.orderEntries();
+      bucketSerializeSize = Math.max(bucket.getSerializeSize(), bucketSerializeSize);
+    }
+
+    return new Pair<>(buckets, bucketSerializeSize);
+  }
+}
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TsFileMetadataHash.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TsFileMetadataHash.java
new file mode 100644
index 0000000..a993532
--- /dev/null
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/file/metadata/TsFileMetadataHash.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iotdb.tsfile.file.metadata;
+
+import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
+import org.apache.iotdb.tsfile.read.common.Path;
+import org.apache.iotdb.tsfile.utils.BloomFilter;
+import org.apache.iotdb.tsfile.utils.ReadWriteForEncodingUtils;
+import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Set;
+
+/** TsFileMetadataHash collects all metadata info and stores it in its data structure. */
+public class TsFileMetadataHash {
+
+  // bloom filter
+  private BloomFilter bloomFilter;
+
+  // buckets of <path, offset, size> entries
+  private MetadataIndexBucket[] metadataIndexBuckets;
+
+  private int bucketSize;
+
+  private int bucketNum;
+
+  /**
+   * deserialize data from the buffer.
+   *
+   * @param buffer -buffer used to deserialize
+   * @return -an instance of TsFileMetadataHash
+   */
+  public static TsFileMetadataHash deserializeFrom(ByteBuffer buffer) {
+    TsFileMetadataHash fileMetaData = new TsFileMetadataHash();
+
+    // metadataIndex
+    fileMetaData.bucketNum = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
+    fileMetaData.bucketSize = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
+
+    // read bloom filter
+    if (buffer.hasRemaining()) {
+      byte[] bytes = ReadWriteIOUtils.readByteBufferWithSelfDescriptionLength(buffer);
+      int filterSize = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
+      int hashFunctionSize = ReadWriteForEncodingUtils.readUnsignedVarInt(buffer);
+      fileMetaData.bloomFilter = BloomFilter.buildBloomFilter(bytes, filterSize, hashFunctionSize);
+    }
+
+    return fileMetaData;
+  }
+
+  public BloomFilter getBloomFilter() {
+    return bloomFilter;
+  }
+
+  public void setBloomFilter(BloomFilter bloomFilter) {
+    this.bloomFilter = bloomFilter;
+  }
+
+  public int serializeBuckets(OutputStream outputStream) throws IOException {
+    int byteLen = 0;
+    if (metadataIndexBuckets.length > 0) {
+      for (MetadataIndexBucket bucket : metadataIndexBuckets) {
+        ByteBuffer buffer = ByteBuffer.allocate(bucketSize);
+        byteLen += bucket.serializeTo(buffer);
+        outputStream.write(buffer.array());
+      }
+    }
+
+    return byteLen;
+  }
+
+  /**
+   * use the given outputStream to serialize.
+   *
+   * @param outputStream -output stream to determine byte length
+   * @return -byte length
+   */
+  public int serializeTo(OutputStream outputStream) throws IOException {
+    int byteLen = ReadWriteForEncodingUtils.writeUnsignedVarInt(bucketNum, outputStream);
+    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(bucketSize, outputStream);
+
+    return byteLen;
+  }
+
+  /**
+   * use the given outputStream to serialize bloom filter.
+   *
+   * @param outputStream -output stream to determine byte length
+   * @return -byte length
+   */
+  public int serializeBloomFilter(OutputStream outputStream, Set<Path> paths) throws IOException {
+    int byteLen = 0;
+    BloomFilter filter = buildBloomFilter(paths);
+
+    byte[] bytes = filter.serialize();
+    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(bytes.length, outputStream);
+    outputStream.write(bytes);
+    byteLen += bytes.length;
+    byteLen += ReadWriteForEncodingUtils.writeUnsignedVarInt(filter.getSize(), outputStream);
+    byteLen +=
+        ReadWriteForEncodingUtils.writeUnsignedVarInt(filter.getHashFunctionSize(), outputStream);
+    return byteLen;
+  }
+
+  /**
+   * build bloom filter
+   *
+   * @return bloom filter
+   */
+  private BloomFilter buildBloomFilter(Set<Path> paths) {
+    BloomFilter filter =
+        BloomFilter.getEmptyBloomFilter(
+            TSFileDescriptor.getInstance().getConfig().getBloomFilterErrorRate(), paths.size());
+    for (Path path : paths) {
+      filter.add(path.toString());
+    }
+    return filter;
+  }
+
+  public MetadataIndexBucket[] getMetadataIndexBuckets() {
+    return metadataIndexBuckets;
+  }
+
+  public void setMetadataIndexBuckets(MetadataIndexBucket[] metadataIndexBuckets) {
+    this.metadataIndexBuckets = metadataIndexBuckets;
+  }
+
+  public int getBucketSize() {
+    return bucketSize;
+  }
+
+  public void setBucketSize(int bucketSize) {
+    this.bucketSize = bucketSize;
+  }
+
+  public int getBucketNum() {
+    return bucketNum;
+  }
+
+  public void setBucketNum(int bucketNum) {
+    this.bucketNum = bucketNum;
+  }
+}
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
index 9d3e4fa..bfadaba 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/TsFileSequenceReader.java
@@ -31,12 +31,14 @@ import org.apache.iotdb.tsfile.file.header.PageHeader;
 import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetadata;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
+import org.apache.iotdb.tsfile.file.metadata.MetadataIndexBucket;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexEntry;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexNode;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexNodeV2;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadataV2;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata;
+import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataHash;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataV2;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.MetadataIndexNodeType;
@@ -97,6 +99,7 @@ public class TsFileSequenceReader implements AutoCloseable {
   protected int fileMetadataSize;
   private ByteBuffer markerBuffer = ByteBuffer.allocate(Byte.BYTES);
   protected TsFileMetadata tsFileMetaData;
+  protected TsFileMetadataHash tsFileMetaDataHash;
   // device -> measurement -> TimeseriesMetadata
   private Map<String, Map<String, TimeseriesMetadata>> cachedDeviceMetadata =
       new ConcurrentHashMap<>();
@@ -105,7 +108,9 @@ public class TsFileSequenceReader implements AutoCloseable {
   private long minPlanIndex = Long.MAX_VALUE;
   private long maxPlanIndex = Long.MIN_VALUE;
 
-  private long startTime;
+  public long readFileMetadata;
+  public long readBuffer;
+  public long queryTime;
 
   /**
    * Create a file reader of the given file. The reader will read the tail of the file to get the
@@ -127,9 +132,9 @@ public class TsFileSequenceReader implements AutoCloseable {
    * @param loadMetadataSize -whether load meta data size
    */
   public TsFileSequenceReader(String file, boolean loadMetadataSize) throws IOException {
-    if (resourceLogger.isDebugEnabled()) {
-      resourceLogger.debug("{} reader is opened. {}", file, getClass().getName());
-    }
+    //    if (resourceLogger.isDebugEnabled()) {
+    //      resourceLogger.debug("{} reader is opened. {}", file, getClass().getName());
+    //    }
     this.file = file;
     tsFileInput = FSFactoryProducer.getFileInputFactory().getTsFileInput(file);
     if (FSFactoryProducer.getFSFactory().getFile(file + ".index").exists()) {
@@ -324,14 +329,18 @@ public class TsFileSequenceReader implements AutoCloseable {
         ByteBuffer rootNodeOffsetBuffer = ByteBuffer.allocate(Long.BYTES);
         metadataIndexInput.read(rootNodeOffsetBuffer, metadataIndexInput.size() - Long.BYTES);
         rootNodeOffsetBuffer.flip();
+        long startTime = System.nanoTime();
 
         long rootNodeOffset = ReadWriteIOUtils.readLong(rootNodeOffsetBuffer);
-        tsFileMetaData =
-            TsFileMetadata.deserializeFromV2(
-                readData(
-                    rootNodeOffset,
-                    FSFactoryProducer.getFSFactory().getFile(this.file + ".index").length(),
-                    metadataIndexInput));
+        ByteBuffer buffer =
+            readData(
+                rootNodeOffset,
+                FSFactoryProducer.getFSFactory().getFile(this.file + ".index").length(),
+                metadataIndexInput);
+        readBuffer += (System.nanoTime() - startTime) / 1000_000;
+
+        tsFileMetaData = TsFileMetadata.deserializeFromV2(buffer);
+        readFileMetadata += (System.nanoTime() - startTime) / 1000_000;
       }
     } catch (BufferOverflowException e) {
       logger.error("Something error happened while reading file metadata of file {}", file);
@@ -340,6 +349,32 @@ public class TsFileSequenceReader implements AutoCloseable {
     return tsFileMetaData;
   }
 
+  public TsFileMetadataHash readFileMetadataHash() throws IOException {
+    try {
+      if (tsFileMetaDataHash == null) {
+        ByteBuffer rootNodeOffsetBuffer = ByteBuffer.allocate(Long.BYTES);
+        metadataIndexInput.read(rootNodeOffsetBuffer, metadataIndexInput.size() - Long.BYTES);
+        rootNodeOffsetBuffer.flip();
+        long startTime = System.nanoTime();
+
+        long metadataPosition = ReadWriteIOUtils.readLong(rootNodeOffsetBuffer);
+        ByteBuffer buffer =
+            readData(
+                metadataPosition,
+                FSFactoryProducer.getFSFactory().getFile(this.file + ".index").length(),
+                metadataIndexInput);
+        readBuffer += (System.nanoTime() - startTime) / 1000_000;
+
+        tsFileMetaDataHash = TsFileMetadataHash.deserializeFrom(buffer);
+        readFileMetadata += (System.nanoTime() - startTime) / 1000_000;
+      }
+    } catch (BufferOverflowException e) {
+      logger.error("Something error happened while reading file metadata of file {}", file);
+      throw e;
+    }
+    return tsFileMetaDataHash;
+  }
+
   /**
    * this function does not modify the position of the file reader.
    *
@@ -530,8 +565,7 @@ public class TsFileSequenceReader implements AutoCloseable {
     return searchResult >= 0 ? timeseriesMetadataList.get(searchResult) : null;
   }
 
-  public TimeseriesMetadata readTimeseriesMetadataV5(Path path, boolean ignoreNotExists)
-      throws IOException {
+  public TimeseriesMetadata readTimeseriesMetadataV5(Path path) throws IOException {
     readFileMetadataV3();
     MetadataIndexNodeV2 deviceMetadataIndexNode = tsFileMetaData.getMetadataIndexV2();
     Pair<MetadataIndexEntry, Long> metadataIndexPair =
@@ -557,6 +591,25 @@ public class TsFileSequenceReader implements AutoCloseable {
     return searchResult >= 0 ? timeseriesMetadataList.get(searchResult) : null;
   }
 
+  public TimeseriesMetadata readTimeseriesMetadataHash(Path path) throws IOException {
+    readFileMetadataHash();
+    // NOTE(review): Math.abs(Integer.MIN_VALUE) stays negative; formula must match the writer's
+    int bucketId = Math.abs(path.hashCode()) % tsFileMetaDataHash.getBucketNum();
+    int bucketSize = tsFileMetaDataHash.getBucketSize();
+    long startOffset = (long) bucketId * bucketSize; // widen before multiply: avoids int overflow
+    ByteBuffer bucketBuffer = readData(startOffset, startOffset + bucketSize, metadataIndexInput);
+    MetadataIndexBucket bucket = MetadataIndexBucket.deserializeFrom(bucketBuffer);
+    Pair<Long, Integer> timeseriesMetadataOffset = bucket.getChildIndexEntry(path.getFullPath());
+    ByteBuffer buffer = readData(timeseriesMetadataOffset.left, timeseriesMetadataOffset.right);
+    try {
+      return TimeseriesMetadata.deserializeFrom(buffer, true);
+    } catch (BufferOverflowException e) {
+      logger.error(
+          "Something error happened while deserializing TimeseriesMetadata of file {}", file);
+      throw e;
+    }
+  }
+
   /**
    * Find the leaf node that contains this vector, return all the needed subSensor and time column
    *
@@ -839,6 +892,7 @@ public class TsFileSequenceReader implements AutoCloseable {
 
   public List<TimeseriesMetadata> readTimeseriesMetadataV4(Set<String> paths) throws IOException {
     readFileMetadataV3();
+    long startTime = System.nanoTime();
     MetadataIndexNodeV2 deviceMetadataIndexNode = tsFileMetaData.getMetadataIndexV2();
     List<TimeseriesMetadata> timeseriesMetadataList = new ArrayList<>();
 
@@ -859,6 +913,35 @@ public class TsFileSequenceReader implements AutoCloseable {
         }
       }
     }
+    queryTime += (System.nanoTime() - startTime) / 1000_000;
+
+    return timeseriesMetadataList;
+  }
+
+  public List<TimeseriesMetadata> readTimeseriesMetadataHash(Set<String> paths) throws IOException {
+    readFileMetadataHash();
+
+    long startTime = System.nanoTime();
+    List<TimeseriesMetadata> timeseriesMetadataList = new ArrayList<>();
+
+    for (String path : paths) {
+      int bucketId = Math.abs(path.hashCode()) % tsFileMetaDataHash.getBucketNum();
+      int bucketSize = tsFileMetaDataHash.getBucketSize();
+      long startOffset = (long) bucketId * bucketSize; // widen before multiply: avoids int overflow
+      ByteBuffer bucketBuffer = readData(startOffset, startOffset + bucketSize, metadataIndexInput);
+      MetadataIndexBucket bucket = MetadataIndexBucket.deserializeFrom(bucketBuffer);
+      Pair<Long, Integer> timeseriesMetadataOffset = bucket.getChildIndexEntry(path);
+      ByteBuffer buffer = readData(timeseriesMetadataOffset.left, timeseriesMetadataOffset.right);
+      try {
+        timeseriesMetadataList.add(TimeseriesMetadata.deserializeFrom(buffer, true));
+      } catch (BufferOverflowException e) {
+        logger.error(
+            "Something error happened while deserializing TimeseriesMetadata of file {}", file);
+        throw e;
+      }
+    }
+    queryTime += (System.nanoTime() - startTime) / 1000_000;
+
     return timeseriesMetadataList;
   }
 
@@ -1438,9 +1521,9 @@ public class TsFileSequenceReader implements AutoCloseable {
 
   @Override
   public void close() throws IOException {
-    if (resourceLogger.isDebugEnabled()) {
-      resourceLogger.debug("{} reader is closed.", file);
-    }
+    //    if (resourceLogger.isDebugEnabled()) {
+    //      resourceLogger.debug("{} reader is closed.", file);
+    //    }
     this.tsFileInput.close();
   }
 
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
index 5b4bc53..1386d1b 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/IMetadataQuerier.java
@@ -21,6 +21,7 @@ package org.apache.iotdb.tsfile.read.controller;
 import org.apache.iotdb.tsfile.exception.write.NoMeasurementException;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata;
+import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataHash;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.read.common.TimeRange;
@@ -37,6 +38,8 @@ public interface IMetadataQuerier {
 
   TsFileMetadata getWholeFileMetadata();
 
+  TsFileMetadataHash getWholeFileMetadataHash();
+
   /**
    * this will load all chunk metadata of given paths into cache.
    *
@@ -51,6 +54,8 @@ public interface IMetadataQuerier {
 
   void loadChunkMetaDatasV4(List<Path> paths) throws IOException;
 
+  void loadChunkMetaDatasHash(List<Path> paths) throws IOException;
+
   /**
    * @return the corresponding data type.
    * @throws NoMeasurementException if the measurement not exists.
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/MetadataQuerierByFileImpl.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/MetadataQuerierByFileImpl.java
index 50d2357..ef74882 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/MetadataQuerierByFileImpl.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/controller/MetadataQuerierByFileImpl.java
@@ -24,6 +24,7 @@ import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadataV2;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata;
+import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataHash;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader;
 import org.apache.iotdb.tsfile.read.TsFileSequenceReader.LocateStatus;
@@ -49,6 +50,8 @@ public class MetadataQuerierByFileImpl implements IMetadataQuerier {
 
   private TsFileMetadata fileMetaData;
 
+  private TsFileMetadataHash fileMetaDataHash;
+
   private LRUCache<Path, List<ChunkMetadata>> chunkMetaDataCache;
 
   private TsFileSequenceReader tsFileReader;
@@ -74,6 +77,8 @@ public class MetadataQuerierByFileImpl implements IMetadataQuerier {
       this.fileMetaData = tsFileReader.readFileMetadataV2();
     } else if (treeType == 1) { // B+ Tree
       this.fileMetaData = tsFileReader.readFileMetadataV3();
+    } else if (treeType == 2) { // Hash
+      this.fileMetaDataHash = tsFileReader.readFileMetadataHash();
     }
     chunkMetaDataCache =
         new LRUCache<Path, List<ChunkMetadata>>(CACHED_ENTRY_NUMBER) {
@@ -107,6 +112,11 @@ public class MetadataQuerierByFileImpl implements IMetadataQuerier {
   }
 
   @Override
+  public TsFileMetadataHash getWholeFileMetadataHash() {
+    return fileMetaDataHash;
+  }
+
+  @Override
   @SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
   public void loadChunkMetaDatas(List<Path> paths) throws IOException {
     // group measurements by device
@@ -358,6 +368,61 @@ public class MetadataQuerierByFileImpl implements IMetadataQuerier {
     }
   }
 
+  public void loadChunkMetaDatasHash(List<Path> paths) throws IOException {
+    Set<String> pathSet = new HashSet<>();
+    TreeMap<String, Set<String>> deviceMeasurementsMap = new TreeMap<>();
+    // group the requested measurements by device and collect their full paths
+    for (Path path : paths) {
+      // computeIfAbsent: single-lookup replacement for containsKey/put/get
+      deviceMeasurementsMap
+          .computeIfAbsent(path.getDevice(), k -> new HashSet<>())
+          .add(path.getMeasurement());
+      pathSet.add(path.getFullPath());
+    }
+
+    Map<Path, List<ChunkMetadata>> tempChunkMetaDatas = new HashMap<>();
+
+    int count = 0;
+    boolean enough = false;
+
+    List<TimeseriesMetadata> timeseriesMetaDataList =
+        tsFileReader.readTimeseriesMetadataHash(pathSet);
+    List<ChunkMetadata> chunkMetadataList = new ArrayList<>();
+    for (TimeseriesMetadata timeseriesMetadata : timeseriesMetaDataList) {
+      chunkMetadataList.addAll(tsFileReader.readChunkMetaDataList(timeseriesMetadata));
+    }
+
+    // for cache
+    for (Map.Entry<String, Set<String>> deviceMeasurements : deviceMeasurementsMap.entrySet()) {
+      if (enough) {
+        break;
+      }
+      for (ChunkMetadata chunkMetaData : chunkMetadataList) {
+        String currentMeasurement = chunkMetaData.getMeasurementUid();
+        Path path = new Path(deviceMeasurements.getKey(), currentMeasurement);
+        if (!pathSet.contains(path.getFullPath())) {
+          continue;
+        }
+        // add into tempChunkMetaDatas; computeIfAbsent replaces the
+        // containsKey/put/get triple with a single map lookup
+        tempChunkMetaDatas
+            .computeIfAbsent(path, k -> new ArrayList<>())
+            .add(chunkMetaData);
+
+        // check cache size, stop when reading enough
+        count++;
+        if (count == CACHED_ENTRY_NUMBER) {
+          enough = true;
+          break;
+        }
+      }
+    }
+
+    for (Map.Entry<Path, List<ChunkMetadata>> entry : tempChunkMetaDatas.entrySet()) {
+      chunkMetaDataCache.put(entry.getKey(), entry.getValue());
+    }
+  }
+
   @Override
   public TSDataType getDataType(Path path) throws IOException {
     if (tsFileReader.getChunkMetadataList(path) == null
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
index 7b195f9..a8be48e 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/read/query/executor/TsFileExecutor.java
@@ -99,7 +99,14 @@ public class TsFileExecutor implements QueryExecutor {
   @Override
   public QueryDataSet execute(QueryExpression queryExpression, int treeType) throws IOException {
     // bloom filter
-    BloomFilter bloomFilter = metadataQuerier.getWholeFileMetadata().getBloomFilter();
+
+    BloomFilter bloomFilter;
+    if (treeType != 2) {
+      bloomFilter = metadataQuerier.getWholeFileMetadata().getBloomFilter();
+    } else {
+      bloomFilter = metadataQuerier.getWholeFileMetadataHash().getBloomFilter();
+    }
+
     List<Path> filteredSeriesPath = new ArrayList<>();
     if (bloomFilter != null) {
       for (Path path : queryExpression.getSelectedSeries()) {
@@ -114,6 +121,8 @@ public class TsFileExecutor implements QueryExecutor {
       metadataQuerier.loadChunkMetaDatasV3(queryExpression.getSelectedSeries());
     } else if (treeType == 1) {
       metadataQuerier.loadChunkMetaDatasV4(queryExpression.getSelectedSeries());
+    } else if (treeType == 2) {
+      metadataQuerier.loadChunkMetaDatasHash(queryExpression.getSelectedSeries());
     }
     if (queryExpression.hasQueryFilter()) {
       try {
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ReadWriteForEncodingUtils.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ReadWriteForEncodingUtils.java
index b9d562b..3bd4b7a 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ReadWriteForEncodingUtils.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/utils/ReadWriteForEncodingUtils.java
@@ -18,6 +18,8 @@
  */
 package org.apache.iotdb.tsfile.utils;
 
+import org.apache.iotdb.tsfile.common.conf.TSFileConfig;
+
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -252,6 +254,15 @@ public class ReadWriteForEncodingUtils {
     return position;
   }
 
+  public static int varSize(String s) {
+    if (s == null) {
+      return varIntSize(-1);
+    }
+    // serialized size = var-int length prefix + UTF-8 payload bytes -- presumably mirrors the
+    // var-int string write path; verify against the corresponding write method before relying on it
+    byte[] bytes = s.getBytes(TSFileConfig.STRING_CHARSET);
+    return varIntSize(bytes.length) + bytes.length;
+  }
+
   /**
    * write integer value using special bit to output stream.
    *
diff --git a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
index 4d6e70a..2241de3 100644
--- a/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
+++ b/tsfile/src/main/java/org/apache/iotdb/tsfile/write/writer/TsFileIOWriter.java
@@ -27,12 +27,14 @@ import org.apache.iotdb.tsfile.file.metadata.ChunkGroupMetadata;
 import org.apache.iotdb.tsfile.file.metadata.ChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.IChunkMetadata;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexBucket;
+import org.apache.iotdb.tsfile.file.metadata.MetadataIndexBucketsConstructor;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexConstructor;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexConstructorV2;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexNode;
 import org.apache.iotdb.tsfile.file.metadata.MetadataIndexNodeV2;
 import org.apache.iotdb.tsfile.file.metadata.TimeseriesMetadata;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadata;
+import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataHash;
 import org.apache.iotdb.tsfile.file.metadata.TsFileMetadataV2;
 import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
 import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
@@ -42,6 +44,7 @@ import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
 import org.apache.iotdb.tsfile.read.common.Chunk;
 import org.apache.iotdb.tsfile.read.common.Path;
 import org.apache.iotdb.tsfile.utils.BytesUtils;
+import org.apache.iotdb.tsfile.utils.Pair;
 import org.apache.iotdb.tsfile.utils.PublicBAOS;
 import org.apache.iotdb.tsfile.utils.ReadWriteIOUtils;
 
@@ -554,16 +557,20 @@ public class TsFileIOWriter {
       }
     }
 
-    TsFileMetadata tsFileMetaData = new TsFileMetadata();
+    TsFileMetadataHash tsFileMetaData = new TsFileMetadataHash();
     TsFileOutput metadataIndexOutput =
         new LocalTsFileOutput(new FileOutputStream(new File(file.getAbsolutePath() + ".index")));
-    MetadataIndexNodeV2 metadataIndex =
-        flushMetadataIndexV2(chunkMetadataListMap, vectorToPathsMap, metadataIndexOutput);
-    tsFileMetaData.setMetadataIndex(metadataIndex);
+    Pair<MetadataIndexBucket[], Integer> buckets =
+        flushMetadataIndexHash(chunkMetadataListMap, vectorToPathsMap);
 
-    long rootNodeOffset = metadataIndexOutput.getPosition();
     // write TsFileMetaData
-    int size = tsFileMetaData.serializeToV2(metadataIndexOutput.wrapAsStream());
+    tsFileMetaData.setMetadataIndexBuckets(buckets.left);
+    tsFileMetaData.setBucketNum(buckets.left.length);
+    tsFileMetaData.setBucketSize(buckets.right);
+
+    tsFileMetaData.serializeBuckets(metadataIndexOutput.wrapAsStream());
+    long metadataPosition = metadataIndexOutput.getPosition();
+    int size = tsFileMetaData.serializeTo(metadataIndexOutput.wrapAsStream());
     if (logger.isDebugEnabled()) {
       logger.debug(
           "finish flushing the footer {}, file pos:{}",
@@ -583,7 +590,7 @@ public class TsFileIOWriter {
     // write TsFileMetaData size
     ReadWriteIOUtils.write(
         size, metadataIndexOutput.wrapAsStream()); // write the size of the file metadata.
-    ReadWriteIOUtils.write(rootNodeOffset, metadataIndexOutput.wrapAsStream());
+    ReadWriteIOUtils.write(metadataPosition, metadataIndexOutput.wrapAsStream());
 
     metadataIndexOutput.close();
     ReadWriteIOUtils.write(metaOffset, out.wrapAsStream());
@@ -644,10 +651,9 @@ public class TsFileIOWriter {
         deviceTimeseriesMetadataMap, out, metadataIndexOutput);
   }
 
-  private MetadataIndexBucket[] flushMetadataIndexHash(
+  private Pair<MetadataIndexBucket[], Integer> flushMetadataIndexHash(
       Map<Path, List<IChunkMetadata>> chunkMetadataListMap,
-      Map<Path, Map<Path, List<IChunkMetadata>>> vectorToPathsMap,
-      TsFileOutput metadataIndexOutput)
+      Map<Path, Map<Path, List<IChunkMetadata>>> vectorToPathsMap)
       throws IOException {
 
     // convert ChunkMetadataList to this field
@@ -659,7 +665,8 @@ public class TsFileIOWriter {
     }
 
     // construct TsFileMetadata and return
-    return new MetadataIndexBucket[0];
+    return MetadataIndexBucketsConstructor.constructMetadataIndexBuckets(
+        deviceTimeseriesMetadataMap, out);
   }
 
   /**