Posted to commits@iotdb.apache.org by su...@apache.org on 2020/06/17 05:33:11 UTC

[incubator-iotdb] branch mtree_checkpoint updated (a1eb83b -> 1ca709b)

This is an automated email from the ASF dual-hosted git repository.

sunzesong pushed a change to branch mtree_checkpoint
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git.


 discard a1eb83b  Delete useless import and log
 discard 0236967  [IOTDB-726] CheckPoint of MTree
    omit 517b583  Merge master
    omit 51754db  Merge remote-tracking branch 'origin/master' into mtree_checkpoint
    omit b78bd3e  fix typo
    omit bf062fb  use StringBuilder
    omit 878b317  optimize getChild()
    omit de58e20  fix getChildren()
    omit fd8f0af  init children of MNode when used
    omit 6324b42  Add props of MeasurementMNode
    omit 1d416e2  Fix code smell
    omit db6baca  Fix review comment and code smell
    omit 049bbfd  Merge remote-tracking branch 'origin/master' into mtree_checkpoint
    omit 81f7e59  Merge remote-tracking branch 'origin/master' into mtree_checkpoint
    omit 5bab6c6  Merge master
    omit 20cea74  Merge remote-tracking branch 'origin/master' into mtree_checkpoint
    omit 765c420  [IOTDB-726] CheckPoint of MTree
    omit bdc587b  [IOTDB-759] Refactor MNode by removing InternalMNode
     add 90c5a4d  disable sync by default (#1325)
     add 7766000  Merge remote-tracking branch 'origin/master'
     add b6beda8  Merge remote-tracking branch 'origin/master'
     add a2416e8  Merge remote-tracking branch 'origin/master'
     add b13184a  remove detail classpath in Win bat scripts (#1377)
     add 63de9f3  Merge remote-tracking branch 'origin/master'
     add 5fdbe9b  add max_degree_of_index_node and tag_attribute_total_size in starting check
     add c95584c  [IOTDB-769]Fix precision lost when using PLAIN for FLOAT/DOUBLE (#1370)
     add 94cb03b  no last in restart (#1382)
     add 44c9e49  [IOTDB-759] Refactor MNode by removing InternalMNode (#1345)
     new 1ca709b  MTree checkpoint

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (a1eb83b)
            \
             N -- N -- N   refs/heads/mtree_checkpoint (1ca709b)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../src/assembly/resources/sbin/start-server.bat   |   5 +-
 .../assembly/resources/tools/detect-watermark.bat  |   4 +-
 .../tools/logVisualize/log-visualizer-cmd.bat      |   4 +-
 .../tools/logVisualize/log-visualizer-gui.bat      |   4 +-
 .../src/assembly/resources/tools/memory-tool.bat   |   4 +-
 .../assembly/resources/tools/start-WalChecker.bat  |   5 +-
 .../assembly/resources/tools/start-sync-client.bat |   5 +-
 .../tools/tsfileToolSet/print-iotdb-data-dir.bat   |   4 +-
 .../tsfileToolSet/print-tsfile-resource-files.bat  |   4 +-
 .../tools/tsfileToolSet/print-tsfile-sketch.bat    |   4 +-
 .../resources/tools/upgrade/offline-upgrade.bat    |   2 +-
 .../org/apache/iotdb/db/conf/IoTDBConfigCheck.java | 105 ++++++++++++-----
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  28 ++---
 .../iotdb/db/utils/datastructure/DoubleTVList.java |   2 +-
 .../iotdb/db/utils/datastructure/FloatTVList.java  |   2 +-
 .../db/utils/datastructure/PrecisionTest.java      | 125 +++++++++++++++++++++
 16 files changed, 228 insertions(+), 79 deletions(-)
 create mode 100644 server/src/test/java/org/apache/iotdb/db/utils/datastructure/PrecisionTest.java


[incubator-iotdb] 01/01: MTree checkpoint

Posted by su...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

sunzesong pushed a commit to branch mtree_checkpoint
in repository https://gitbox.apache.org/repos/asf/incubator-iotdb.git

commit 1ca709b469b2d50e67a3468d98b1b0a51a627c12
Author: samperson1997 <sz...@mails.tsinghua.edu.cn>
AuthorDate: Wed Jun 17 13:32:46 2020 +0800

    MTree checkpoint
---
 .../resources/conf/iotdb-engine.properties         |   3 +
 .../java/org/apache/iotdb/db/conf/IoTDBConfig.java |  19 +++-
 .../org/apache/iotdb/db/conf/IoTDBConfigCheck.java |  61 ++++++----
 .../org/apache/iotdb/db/conf/IoTDBDescriptor.java  |   4 +-
 .../org/apache/iotdb/db/metadata/MLogWriter.java   |  57 +++++-----
 .../org/apache/iotdb/db/metadata/MManager.java     | 123 ++++++++++++++-------
 .../java/org/apache/iotdb/db/metadata/MTree.java   |  87 ++++++++++++++-
 .../org/apache/iotdb/db/metadata/mnode/MNode.java  |   2 +-
 8 files changed, 254 insertions(+), 102 deletions(-)

diff --git a/server/src/assembly/resources/conf/iotdb-engine.properties b/server/src/assembly/resources/conf/iotdb-engine.properties
index 846228c..c0d21f6 100644
--- a/server/src/assembly/resources/conf/iotdb-engine.properties
+++ b/server/src/assembly/resources/conf/iotdb-engine.properties
@@ -204,6 +204,9 @@ tag_attribute_total_size=700
 # if enable partial insert, one measurement failure will not impact other measurements
 enable_partial_insert=true
 
+# Create a checkpoint and save a snapshot of the mtree every time mlog.txt grows by this many lines
+mtree_snapshot_interval=100000
+
 ####################
 ### Memory Control Configuration
 ####################
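
The property above sets the checkpoint cadence: a snapshot of the MTree is written
whenever the running mlog.txt line counter reaches a multiple of this interval. A
minimal, self-contained illustration of that cadence (the interval value is the
default from the property above; the loop and variable names are purely illustrative):

    int interval = 100_000;                                    // mtree_snapshot_interval
    for (int logLineNumber = 1; logLineNumber <= 250_000; logLineNumber++) {
      if (logLineNumber % interval == 0) {
        // a checkpoint would be taken here, i.e. at lines 100000 and 200000
      }
    }
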
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
index 23c111a..2b96a2d 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfig.java
@@ -568,12 +568,16 @@ public class IoTDBConfig {
   private int primitiveArraySize = 64;
 
   /**
-   * whether enable data partition
-   * if disabled, all data belongs to partition 0
+   * whether to enable data partition; if disabled, all data belongs to partition 0
    */
   private boolean enablePartition = false;
 
   /**
+   * Create a checkpoint and save a snapshot of the mtree every time mlog.txt grows by this many lines
+   */
+  private int mtreeSnapshotInterval = 100000;
+
+  /**
    * Time range for partitioning data inside each storage group, the unit is second
    */
   private long partitionInterval = 604800;
@@ -628,6 +632,14 @@ public class IoTDBConfig {
     this.enablePartition = enablePartition;
   }
 
+  public int getMtreeSnapshotInterval() {
+    return mtreeSnapshotInterval;
+  }
+
+  public void setMtreeSnapshotInterval(int mtreeSnapshotInterval) {
+    this.mtreeSnapshotInterval = mtreeSnapshotInterval;
+  }
+
   public long getPartitionInterval() {
     return partitionInterval;
   }
@@ -1211,7 +1223,8 @@ public class IoTDBConfig {
     return allocateMemoryForTimeSeriesMetaDataCache;
   }
 
-  public void setAllocateMemoryForTimeSeriesMetaDataCache(long allocateMemoryForTimeSeriesMetaDataCache) {
+  public void setAllocateMemoryForTimeSeriesMetaDataCache(
+      long allocateMemoryForTimeSeriesMetaDataCache) {
     this.allocateMemoryForTimeSeriesMetaDataCache = allocateMemoryForTimeSeriesMetaDataCache;
   }
 
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
index 807c5e2..8acc79f 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBConfigCheck.java
@@ -18,9 +18,17 @@
  */
 package org.apache.iotdb.db.conf;
 
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.file.Files;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Properties;
 import org.apache.commons.io.FileUtils;
 import org.apache.iotdb.db.conf.directories.DirectoryManager;
 import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
@@ -34,20 +42,17 @@ import org.apache.iotdb.tsfile.fileSystem.FSFactoryProducer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.*;
-import java.nio.file.Files;
-import java.util.List;
-import java.util.Properties;
-
 public class IoTDBConfigCheck {
 
   private static final Logger logger = LoggerFactory.getLogger(IoTDBDescriptor.class);
 
+  private static final IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
+
   // this file is located in data/system/schema/system.properties
   // If user delete folder "data", system.properties can reset.
   private static final String PROPERTIES_FILE_NAME = "system.properties";
-  private static final String SCHEMA_DIR = IoTDBDescriptor.getInstance().getConfig().getSchemaDir();
-  private static final String WAL_DIR = IoTDBDescriptor.getInstance().getConfig().getWalFolder();
+  private static final String SCHEMA_DIR = config.getSchemaDir();
+  private static final String WAL_DIR = config.getWalFolder();
 
   private File propertiesFile;
   private File tmpPropertiesFile;
@@ -59,22 +64,26 @@ public class IoTDBConfigCheck {
   private static final String SYSTEM_PROPERTIES_STRING = "System properties:";
 
   private static final String TIMESTAMP_PRECISION_STRING = "timestamp_precision";
-  private static String timestampPrecision = IoTDBDescriptor.getInstance().getConfig().getTimestampPrecision();
+  private static String timestampPrecision = config.getTimestampPrecision();
 
   private static final String PARTITION_INTERVAL_STRING = "partition_interval";
-  private static long partitionInterval = IoTDBDescriptor.getInstance().getConfig().getPartitionInterval();
+  private static long partitionInterval = config.getPartitionInterval();
 
   private static final String TSFILE_FILE_SYSTEM_STRING = "tsfile_storage_fs";
-  private static String tsfileFileSystem = IoTDBDescriptor.getInstance().getConfig().getTsFileStorageFs().toString();
+  private static String tsfileFileSystem = config.getTsFileStorageFs().toString();
 
   private static final String ENABLE_PARTITION_STRING = "enable_partition";
-  private static boolean enablePartition = IoTDBDescriptor.getInstance().getConfig().isEnablePartition();
+  private static boolean enablePartition = config.isEnablePartition();
 
   private static final String TAG_ATTRIBUTE_SIZE_STRING = "tag_attribute_total_size";
-  private static final String tagAttributeTotalSize = String.valueOf(IoTDBDescriptor.getInstance().getConfig().getTagAttributeTotalSize());
+  private static String tagAttributeTotalSize = String.valueOf(config.getTagAttributeTotalSize());
 
   private static final String MAX_DEGREE_OF_INDEX_STRING = "max_degree_of_index_node";
-  private static final String maxDegreeOfIndexNode = String.valueOf(TSFileDescriptor.getInstance().getConfig().getMaxDegreeOfIndexNode());
+  private static String maxDegreeOfIndexNode = String
+      .valueOf(TSFileDescriptor.getInstance().getConfig().getMaxDegreeOfIndexNode());
+
+  private static final String MTREE_SNAPSHOT_INTERVAL = "mtree_snapshot_interval";
+  private static String mtreeSnapshotInterval = String.valueOf(config.getMtreeSnapshotInterval());
 
   private static final String IOTDB_VERSION_STRING = "iotdb_version";
 
@@ -86,6 +95,7 @@ public class IoTDBConfigCheck {
   }
 
   private static class IoTDBConfigCheckHolder {
+
     private static final IoTDBConfigCheck INSTANCE = new IoTDBConfigCheck();
   }
 
@@ -106,8 +116,9 @@ public class IoTDBConfigCheck {
     // check time stamp precision
     if (!(timestampPrecision.equals("ms") || timestampPrecision.equals("us")
         || timestampPrecision.equals("ns"))) {
-      logger.error("Wrong " + TIMESTAMP_PRECISION_STRING + ", please set as: ms, us or ns ! Current is: "
-          + timestampPrecision);
+      logger.error(
+          "Wrong " + TIMESTAMP_PRECISION_STRING + ", please set as: ms, us or ns ! Current is: "
+              + timestampPrecision);
       System.exit(-1);
     }
 
@@ -128,6 +139,7 @@ public class IoTDBConfigCheck {
     systemProperties.put(ENABLE_PARTITION_STRING, String.valueOf(enablePartition));
     systemProperties.put(TAG_ATTRIBUTE_SIZE_STRING, tagAttributeTotalSize);
     systemProperties.put(MAX_DEGREE_OF_INDEX_STRING, maxDegreeOfIndexNode);
+    systemProperties.put(MTREE_SNAPSHOT_INTERVAL, mtreeSnapshotInterval);
   }
 
 
@@ -136,14 +148,12 @@ public class IoTDBConfigCheck {
    *
    * When init: create system.properties directly
    *
-   * When upgrading the system.properties:
-   * (1) create system.properties.tmp
-   * (2) delete system.properties
-   * (2) rename system.properties.tmp to system.properties
+   * When upgrading the system.properties: (1) create system.properties.tmp (2) delete
+   * system.properties (3) rename system.properties.tmp to system.properties
    */
   public void checkConfig() throws IOException {
     propertiesFile = SystemFileFactory.INSTANCE
-            .getFile(IoTDBConfigCheck.SCHEMA_DIR + File.separator + PROPERTIES_FILE_NAME);
+        .getFile(IoTDBConfigCheck.SCHEMA_DIR + File.separator + PROPERTIES_FILE_NAME);
     tmpPropertiesFile = new File(propertiesFile.getAbsoluteFile() + ".tmp");
 
     // system init first time, no need to check, write system.properties and return
@@ -208,6 +218,7 @@ public class IoTDBConfigCheck {
       properties.setProperty(ENABLE_PARTITION_STRING, String.valueOf(enablePartition));
       properties.setProperty(TAG_ATTRIBUTE_SIZE_STRING, tagAttributeTotalSize);
       properties.setProperty(MAX_DEGREE_OF_INDEX_STRING, maxDegreeOfIndexNode);
+      properties.setProperty(MTREE_SNAPSHOT_INTERVAL, mtreeSnapshotInterval);
       properties.store(tmpFOS, SYSTEM_PROPERTIES_STRING);
 
       // upgrade finished, delete old system.properties file
@@ -221,7 +232,7 @@ public class IoTDBConfigCheck {
 
 
   /**
-   *  repair 0.10 properties
+   * repair 0.10 properties
    */
   private void upgradePropertiesFileFromBrokenFile()
       throws IOException {
@@ -290,13 +301,19 @@ public class IoTDBConfigCheck {
           .getProperty(MAX_DEGREE_OF_INDEX_STRING)));
       System.exit(-1);
     }
+
+    if (!(properties.getProperty(MTREE_SNAPSHOT_INTERVAL).equals(mtreeSnapshotInterval))) {
+      logger.error(String.format(ERROR_LOG, MTREE_SNAPSHOT_INTERVAL, properties
+          .getProperty(MTREE_SNAPSHOT_INTERVAL)));
+      System.exit(-1);
+    }
   }
 
   /**
    * ensure all tsfiles are closed in 0.9 when starting 0.10
    */
   private void checkUnClosedTsFileV1() {
-    if (SystemFileFactory.INSTANCE.getFile(WAL_DIR).isDirectory() 
+    if (SystemFileFactory.INSTANCE.getFile(WAL_DIR).isDirectory()
         && SystemFileFactory.INSTANCE.getFile(WAL_DIR).list().length != 0) {
       logger.error("Unclosed Version-1 TsFile detected, please run 'flush' on V0.9 IoTDB"
           + " before upgrading to V0.10");
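
Like the other tracked settings, the new mtree_snapshot_interval key is written to
system.properties on first startup and compared against the running configuration on
every later startup; a mismatch aborts the start. A condensed sketch of that check,
following the pattern used in checkSystemConfig above (ERROR_LOG, logger and
mtreeSnapshotInterval are the fields of this class; the literal path is illustrative):

    Properties properties = new Properties();
    try (FileInputStream inputStream =
        new FileInputStream("data/system/schema/system.properties")) {
      properties.load(inputStream);
    }
    if (!properties.getProperty("mtree_snapshot_interval").equals(mtreeSnapshotInterval)) {
      // stored value differs from the current iotdb-engine.properties: refuse to start
      logger.error(String.format(ERROR_LOG, "mtree_snapshot_interval",
          properties.getProperty("mtree_snapshot_interval")));
      System.exit(-1);
    }
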
diff --git a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
index 333b2d8..0ef9293 100644
--- a/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
+++ b/server/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java
@@ -315,6 +315,9 @@ public class IoTDBDescriptor {
           Boolean.parseBoolean(properties.getProperty("enable_partial_insert",
               String.valueOf(conf.isEnablePartialInsert()))));
 
+      conf.setMtreeSnapshotInterval(Integer.parseInt(properties.getProperty(
+          "mtree_snapshot_interval", Integer.toString(conf.getMtreeSnapshotInterval()))));
+
       conf.setEnablePerformanceStat(Boolean
           .parseBoolean(properties.getProperty("enable_performance_stat",
               Boolean.toString(conf.isEnablePerformanceStat())).trim()));
@@ -428,7 +431,6 @@ public class IoTDBDescriptor {
       //if using org.apache.iotdb.db.auth.authorizer.OpenIdAuthorizer, openID_url is needed.
       conf.setOpenIdProviderUrl(properties.getProperty("openID_url", ""));
 
-
       // At the same time, set TSFileConfig
       TSFileDescriptor.getInstance().getConfig()
           .setTSFileStorageFs(FSType.valueOf(
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/MLogWriter.java b/server/src/main/java/org/apache/iotdb/db/metadata/MLogWriter.java
index 72ee54b..4a9e834 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/MLogWriter.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/MLogWriter.java
@@ -35,6 +35,7 @@ public class MLogWriter {
 
   private static final Logger logger = LoggerFactory.getLogger(MLogWriter.class);
   private BufferedWriter writer;
+  private int lineNumber;
 
   public MLogWriter(String schemaDir, String logFileName) throws IOException {
     File metadataDir = SystemFileFactory.INSTANCE.getFile(schemaDir);
@@ -47,21 +48,18 @@ public class MLogWriter {
     }
 
     File logFile = SystemFileFactory.INSTANCE.getFile(schemaDir + File.separator + logFileName);
-
-    FileWriter fileWriter;
-    fileWriter = new FileWriter(logFile, true);
+    FileWriter fileWriter = new FileWriter(logFile, true);
     writer = new BufferedWriter(fileWriter);
   }
 
-
   public void close() throws IOException {
     writer.close();
   }
 
-  public void createTimeseries(CreateTimeSeriesPlan plan, long offset) throws IOException {
+  public int createTimeseries(CreateTimeSeriesPlan plan, long offset) throws IOException {
     writer.write(String.format("%s,%s,%s,%s,%s", MetadataOperationType.CREATE_TIMESERIES,
-        plan.getPath().getFullPath(), plan.getDataType().serialize(), plan.getEncoding().serialize(),
-        plan.getCompressor().serialize()));
+        plan.getPath().getFullPath(), plan.getDataType().serialize(),
+        plan.getEncoding().serialize(), plan.getCompressor().serialize()));
 
     writer.write(",");
     if (plan.getProps() != null) {
@@ -86,44 +84,37 @@ public class MLogWriter {
       writer.write(String.valueOf(offset));
     }
 
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void deleteTimeseries(String path) throws IOException {
+  public int deleteTimeseries(String path) throws IOException {
     writer.write(MetadataOperationType.DELETE_TIMESERIES + "," + path);
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void setStorageGroup(String storageGroup) throws IOException {
+  public int setStorageGroup(String storageGroup) throws IOException {
     writer.write(MetadataOperationType.SET_STORAGE_GROUP + "," + storageGroup);
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void deleteStorageGroup(String storageGroup) throws IOException {
+  public int deleteStorageGroup(String storageGroup) throws IOException {
     writer.write(MetadataOperationType.DELETE_STORAGE_GROUP + "," + storageGroup);
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void setTTL(String storageGroup, long ttl) throws IOException {
+  public int setTTL(String storageGroup, long ttl) throws IOException {
     writer.write(String.format("%s,%s,%s", MetadataOperationType.SET_TTL, storageGroup, ttl));
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void changeOffset(String path, long offset) throws IOException {
+  public int changeOffset(String path, long offset) throws IOException {
     writer.write(String.format("%s,%s,%s", MetadataOperationType.CHANGE_OFFSET, path, offset));
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
-  public void changeAlias(String path, String alias) throws IOException {
+  public int changeAlias(String path, String alias) throws IOException {
     writer.write(String.format("%s,%s,%s", MetadataOperationType.CHANGE_ALIAS, path, alias));
-    writer.newLine();
-    writer.flush();
+    return newLine();
   }
 
   public static void upgradeMLog(String schemaDir, String logFileName) throws IOException {
@@ -158,7 +149,6 @@ public class MLogWriter {
         writer.write(buf.toString());
         writer.newLine();
         writer.flush();
-        
       }
     }
 
@@ -166,9 +156,16 @@ public class MLogWriter {
     if (!logFile.delete()) {
       throw new IOException("Deleting " + logFile + "failed.");
     }
-    
+
     // rename tmpLogFile to mlog
     FSFactoryProducer.getFSFactory().moveFile(tmpLogFile, logFile);
   }
-  
+
+  private int newLine() throws IOException {
+    writer.newLine();
+    writer.flush();
+
+    // Return the (0-based) number of the line just written; the caller decides when to checkpoint the MTree
+    return lineNumber++;
+  }
 }
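
The net effect of these MLogWriter changes is that every metadata write now reports
the mlog.txt line it produced, and the decision to checkpoint is left to the caller.
A minimal sketch of the intended call site, using the names this commit introduces in
MManager (see the next diff; the storage group name is only an example):

    int logLineNumber = logWriter.setStorageGroup("root.sg1");
    if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
      // the log has grown by another full interval: persist the MTree as a snapshot
      mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
    }
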
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java b/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
index 23b292b..99805ef 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/MManager.java
@@ -83,11 +83,13 @@ public class MManager {
 
   private static final Logger logger = LoggerFactory.getLogger(MManager.class);
   private static final String TIME_SERIES_TREE_HEADER = "===  Timeseries Tree  ===\n\n";
+  private final int MTREE_SNAPSHOT_INTERVAL;
 
   // the lock for read/insert
   private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   // the log file seriesPath
   private String logFilePath;
+  private String mtreeSnapshotPath;
   private MTree mtree;
   private MLogWriter logWriter;
   private TagLogFile tagLogFile;
@@ -117,6 +119,7 @@ public class MManager {
 
   private MManager() {
     config = IoTDBDescriptor.getInstance().getConfig();
+    MTREE_SNAPSHOT_INTERVAL = config.getMtreeSnapshotInterval();
     String schemaDir = config.getSchemaDir();
     File schemaFolder = SystemFileFactory.INSTANCE.getFile(schemaDir);
     if (!schemaFolder.exists()) {
@@ -127,6 +130,7 @@ public class MManager {
       }
     }
     logFilePath = schemaDir + File.separator + MetadataConstant.METADATA_LOG;
+    mtreeSnapshotPath = schemaDir + File.separator + MetadataConstant.MTREE_SNAPSHOT;
 
     // do not write log when recover
     isRecovering = true;
@@ -199,11 +203,16 @@ public class MManager {
 
   private void initFromLog(File logFile) throws IOException {
     // init the metadata from the operation log
-    mtree = new MTree();
+    mtree = MTree.deserializeFrom(mtreeSnapshotPath);
     if (logFile.exists()) {
       try (FileReader fr = new FileReader(logFile);
           BufferedReader br = new BufferedReader(fr)) {
         String cmd;
+        int idx = 0;
+        while (idx <= mtree.getSnapshotLineNumber()) {
+          br.readLine();
+          idx++;
+        }
         while ((cmd = br.readLine()) != null) {
           try {
             operation(cmd);
@@ -358,7 +367,10 @@ public class MManager {
             || (plan.getAttributes() != null && !plan.getAttributes().isEmpty())) {
           offset = tagLogFile.write(plan.getTags(), plan.getAttributes());
         }
-        logWriter.createTimeseries(plan, offset);
+        int logLineNumber = logWriter.createTimeseries(plan, offset);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
       }
       leafMNode.setOffset(offset);
 
@@ -372,9 +384,9 @@ public class MManager {
   /**
    * Add one timeseries to metadata tree, if the timeseries already exists, throw exception
    *
-   * @param path       the timeseries path
-   * @param dataType   the dateType {@code DataType} of the timeseries
-   * @param encoding   the encoding function {@code Encoding} of the timeseries
+   * @param path the timeseries path
+   * @param dataType the dateType {@code DataType} of the timeseries
+   * @param encoding the encoding function {@code Encoding} of the timeseries
    * @param compressor the compressor function {@code Compressor} of the time series
    * @return whether the measurement occurs for the first time in this storage group (if true, the
    * measurement should be registered to the StorageEngine too)
@@ -425,7 +437,10 @@ public class MManager {
             if (emptyStorageGroup != null) {
               StorageEngine.getInstance().deleteAllDataFilesInOneStorageGroup(emptyStorageGroup);
             }
-            logWriter.deleteTimeseries(p);
+            int logLineNumber = logWriter.deleteTimeseries(p);
+            if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+              mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+            }
           }
         } catch (DeleteFailedException e) {
           failedNames.add(e.getName());
@@ -470,7 +485,8 @@ public class MManager {
             logger.debug(String.format(
                 "Delete: TimeSeries %s's tag info has been removed from tag inverted index before "
                     + "deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b",
-                node.getFullPath(), entry.getKey(), entry.getValue(), node.getOffset(), tagIndex.containsKey(entry.getKey())));
+                node.getFullPath(), entry.getKey(), entry.getValue(), node.getOffset(),
+                tagIndex.containsKey(entry.getKey())));
           }
         }
       }
@@ -528,7 +544,10 @@ public class MManager {
         seriesNumberInStorageGroups.put(storageGroup, 0);
       }
       if (!isRecovering) {
-        logWriter.setStorageGroup(storageGroup);
+        int logLineNumber = logWriter.setStorageGroup(storageGroup);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
       }
     } catch (IOException e) {
       throw new MetadataException(e.getMessage());
@@ -574,7 +593,10 @@ public class MManager {
         }
         // if success
         if (!isRecovering) {
-          logWriter.deleteStorageGroup(storageGroup);
+          int logLineNumber = logWriter.deleteStorageGroup(storageGroup);
+          if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+            mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+          }
         }
       }
     } catch (ConfigAdjusterException e) {
@@ -651,7 +673,7 @@ public class MManager {
    * Get all devices under given prefixPath.
    *
    * @param prefixPath a prefix of a full path. if the wildcard is not at the tail, then each
-   *                   wildcard can only match one level, otherwise it can match to the tail.
+   * wildcard can only match one level, otherwise it can match to the tail.
    * @return A HashSet instance which stores devices names with given prefixPath.
    */
   public Set<String> getDevices(String prefixPath) throws MetadataException {
@@ -667,9 +689,9 @@ public class MManager {
    * Get all nodes from the given level
    *
    * @param prefixPath can be a prefix of a full path. Can not be a full path. can not have
-   *                   wildcard. But, the level of the prefixPath can be smaller than the given
-   *                   level, e.g., prefixPath = root.a while the given level is 5
-   * @param nodeLevel  the level can not be smaller than the level of the prefixPath
+   * wildcard. But, the level of the prefixPath can be smaller than the given level, e.g.,
+   * prefixPath = root.a while the given level is 5
+   * @param nodeLevel the level can not be smaller than the level of the prefixPath
    * @return A List instance which stores all node at given level
    */
   public List<String> getNodesList(String prefixPath, int nodeLevel) throws MetadataException {
@@ -726,7 +748,7 @@ public class MManager {
    * expression in this method is formed by the amalgamation of seriesPath and the character '*'.
    *
    * @param prefixPath can be a prefix or a full path. if the wildcard is not at the tail, then each
-   *                   wildcard can only match one level, otherwise it can match to the tail.
+   * wildcard can only match one level, otherwise it can match to the tail.
    */
   public List<String> getAllTimeseriesName(String prefixPath) throws MetadataException {
     lock.readLock().lock();
@@ -766,7 +788,7 @@ public class MManager {
    * To calculate the count of nodes in the given level for given prefix path.
    *
    * @param prefixPath a prefix path or a full path, can not contain '*'
-   * @param level      the level can not be smaller than the level of the prefixPath
+   * @param level the level can not be smaller than the level of the prefixPath
    */
   public int getNodesCountInGivenLevel(String prefixPath, int level) throws MetadataException {
     lock.readLock().lock();
@@ -917,7 +939,8 @@ public class MManager {
       throws MetadataException {
     lock.readLock().lock();
     try {
-      MNode leaf = mtree.getNodeByPath(device).getChild(measurement);
+      MNode node = mtree.getNodeByPath(device);
+      MNode leaf = node.getChild(measurement);
       if (leaf != null) {
         return ((MeasurementMNode) leaf).getSchema();
       } else {
@@ -1004,8 +1027,7 @@ public class MManager {
   }
 
   /**
-   * get device node, if the storage group is not set, create it when autoCreateSchema is true
-   * <p>
+   * get device node, if the storage group is not set, create it when autoCreateSchema is true <p>
    * (we develop this method as we need to get the node's lock after we get the lock.writeLock())
    *
    * <p>!!!!!!Attention!!!!! must call the return node's readUnlock() if you call this method.
@@ -1068,7 +1090,7 @@ public class MManager {
 
   public MNode getDeviceNode(String path) throws MetadataException {
     lock.readLock().lock();
-    MNode node = null;
+    MNode node;
     try {
       node = mNodeCache.get(path);
       return node;
@@ -1080,10 +1102,10 @@ public class MManager {
   }
 
   /**
-   * To reduce the String number in memory, 
-   * use the deviceId from MManager instead of the deviceId read from disk
-   * 
-   * @param deviceId read from disk
+   * To reduce the String number in memory, use the deviceId from MManager instead of the deviceId
+   * read from disk
+   *
+   * @param path read from disk
    * @return deviceId
    */
   public String getDeviceId(String path) {
@@ -1132,7 +1154,10 @@ public class MManager {
     try {
       getStorageGroupNode(storageGroup).setDataTTL(dataTTL);
       if (!isRecovering) {
-        logWriter.setTTL(storageGroup, dataTTL);
+        int logLineNumber = logWriter.setTTL(storageGroup, dataTTL);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
       }
     } finally {
       lock.writeLock().unlock();
@@ -1162,7 +1187,7 @@ public class MManager {
    * Check whether the given path contains a storage group change or set the new offset of a
    * timeseries
    *
-   * @param path   timeseries
+   * @param path timeseries
    * @param offset offset in the tag file
    */
   public void changeOffset(String path, long offset) throws MetadataException {
@@ -1192,10 +1217,10 @@ public class MManager {
    * upsert tags and attributes key-value for the timeseries if the key has existed, just use the
    * new value to update it.
    *
-   * @param alias         newly added alias
-   * @param tagsMap       newly added tags map
+   * @param alias newly added alias
+   * @param tagsMap newly added tags map
    * @param attributesMap newly added attributes map
-   * @param fullPath      timeseries
+   * @param fullPath timeseries
    */
   public void upsertTagsAndAttributes(String alias, Map<String, String> tagsMap,
       Map<String, String> attributesMap, String fullPath) throws MetadataException, IOException {
@@ -1218,7 +1243,10 @@ public class MManager {
         leafMNode.getParent().addAlias(alias, leafMNode);
         leafMNode.setAlias(alias);
         // persist to WAL
-        logWriter.changeAlias(fullPath, alias);
+        int logLineNumber = logWriter.changeAlias(fullPath, alias);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
       }
 
       if (tagsMap == null && attributesMap == null) {
@@ -1227,7 +1255,10 @@ public class MManager {
       // no tag or attribute, we need to add a new record in log
       if (leafMNode.getOffset() < 0) {
         long offset = tagLogFile.write(tagsMap, attributesMap);
-        logWriter.changeOffset(fullPath, offset);
+        int logLineNumber = logWriter.changeOffset(fullPath, offset);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
         leafMNode.setOffset(offset);
         // update inverted Index map
         if (tagsMap != null) {
@@ -1269,7 +1300,8 @@ public class MManager {
                 logger.debug(String.format(
                     "Upsert: TimeSeries %s's tag info has been removed from tag inverted index "
                         + "before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b",
-                    leafMNode.getFullPath(), key, beforeValue, leafMNode.getOffset(), tagIndex.containsKey(key)));
+                    leafMNode.getFullPath(), key, beforeValue, leafMNode.getOffset(),
+                    tagIndex.containsKey(key)));
               }
             }
           }
@@ -1297,7 +1329,7 @@ public class MManager {
    * add new attributes key-value for the timeseries
    *
    * @param attributesMap newly added attributes map
-   * @param fullPath      timeseries
+   * @param fullPath timeseries
    */
   public void addAttributes(Map<String, String> attributesMap, String fullPath)
       throws MetadataException, IOException {
@@ -1311,7 +1343,10 @@ public class MManager {
       // no tag or attribute, we need to add a new record in log
       if (leafMNode.getOffset() < 0) {
         long offset = tagLogFile.write(Collections.emptyMap(), attributesMap);
-        logWriter.changeOffset(fullPath, offset);
+        int logLineNumber = logWriter.changeOffset(fullPath, offset);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
         leafMNode.setOffset(offset);
         return;
       }
@@ -1339,7 +1374,7 @@ public class MManager {
   /**
    * add new tags key-value for the timeseries
    *
-   * @param tagsMap  newly added tags map
+   * @param tagsMap newly added tags map
    * @param fullPath timeseries
    */
   public void addTags(Map<String, String> tagsMap, String fullPath)
@@ -1354,7 +1389,10 @@ public class MManager {
       // no tag or attribute, we need to add a new record in log
       if (leafMNode.getOffset() < 0) {
         long offset = tagLogFile.write(tagsMap, Collections.emptyMap());
-        logWriter.changeOffset(fullPath, offset);
+        int logLineNumber = logWriter.changeOffset(fullPath, offset);
+        if (logLineNumber % MTREE_SNAPSHOT_INTERVAL == 0) {
+          mtree.serializeTo(mtreeSnapshotPath, logLineNumber);
+        }
         leafMNode.setOffset(offset);
         // update inverted Index map
         for (Entry<String, String> entry : tagsMap.entrySet()) {
@@ -1392,7 +1430,7 @@ public class MManager {
   /**
    * drop tags or attributes of the timeseries
    *
-   * @param keySet   tags key or attributes key
+   * @param keySet tags key or attributes key
    * @param fullPath timeseries path
    */
   public void dropTagsOrAttributes(Set<String> keySet, String fullPath)
@@ -1449,7 +1487,8 @@ public class MManager {
             logger.debug(String.format(
                 "Drop: TimeSeries %s's tag info has been removed from tag inverted index "
                     + "before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b",
-                leafMNode.getFullPath(), key, value, leafMNode.getOffset(), tagIndex.containsKey(key)));
+                leafMNode.getFullPath(), key, value, leafMNode.getOffset(),
+                tagIndex.containsKey(key)));
           }
         }
 
@@ -1525,7 +1564,8 @@ public class MManager {
             logger.debug(String.format(
                 "Set: TimeSeries %s's tag info has been removed from tag inverted index "
                     + "before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b",
-                leafMNode.getFullPath(), key, beforeValue, leafMNode.getOffset(), tagIndex.containsKey(key)));
+                leafMNode.getFullPath(), key, beforeValue, leafMNode.getOffset(),
+                tagIndex.containsKey(key)));
           }
         }
         tagIndex.computeIfAbsent(key, k -> new HashMap<>())
@@ -1539,8 +1579,8 @@ public class MManager {
   /**
    * rename the tag or attribute's key of the timeseries
    *
-   * @param oldKey   old key of tag or attribute
-   * @param newKey   new key of tag or attribute
+   * @param oldKey old key of tag or attribute
+   * @param newKey new key of tag or attribute
    * @param fullPath timeseries
    */
   public void renameTagOrAttributeKey(String oldKey, String newKey, String fullPath)
@@ -1590,7 +1630,8 @@ public class MManager {
             logger.debug(String.format(
                 "Rename: TimeSeries %s's tag info has been removed from tag inverted index "
                     + "before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b",
-                leafMNode.getFullPath(), oldKey, value, leafMNode.getOffset(), tagIndex.containsKey(oldKey)));
+                leafMNode.getFullPath(), oldKey, value, leafMNode.getOffset(),
+                tagIndex.containsKey(oldKey)));
           }
         }
         tagIndex.computeIfAbsent(newKey, k -> new HashMap<>())
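
With these changes MManager no longer replays the whole mlog.txt on restart: it first
loads the latest snapshot and then replays only the operations logged after that
checkpoint. A simplified sketch of the recovery path, condensed from initFromLog above
(error handling is omitted; operation() is the existing dispatch method of MManager):

    mtree = MTree.deserializeFrom(mtreeSnapshotPath);    // empty MTree if no snapshot exists yet
    if (logFile.exists()) {
      try (FileReader fr = new FileReader(logFile);
          BufferedReader br = new BufferedReader(fr)) {
        // skip the log lines already reflected in the snapshot
        for (int idx = 0; idx <= mtree.getSnapshotLineNumber(); idx++) {
          br.readLine();
        }
        String cmd;
        while ((cmd = br.readLine()) != null) {
          operation(cmd);                                // replay only post-checkpoint operations
        }
      }
    }
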
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/MTree.java b/server/src/main/java/org/apache/iotdb/db/metadata/MTree.java
index 089be37..494ea93 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/MTree.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/MTree.java
@@ -26,6 +26,12 @@ import static org.apache.iotdb.db.query.executor.LastQueryExecutor.calculateLast
 import com.alibaba.fastjson.JSON;
 import com.alibaba.fastjson.JSONObject;
 import com.alibaba.fastjson.serializer.SerializerFeature;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
@@ -40,11 +46,13 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Queue;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.TreeSet;
 import java.util.regex.Pattern;
 import java.util.stream.Stream;
 import org.apache.iotdb.db.conf.IoTDBConstant;
 import org.apache.iotdb.db.conf.IoTDBDescriptor;
+import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
 import org.apache.iotdb.db.exception.metadata.AliasAlreadyExistException;
 import org.apache.iotdb.db.exception.metadata.IllegalPathException;
 import org.apache.iotdb.db.exception.metadata.MetadataException;
@@ -72,17 +80,24 @@ import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
 public class MTree implements Serializable {
 
   private static final long serialVersionUID = -4200394435237291964L;
+
   private MNode root;
+  private int snapshotLineNumber;
 
-  private transient ThreadLocal<Integer> limit = new ThreadLocal<>();
-  private transient ThreadLocal<Integer> offset = new ThreadLocal<>();
-  private transient ThreadLocal<Integer> count = new ThreadLocal<>();
-  private transient ThreadLocal<Integer> curOffset = new ThreadLocal<>();
+  private transient static ThreadLocal<Integer> limit = new ThreadLocal<>();
+  private transient static ThreadLocal<Integer> offset = new ThreadLocal<>();
+  private transient static ThreadLocal<Integer> count = new ThreadLocal<>();
+  private transient static ThreadLocal<Integer> curOffset = new ThreadLocal<>();
 
   MTree() {
     this.root = new MNode(null, IoTDBConstant.PATH_ROOT);
   }
 
+  private MTree(MNode root, int snapshotLineNumber) {
+    this.root = root;
+    this.snapshotLineNumber = snapshotLineNumber;
+  }
+
   /**
    * Create a timeseries with a full path from root to leaf node Before creating a timeseries, the
    * storage group should be set first, throw exception otherwise
@@ -899,6 +914,70 @@ public class MTree implements Serializable {
     }
   }
 
+  public int getSnapshotLineNumber() {
+    return snapshotLineNumber;
+  }
+
+  public void serializeTo(String snapshotPath, int lineNumber) throws IOException {
+    BufferedWriter bw = new BufferedWriter(
+        new FileWriter(SystemFileFactory.INSTANCE.getFile(snapshotPath)));
+    bw.write(String.valueOf(lineNumber));
+    bw.newLine();
+    root.serializeTo(bw);
+    bw.close();
+  }
+
+  public static MTree deserializeFrom(String mtreeSnapshotPath) throws IOException {
+    File mtreeSnapshot = SystemFileFactory.INSTANCE.getFile(mtreeSnapshotPath);
+    if (!mtreeSnapshot.exists()) {
+      return new MTree();
+    }
+    BufferedReader br = new BufferedReader(new FileReader(mtreeSnapshot));
+    int snapshotLineNumber = Integer.valueOf(br.readLine());
+    String s;
+    Deque<MNode> nodeStack = new ArrayDeque<>();
+    MNode node = null;
+
+    while ((s = br.readLine()) != null) {
+      String[] nodeInfo = s.split(",");
+      short nodeType = Short.valueOf(nodeInfo[0]);
+      if (nodeType == MetadataConstant.STORAGE_GROUP_MNODE_TYPE) {
+        node = StorageGroupMNode.deserializeFrom(nodeInfo);
+      } else if (nodeType == MetadataConstant.MEASUREMENT_MNODE_TYPE) {
+        node = MeasurementMNode.deserializeFrom(nodeInfo);
+      } else {
+        node = new MNode(null, nodeInfo[1]);
+      }
+
+      int childrenSize = Integer.valueOf(nodeInfo[nodeInfo.length - 1]);
+      if (childrenSize == 0) {
+        nodeStack.push(node);
+      } else {
+        Map<String, MNode> childrenMap = new TreeMap<>();
+        for (int i = 0; i < childrenSize; i++) {
+          MNode child = nodeStack.removeFirst();
+          child.setParent(node);
+          childrenMap.put(child.getName(), child);
+          if (child instanceof MeasurementMNode) {
+            String alias = ((MeasurementMNode) child).getAlias();
+            if (alias != null) {
+              node.addAlias(alias, child);
+            }
+          }
+        }
+        node.setChildren(childrenMap);
+        nodeStack.push(node);
+      }
+    }
+    br.close();
+
+    limit = new ThreadLocal<>();
+    offset = new ThreadLocal<>();
+    count = new ThreadLocal<>();
+    curOffset = new ThreadLocal<>();
+    return new MTree(node, snapshotLineNumber);
+  }
+
   @Override
   public String toString() {
     JSONObject jsonObject = new JSONObject();
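
MTree.deserializeFrom rebuilds the tree bottom-up: the snapshot lists children before
their parent, each serialized node line ends with its child count, and already-built
subtrees are popped off a stack and reattached when their parent is read. The real line
format (handled by StorageGroupMNode.deserializeFrom and MeasurementMNode.deserializeFrom)
carries more fields; the self-contained toy below uses a simplified "<name>,<childCount>"
format purely to illustrate the stack-based reconstruction:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    public class PostOrderTreeDemo {

      static class Node {
        final String name;
        final List<Node> children = new ArrayList<>();

        Node(String name) {
          this.name = name;
        }
      }

      public static void main(String[] args) {
        // post-order listing: leaves s1 and s2 first, then their device d1, then the root
        String[] lines = {"s1,0", "s2,0", "d1,2", "root,1"};
        Deque<Node> stack = new ArrayDeque<>();
        Node node = null;
        for (String line : lines) {
          String[] fields = line.split(",");
          node = new Node(fields[0]);
          int childCount = Integer.parseInt(fields[1]);
          for (int i = 0; i < childCount; i++) {
            node.children.add(stack.pop());              // reattach previously built subtrees
          }
          stack.push(node);
        }
        Node root = node;                                // the last node read is the root
        Node device = root.children.get(0);
        // prints: root has child d1 with 2 children
        System.out.println(root.name + " has child " + device.name
            + " with " + device.children.size() + " children");
      }
    }

Storing the child count with each node keeps the snapshot flat (no indentation or
parent pointers on disk) while still allowing single-pass reconstruction.
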
diff --git a/server/src/main/java/org/apache/iotdb/db/metadata/mnode/MNode.java b/server/src/main/java/org/apache/iotdb/db/metadata/mnode/MNode.java
index a34df03..002540d 100644
--- a/server/src/main/java/org/apache/iotdb/db/metadata/mnode/MNode.java
+++ b/server/src/main/java/org/apache/iotdb/db/metadata/mnode/MNode.java
@@ -85,7 +85,7 @@ public class MNode implements Serializable {
   }
 
   /**
-   * If delete a leafMNode, lock its parent, if delete an InternalNode, lock itself
+   * delete a child
    */
   public void deleteChild(String name) throws DeleteFailedException {
     if (children != null && children.containsKey(name)) {